author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit	8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree	a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/scsi/isci
parent	406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/scsi/isci')
24 files changed, 4143 insertions, 3946 deletions
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 609dafd661d..6981b773a88 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -58,6 +58,7 @@
 #include "host.h"
 #include "isci.h"
 #include "port.h"
+#include "host.h"
 #include "probe_roms.h"
 #include "remote_device.h"
 #include "request.h"
@@ -192,27 +193,22 @@ static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
 
 static bool sci_controller_isr(struct isci_host *ihost)
 {
-	if (sci_controller_completion_queue_has_entries(ihost))
+	if (sci_controller_completion_queue_has_entries(ihost)) {
 		return true;
+	} else {
+		/*
+		 * we have a spurious interrupt it could be that we have already
+		 * emptied the completion queue from a previous interrupt */
+		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
 
-	/* we have a spurious interrupt it could be that we have already
-	 * emptied the completion queue from a previous interrupt
-	 * FIXME: really!?
-	 */
-	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
-
-	/* There is a race in the hardware that could cause us not to be
-	 * notified of an interrupt completion if we do not take this
-	 * step. We will mask then unmask the interrupts so if there is
-	 * another interrupt pending the clearing of the interrupt
-	 * source we get the next interrupt message.
-	 */
-	spin_lock(&ihost->scic_lock);
-	if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
+		/*
+		 * There is a race in the hardware that could cause us not to be notified
+		 * of an interrupt completion if we do not take this step. We will mask
+		 * then unmask the interrupts so if there is another interrupt pending
+		 * the clearing of the interrupt source we get the next interrupt message. */
 		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
 		writel(0, &ihost->smu_registers->interrupt_mask);
 	}
-	spin_unlock(&ihost->scic_lock);
 
 	return false;
 }
@@ -492,7 +488,7 @@ static void sci_controller_process_completions(struct isci_host *ihost)
 	u32 event_cycle;
 
 	dev_dbg(&ihost->pdev->dev,
-		"%s: completion queue beginning get:0x%08x\n",
+		"%s: completion queue begining get:0x%08x\n",
 		__func__,
 		ihost->completion_queue_get);
 
@@ -647,21 +643,29 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
 	if (completion_status != SCI_SUCCESS)
 		dev_info(&ihost->pdev->dev,
 			 "controller start timed out, continuing...\n");
+	isci_host_change_state(ihost, isci_ready);
 	clear_bit(IHOST_START_PENDING, &ihost->flags);
 	wake_up(&ihost->eventq);
 }
 
 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
-	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
-	struct isci_host *ihost = ha->lldd_ha;
+	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
 
 	if (test_bit(IHOST_START_PENDING, &ihost->flags))
 		return 0;
 
-	sas_drain_work(ha);
+	/* todo: use sas_flush_discovery once it is upstream */
+	scsi_flush_work(shost);
+
+	scsi_flush_work(shost);
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: ihost->status = %d, time = %ld\n",
+		__func__, isci_host_get_state(ihost), time);
 
 	return 1;
+
 }
 
 /**
@@ -703,15 +707,14 @@ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
 
 static void sci_controller_enable_interrupts(struct isci_host *ihost)
 {
-	set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
+	BUG_ON(ihost->smu_registers == NULL);
 	writel(0, &ihost->smu_registers->interrupt_mask);
 }
 
 void sci_controller_disable_interrupts(struct isci_host *ihost)
 {
-	clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
+	BUG_ON(ihost->smu_registers == NULL);
 	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
-	readl(&ihost->smu_registers->interrupt_mask); /* flush */
 }
 
 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
@@ -822,7 +825,7 @@ static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *
 		&ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
 }
 
-void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
 {
 	if (ihost->sm.current_state_id == SCIC_STARTING) {
 		/*
@@ -849,7 +852,6 @@ static bool is_phy_starting(struct isci_phy *iphy)
 	case SCI_PHY_SUB_AWAIT_SATA_POWER:
 	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
 	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
-	case SCI_PHY_SUB_AWAIT_OSSP_EN:
 	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
 	case SCI_PHY_SUB_FINAL:
 		return true;
@@ -858,39 +860,6 @@ static bool is_phy_starting(struct isci_phy *iphy)
 	}
 }
 
-bool is_controller_start_complete(struct isci_host *ihost)
-{
-	int i;
-
-	for (i = 0; i < SCI_MAX_PHYS; i++) {
-		struct isci_phy *iphy = &ihost->phys[i];
-		u32 state = iphy->sm.current_state_id;
-
-		/* in apc mode we need to check every phy, in
-		 * mpc mode we only need to check phys that have
-		 * been configured into a port
-		 */
-		if (is_port_config_apc(ihost))
-			/* pass */;
-		else if (!phy_get_non_dummy_port(iphy))
-			continue;
-
-		/* The controller start operation is complete iff:
-		 * - all links have been given an opportunity to start
-		 * - have no indication of a connected device
-		 * - have an indication of a connected device and it has
-		 *   finished the link training process.
-		 */
-		if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
-		    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
-		    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
-		    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
-			return false;
-	}
-
-	return true;
-}
-
 /**
  * sci_controller_start_next_phy - start phy
  * @scic: controller
@@ -911,7 +880,35 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
 		return status;
 
 	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
-		if (is_controller_start_complete(ihost)) {
+		bool is_controller_start_complete = true;
+		u32 state;
+		u8 index;
+
+		for (index = 0; index < SCI_MAX_PHYS; index++) {
+			iphy = &ihost->phys[index];
+			state = iphy->sm.current_state_id;
+
+			if (!phy_get_non_dummy_port(iphy))
+				continue;
+
+			/* The controller start operation is complete iff:
+			 * - all links have been given an opportunity to start
+			 * - have no indication of a connected device
+			 * - have an indication of a connected device and it has
+			 *   finished the link training process.
+			 */
+			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+			    (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+				is_controller_start_complete = false;
+				break;
+			}
+		}
+
+		/*
+		 * The controller has successfully finished the start process.
+		 * Inform the SCI Core user and transition to the READY state. */
+		if (is_controller_start_complete == true) {
 			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
 			sci_del_timer(&ihost->phy_timer);
 			ihost->phy_startup_timer_pending = false;
@@ -992,8 +989,9 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
 	u16 index;
 
 	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
-		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
-			 __func__, ihost->sm.current_state_id);
+		dev_warn(&ihost->pdev->dev,
+			 "SCIC Controller start operation requested in "
+			 "invalid state\n");
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1044,7 +1042,7 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
 	return SCI_SUCCESS;
 }
 
-void isci_host_start(struct Scsi_Host *shost)
+void isci_host_scan_start(struct Scsi_Host *shost)
 {
 	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
 	unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
@@ -1057,8 +1055,9 @@ void isci_host_start(struct Scsi_Host *shost)
 	spin_unlock_irq(&ihost->scic_lock);
 }
 
-static void isci_host_stop_complete(struct isci_host *ihost)
+static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
 {
+	isci_host_change_state(ihost, isci_stopped);
 	sci_controller_disable_interrupts(ihost);
 	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
 	wake_up(&ihost->eventq);
@@ -1077,34 +1076,6 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
 	writel(0, &ihost->smu_registers->interrupt_mask);
 }
 
-void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
-{
-	if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
-	    !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-		if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
-			/* Normal notification (task_done) */
-			dev_dbg(&ihost->pdev->dev,
-				"%s: Normal - ireq/task = %p/%p\n",
-				__func__, ireq, task);
-			task->lldd_task = NULL;
-			task->task_done(task);
-		} else {
-			dev_dbg(&ihost->pdev->dev,
-				"%s: Error - ireq/task = %p/%p\n",
-				__func__, ireq, task);
-			if (sas_protocol_ata(task->task_proto))
-				task->lldd_task = NULL;
-			sas_task_abort(task);
-		}
-	} else
-		task->lldd_task = NULL;
-
-	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
-		wake_up_all(&ihost->eventq);
-
-	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
-		isci_free_tag(ihost, ireq->io_tag);
-}
 /**
  * isci_host_completion_routine() - This function is the delayed service
  *    routine that calls the sci core library's completion handler. It's
@@ -1113,25 +1084,111 @@ void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_ta
  *    @data: This parameter specifies the ISCI host object
  *
  */
-void isci_host_completion_routine(unsigned long data)
+static void isci_host_completion_routine(unsigned long data)
 {
 	struct isci_host *ihost = (struct isci_host *)data;
+	struct list_head completed_request_list;
+	struct list_head errored_request_list;
+	struct list_head *current_position;
+	struct list_head *next_position;
+	struct isci_request *request;
+	struct isci_request *next_request;
+	struct sas_task *task;
 	u16 active;
 
+	INIT_LIST_HEAD(&completed_request_list);
+	INIT_LIST_HEAD(&errored_request_list);
+
 	spin_lock_irq(&ihost->scic_lock);
+
 	sci_controller_completion_handler(ihost);
+
+	/* Take the lists of completed I/Os from the host. */
+
+	list_splice_init(&ihost->requests_to_complete,
+			 &completed_request_list);
+
+	/* Take the list of errored I/Os from the host. */
+	list_splice_init(&ihost->requests_to_errorback,
+			 &errored_request_list);
+
 	spin_unlock_irq(&ihost->scic_lock);
 
-	/*
-	 * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
-	 * issued for hardware issue workaround
-	 */
-	active = isci_tci_active(ihost) - SCI_MAX_PORTS;
+	/* Process any completions in the lists. */
+	list_for_each_safe(current_position, next_position,
+			   &completed_request_list) {
 
-	/*
-	 * the coalesence timeout doubles at each encoding step, so
+		request = list_entry(current_position, struct isci_request,
+				     completed_node);
+		task = isci_request_access_task(request);
+
+		/* Normal notification (task_done) */
+		dev_dbg(&ihost->pdev->dev,
+			"%s: Normal - request/task = %p/%p\n",
+			__func__,
+			request,
+			task);
+
+		/* Return the task to libsas */
+		if (task != NULL) {
+
+			task->lldd_task = NULL;
+			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+				/* If the task is already in the abort path,
+				 * the task_done callback cannot be called.
+				 */
+				task->task_done(task);
+			}
+		}
+
+		spin_lock_irq(&ihost->scic_lock);
+		isci_free_tag(ihost, request->io_tag);
+		spin_unlock_irq(&ihost->scic_lock);
+	}
+	list_for_each_entry_safe(request, next_request, &errored_request_list,
+				 completed_node) {
+
+		task = isci_request_access_task(request);
+
+		/* Use sas_task_abort */
+		dev_warn(&ihost->pdev->dev,
+			 "%s: Error - request/task = %p/%p\n",
+			 __func__,
+			 request,
+			 task);
+
+		if (task != NULL) {
+
+			/* Put the task into the abort path if it's not there
+			 * already.
+			 */
+			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
+				sas_task_abort(task);
+
+		} else {
+			/* This is a case where the request has completed with a
+			 * status such that it needed further target servicing,
+			 * but the sas_task reference has already been removed
+			 * from the request. Since it was errored, it was not
+			 * being aborted, so there is nothing to do except free
+			 * it.
+			 */
+
+			spin_lock_irq(&ihost->scic_lock);
+			/* Remove the request from the remote device's list
+			 * of pending requests.
+			 */
+			list_del_init(&request->dev_node);
+			isci_free_tag(ihost, request->io_tag);
+			spin_unlock_irq(&ihost->scic_lock);
+		}
+	}
+
+	/* the coalesence timeout doubles at each encoding step, so
 	 * update it based on the ilog2 value of the outstanding requests
 	 */
+	active = isci_tci_active(ihost);
 	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
 	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
 	       &ihost->smu_registers->interrupt_coalesce_control);
@@ -1158,8 +1215,9 @@ void isci_host_completion_routine(unsigned long data)
 static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
 {
 	if (ihost->sm.current_state_id != SCIC_READY) {
-		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
-			 __func__, ihost->sm.current_state_id);
+		dev_warn(&ihost->pdev->dev,
+			 "SCIC Controller stop operation requested in "
+			 "invalid state\n");
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1185,7 +1243,7 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
 	switch (ihost->sm.current_state_id) {
 	case SCIC_RESET:
 	case SCIC_READY:
-	case SCIC_STOPPING:
+	case SCIC_STOPPED:
 	case SCIC_FAILED:
 		/*
 		 * The reset operation is not a graceful cleanup, just
@@ -1194,58 +1252,28 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
 		sci_change_state(&ihost->sm, SCIC_RESETTING);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
-			 __func__, ihost->sm.current_state_id);
+		dev_warn(&ihost->pdev->dev,
+			 "SCIC Controller reset operation requested in "
+			 "invalid state\n");
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
 
-static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+void isci_host_deinit(struct isci_host *ihost)
 {
-	u32 index;
-	enum sci_status status;
-	enum sci_status phy_status;
-
-	status = SCI_SUCCESS;
-
-	for (index = 0; index < SCI_MAX_PHYS; index++) {
-		phy_status = sci_phy_stop(&ihost->phys[index]);
+	int i;
 
-		if (phy_status != SCI_SUCCESS &&
-		    phy_status != SCI_FAILURE_INVALID_STATE) {
-			status = SCI_FAILURE;
+	isci_host_change_state(ihost, isci_stopping);
+	for (i = 0; i < SCI_MAX_PORTS; i++) {
+		struct isci_port *iport = &ihost->ports[i];
+		struct isci_remote_device *idev, *d;
 
-			dev_warn(&ihost->pdev->dev,
-				 "%s: Controller stop operation failed to stop "
-				 "phy %d because of status %d.\n",
-				 __func__,
-				 ihost->phys[index].phy_index, phy_status);
+		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
+			if (test_bit(IDEV_ALLOCATED, &idev->flags))
+				isci_remote_device_stop(ihost, idev);
 		}
 	}
 
-	return status;
-}
-
-
-/**
- * isci_host_deinit - shutdown frame reception and dma
- * @ihost: host to take down
- *
- * This is called in either the driver shutdown or the suspend path. In
- * the shutdown case libsas went through port teardown and normal device
- * removal (i.e. physical links stayed up to service scsi_device removal
- * commands). In the suspend case we disable the hardware without
- * notifying libsas of the link down events since we want libsas to
- * remember the domain across the suspend/resume cycle
- */
-void isci_host_deinit(struct isci_host *ihost)
-{
-	int i;
-
-	/* disable output data selects */
-	for (i = 0; i < isci_gpio_count(ihost); i++)
-		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
-
 	set_bit(IHOST_STOP_PENDING, &ihost->flags);
 
 	spin_lock_irq(&ihost->scic_lock);
@@ -1253,22 +1281,7 @@ void isci_host_deinit(struct isci_host *ihost)
 	spin_unlock_irq(&ihost->scic_lock);
 
 	wait_for_stop(ihost);
-
-	/* phy stop is after controller stop to allow port and device to
-	 * go idle before shutting down the phys, but the expectation is
-	 * that i/o has been shut off well before we reach this
-	 * function.
-	 */
-	sci_controller_stop_phys(ihost);
-
-	/* disable sgpio: where the above wait should give time for the
-	 * enclosure to sample the gpios going inactive
-	 */
-	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
-
-	spin_lock_irq(&ihost->scic_lock);
 	sci_controller_reset(ihost);
-	spin_unlock_irq(&ihost->scic_lock);
 
 	/* Cancel any/all outstanding port timers */
 	for (i = 0; i < ihost->logical_port_entries; i++) {
@@ -1307,6 +1320,29 @@ static void __iomem *smu_base(struct isci_host *isci_host)
 	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
 }
 
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+	int i;
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		struct sci_phy_user_params *u_phy = &u->phys[i];
+
+		u_phy->max_speed_generation = phy_gen;
+
+		/* we are not exporting these for now */
+		u_phy->align_insertion_frequency = 0x7f;
+		u_phy->in_connection_align_insertion_frequency = 0xff;
+		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+	}
+
+	u->stp_inactivity_timeout = stp_inactive_to;
+	u->ssp_inactivity_timeout = ssp_inactive_to;
+	u->stp_max_occupancy_timeout = stp_max_occ_to;
+	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+	u->no_outbound_task_timeout = no_outbound_task_to;
+	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+}
+
 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
 {
 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
@@ -1444,15 +1480,6 @@ sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
 static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
 {
 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
-	u32 val;
-
-	/* enable clock gating for power control of the scu unit */
-	val = readl(&ihost->smu_registers->clock_gating_control);
-	val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
-		 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
-		 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
-	val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
-	writel(val, &ihost->smu_registers->clock_gating_control);
 
 	/* set the default interrupt coalescence number and timeout value. */
 	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
@@ -1466,6 +1493,32 @@ static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
 	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
 }
 
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+	u32 index;
+	enum sci_status status;
+	enum sci_status phy_status;
+
+	status = SCI_SUCCESS;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		phy_status = sci_phy_stop(&ihost->phys[index]);
+
+		if (phy_status != SCI_SUCCESS &&
+		    phy_status != SCI_FAILURE_INVALID_STATE) {
+			status = SCI_FAILURE;
+
+			dev_warn(&ihost->pdev->dev,
+				 "%s: Controller stop operation failed to stop "
+				 "phy %d because of status %d.\n",
+				 __func__,
+				 ihost->phys[index].phy_index, phy_status);
+		}
+	}
+
+	return status;
+}
+
 static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
 {
 	u32 index;
@@ -1525,11 +1578,10 @@ static void sci_controller_stopping_state_enter(struct sci_base_state_machine *s
 {
 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
 
-	sci_controller_stop_devices(ihost);
+	/* Stop all of the components for this controller */
+	sci_controller_stop_phys(ihost);
 	sci_controller_stop_ports(ihost);
-
-	if (!sci_controller_has_remote_devices_stopping(ihost))
-		isci_host_stop_complete(ihost);
+	sci_controller_stop_devices(ihost);
 }
 
 static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
@@ -1555,9 +1607,6 @@ static void sci_controller_reset_hardware(struct isci_host *ihost)
 
 	/* The write to the UFQGP clears the UFQPR */
 	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
-
-	/* clear all interrupts */
-	writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
 }
 
 static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
@@ -1589,9 +1638,55 @@ static const struct sci_base_state sci_controller_state_table[] = {
 		.enter_state = sci_controller_stopping_state_enter,
 		.exit_state = sci_controller_stopping_state_exit,
 	},
+	[SCIC_STOPPED] = {},
 	[SCIC_FAILED] = {}
 };
 
+static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
+{
+	/* these defaults are overridden by the platform / firmware */
+	u16 index;
+
+	/* Default to APC mode. */
+	ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+	/* Default to APC mode. */
+	ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+
+	/* Default to no SSC operation. */
+	ihost->oem_parameters.controller.do_enable_ssc = false;
+
+	/* Initialize all of the port parameter information to narrow ports. */
+	for (index = 0; index < SCI_MAX_PORTS; index++) {
+		ihost->oem_parameters.ports[index].phy_mask = 0;
+	}
+
+	/* Initialize all of the phy parameter information. */
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		/* Default to 6G (i.e. Gen 3) for now. */
+		ihost->user_parameters.phys[index].max_speed_generation = 3;
+
+		/* the frequencies cannot be 0 */
+		ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
+		ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
+		ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
+
+		/*
+		 * Previous Vitesse based expanders had a arbitration issue that
+		 * is worked around by having the upper 32-bits of SAS address
+		 * with a value greater then the Vitesse company identifier.
+		 * Hence, usage of 0x5FCFFFFF. */
+		ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
+		ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
+	}
+
+	ihost->user_parameters.stp_inactivity_timeout = 5;
+	ihost->user_parameters.ssp_inactivity_timeout = 5;
+	ihost->user_parameters.stp_max_occupancy_timeout = 5;
+	ihost->user_parameters.ssp_max_occupancy_timeout = 20;
+	ihost->user_parameters.no_outbound_task_timeout = 20;
+}
+
 static void controller_timeout(unsigned long data)
 {
 	struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1608,7 +1703,7 @@ static void controller_timeout(unsigned long data)
 		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
 	else if (sm->current_state_id == SCIC_STOPPING) {
 		sci_change_state(sm, SCIC_FAILED);
-		isci_host_stop_complete(ihost);
+		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
 	} else	/* / @todo Now what do we want to do in this case? */
 		dev_err(&ihost->pdev->dev,
 			"%s: Controller timer fired when controller was not "
@@ -1648,10 +1743,13 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
 
 	sci_init_timer(&ihost->timer, controller_timeout);
 
+	/* Initialize the User and OEM parameters to default values. */
+	sci_controller_set_default_config_parameters(ihost);
+
 	return sci_controller_reset(ihost);
 }
 
-int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
+int sci_oem_parameters_validate(struct sci_oem_params *oem)
 {
 	int i;
 
@@ -1679,62 +1777,27 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
 	} else
 		return -EINVAL;
 
-	if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
-	    oem->controller.max_concurr_spin_up < 1)
+	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
 		return -EINVAL;
 
-	if (oem->controller.do_enable_ssc) {
-		if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
-			return -EINVAL;
+	return 0;
+}
 
-		if (version >= ISCI_ROM_VER_1_1) {
-			u8 test = oem->controller.ssc_sata_tx_spread_level;
+static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
+{
+	u32 state = ihost->sm.current_state_id;
 
-			switch (test) {
-			case 0:
-			case 2:
-			case 3:
-			case 6:
-			case 7:
-				break;
-			default:
-				return -EINVAL;
-			}
+	if (state == SCIC_RESET ||
+	    state == SCIC_INITIALIZING ||
+	    state == SCIC_INITIALIZED) {
 
-			test = oem->controller.ssc_sas_tx_spread_level;
-			if (oem->controller.ssc_sas_tx_type == 0) {
-				switch (test) {
-				case 0:
-				case 2:
-				case 3:
-					break;
-				default:
-					return -EINVAL;
-				}
-			} else if (oem->controller.ssc_sas_tx_type == 1) {
-				switch (test) {
-				case 0:
-				case 3:
-				case 6:
-					break;
-				default:
-					return -EINVAL;
-				}
-			}
-		}
-	}
+		if (sci_oem_parameters_validate(&ihost->oem_parameters))
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
 
-	return 0;
+		return SCI_SUCCESS;
 	}
 
-static u8 max_spin_up(struct isci_host *ihost)
-{
-	if (ihost->user_parameters.max_concurr_spinup)
-		return min_t(u8, ihost->user_parameters.max_concurr_spinup,
-			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
-	else
-		return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
-			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+	return SCI_FAILURE_INVALID_STATE;
 }
 
 static void power_control_timeout(unsigned long data)
@@ -1766,38 +1829,14 @@ static void power_control_timeout(unsigned long data)
 		if (iphy == NULL)
 			continue;
 
-		if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
+		if (ihost->power_control.phys_granted_power >=
+				ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
 			break;
 
 		ihost->power_control.requesters[i] = NULL;
 		ihost->power_control.phys_waiting--;
 		ihost->power_control.phys_granted_power++;
 		sci_phy_consume_power_handler(iphy);
-
-		if (iphy->protocol == SAS_PROTOCOL_SSP) {
-			u8 j;
-
-			for (j = 0; j < SCI_MAX_PHYS; j++) {
-				struct isci_phy *requester = ihost->power_control.requesters[j];
-
-				/*
-				 * Search the power_control queue to see if there are other phys
-				 * attached to the same remote device. If found, take all of
-				 * them out of await_sas_power state.
-				 */
-				if (requester != NULL && requester != iphy) {
-					u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
-							  iphy->frame_rcvd.iaf.sas_addr,
-							  sizeof(requester->frame_rcvd.iaf.sas_addr));
-
-					if (other == 0) {
-						ihost->power_control.requesters[j] = NULL;
-						ihost->power_control.phys_waiting--;
-						sci_phy_consume_power_handler(requester);
-					}
-				}
-			}
-		}
 	}
 
 	/*
@@ -1816,7 +1855,8 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
 {
 	BUG_ON(iphy == NULL);
 
-	if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
+	if (ihost->power_control.phys_granted_power <
+			ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
 		ihost->power_control.phys_granted_power++;
 		sci_phy_consume_power_handler(iphy);
 
@@ -1832,34 +1872,9 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
 		ihost->power_control.timer_started = true;
 
 	} else {
-		/*
-		 * There are phys, attached to the same sas address as this phy, are
-		 * already in READY state, this phy don't need wait.
-		 */
-		u8 i;
-		struct isci_phy *current_phy;
-
-		for (i = 0; i < SCI_MAX_PHYS; i++) {
-			u8 other;
-			current_phy = &ihost->phys[i];
-
-			other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
-				       iphy->frame_rcvd.iaf.sas_addr,
-				       sizeof(current_phy->frame_rcvd.iaf.sas_addr));
-
-			if (current_phy->sm.current_state_id == SCI_PHY_READY &&
-			    current_phy->protocol == SAS_PROTOCOL_SSP &&
-			    other == 0) {
-				sci_phy_consume_power_handler(iphy);
-				break;
-			}
-		}
-
-		if (i == SCI_MAX_PHYS) {
-			/* Add the phy in the waiting list */
-			ihost->power_control.requesters[iphy->phy_index] = iphy;
-			ihost->power_control.phys_waiting++;
-		}
+		/* Add the phy in the waiting list */
+		ihost->power_control.requesters[iphy->phy_index] = iphy;
+		ihost->power_control.phys_waiting++;
 	}
 }
 
@@ -1874,250 +1889,162 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost, | |||
1874 | ihost->power_control.requesters[iphy->phy_index] = NULL; | 1889 | ihost->power_control.requesters[iphy->phy_index] = NULL; |
1875 | } | 1890 | } |
1876 | 1891 | ||
1877 | static int is_long_cable(int phy, unsigned char selection_byte) | ||
1878 | { | ||
1879 | return !!(selection_byte & (1 << phy)); | ||
1880 | } | ||
1881 | |||
1882 | static int is_medium_cable(int phy, unsigned char selection_byte) | ||
1883 | { | ||
1884 | return !!(selection_byte & (1 << (phy + 4))); | ||
1885 | } | ||
1886 | |||
1887 | static enum cable_selections decode_selection_byte( | ||
1888 | int phy, | ||
1889 | unsigned char selection_byte) | ||
1890 | { | ||
1891 | return ((selection_byte & (1 << phy)) ? 1 : 0) | ||
1892 | + (selection_byte & (1 << (phy + 4)) ? 2 : 0); | ||
1893 | } | ||
1894 | |||
1895 | static unsigned char *to_cable_select(struct isci_host *ihost) | ||
1896 | { | ||
1897 | if (is_cable_select_overridden()) | ||
1898 | return ((unsigned char *)&cable_selection_override) | ||
1899 | + ihost->id; | ||
1900 | else | ||
1901 | return &ihost->oem_parameters.controller.cable_selection_mask; | ||
1902 | } | ||
1903 | |||
1904 | enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy) | ||
1905 | { | ||
1906 | return decode_selection_byte(phy, *to_cable_select(ihost)); | ||
1907 | } | ||
1908 | |||
1909 | char *lookup_cable_names(enum cable_selections selection) | ||
1910 | { | ||
1911 | static char *cable_names[] = { | ||
1912 | [short_cable] = "short", | ||
1913 | [long_cable] = "long", | ||
1914 | [medium_cable] = "medium", | ||
1915 | [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */ | ||
1916 | }; | ||
1917 | return (selection <= undefined_cable) ? cable_names[selection] | ||
1918 | : cable_names[undefined_cable]; | ||
1919 | } | ||
1920 | |||
1921 | #define AFE_REGISTER_WRITE_DELAY 10 | 1892 | #define AFE_REGISTER_WRITE_DELAY 10 |
1922 | 1893 | ||
1894 | /* Initialize the AFE for this phy index. We need to read the AFE setup from | ||
1895 | * the OEM parameters | ||
1896 | */ | ||
1923 | static void sci_controller_afe_initialization(struct isci_host *ihost) | 1897 | static void sci_controller_afe_initialization(struct isci_host *ihost) |
1924 | { | 1898 | { |
1925 | struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; | ||
1926 | const struct sci_oem_params *oem = &ihost->oem_parameters; | 1899 | const struct sci_oem_params *oem = &ihost->oem_parameters; |
1927 | struct pci_dev *pdev = ihost->pdev; | 1900 | struct pci_dev *pdev = ihost->pdev; |
1928 | u32 afe_status; | 1901 | u32 afe_status; |
1929 | u32 phy_id; | 1902 | u32 phy_id; |
1930 | unsigned char cable_selection_mask = *to_cable_select(ihost); | ||
1931 | 1903 | ||
1932 | /* Clear DFX Status registers */ | 1904 | /* Clear DFX Status registers */ |
1933 | writel(0x0081000f, &afe->afe_dfx_master_control0); | 1905 | writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); |
1934 | udelay(AFE_REGISTER_WRITE_DELAY); | 1906 | udelay(AFE_REGISTER_WRITE_DELAY); |
1935 | 1907 | ||
1936 | if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) { | 1908 | if (is_b0(pdev)) { |
1937 | /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement | 1909 | /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement |
1938 | * Timer, PM Stagger Timer | 1910 | * Timer, PM Stagger Timer */ |
1939 | */ | 1911 | writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); |
1940 | writel(0x0007FFFF, &afe->afe_pmsn_master_control2); | ||
1941 | udelay(AFE_REGISTER_WRITE_DELAY); | 1912 | udelay(AFE_REGISTER_WRITE_DELAY); |
1942 | } | 1913 | } |
1943 | 1914 | ||
1944 | /* Configure bias currents to normal */ | 1915 | /* Configure bias currents to normal */ |
1945 | if (is_a2(pdev)) | 1916 | if (is_a2(pdev)) |
1946 | writel(0x00005A00, &afe->afe_bias_control); | 1917 | writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); |
1947 | else if (is_b0(pdev) || is_c0(pdev)) | 1918 | else if (is_b0(pdev) || is_c0(pdev)) |
1948 | writel(0x00005F00, &afe->afe_bias_control); | 1919 | writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); |
1949 | else if (is_c1(pdev)) | ||
1950 | writel(0x00005500, &afe->afe_bias_control); | ||
1951 | 1920 | ||
1952 | udelay(AFE_REGISTER_WRITE_DELAY); | 1921 | udelay(AFE_REGISTER_WRITE_DELAY); |
1953 | 1922 | ||
1954 | /* Enable PLL */ | 1923 | /* Enable PLL */ |
1955 | if (is_a2(pdev)) | 1924 | if (is_b0(pdev) || is_c0(pdev)) |
1956 | writel(0x80040908, &afe->afe_pll_control0); | 1925 | writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); |
1957 | else if (is_b0(pdev) || is_c0(pdev)) | 1926 | else |
1958 | writel(0x80040A08, &afe->afe_pll_control0); | 1927 | writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); |
1959 | else if (is_c1(pdev)) { | ||
1960 | writel(0x80000B08, &afe->afe_pll_control0); | ||
1961 | udelay(AFE_REGISTER_WRITE_DELAY); | ||
1962 | writel(0x00000B08, &afe->afe_pll_control0); | ||
1963 | udelay(AFE_REGISTER_WRITE_DELAY); | ||
1964 | writel(0x80000B08, &afe->afe_pll_control0); | ||
1965 | } | ||
1966 | 1928 | ||
1967 | udelay(AFE_REGISTER_WRITE_DELAY); | 1929 | udelay(AFE_REGISTER_WRITE_DELAY); |
1968 | 1930 | ||
1969 | /* Wait for the PLL to lock */ | 1931 | /* Wait for the PLL to lock */ |
1970 | do { | 1932 | do { |
1971 | afe_status = readl(&afe->afe_common_block_status); | 1933 | afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); |
1972 | udelay(AFE_REGISTER_WRITE_DELAY); | 1934 | udelay(AFE_REGISTER_WRITE_DELAY); |
1973 | } while ((afe_status & 0x00001000) == 0); | 1935 | } while ((afe_status & 0x00001000) == 0); |
1974 | 1936 | ||
1975 | if (is_a2(pdev)) { | 1937 | if (is_a2(pdev)) { |
1976 | /* Shorten SAS SNW lock time (RxLock timer value from 76 | 1938 | /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ |
1977 | * us to 50 us) | 1939 | writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); |
1978 | */ | ||
1979 | writel(0x7bcc96ad, &afe->afe_pmsn_master_control0); | ||
1980 | udelay(AFE_REGISTER_WRITE_DELAY); | 1940 | udelay(AFE_REGISTER_WRITE_DELAY); |
1981 | } | 1941 | } |
1982 | 1942 | ||
1983 | for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { | 1943 | for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { |
1984 | struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id]; | ||
1985 | const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; | 1944 | const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; |
1986 | int cable_length_long = | ||
1987 | is_long_cable(phy_id, cable_selection_mask); | ||
1988 | int cable_length_medium = | ||
1989 | is_medium_cable(phy_id, cable_selection_mask); | ||
1990 | 1945 | ||
1991 | if (is_a2(pdev)) { | 1946 | if (is_b0(pdev)) { |
1992 | /* All defaults, except the Receive Word | 1947 | /* Configure transmitter SSC parameters */ |
1993 | * Alignament/Comma Detect Enable....(0xe800) | 1948 | writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); |
1994 | */ | ||
1995 | writel(0x00004512, &xcvr->afe_xcvr_control0); | ||
1996 | udelay(AFE_REGISTER_WRITE_DELAY); | ||
1997 | |||
1998 | writel(0x0050100F, &xcvr->afe_xcvr_control1); | ||
1999 | udelay(AFE_REGISTER_WRITE_DELAY); | ||
2000 | } else if (is_b0(pdev)) { | ||
2001 | /* Configure transmitter SSC parameters */ | ||
2002 | writel(0x00030000, &xcvr->afe_tx_ssc_control); | ||
2003 | udelay(AFE_REGISTER_WRITE_DELAY); | 1949 | udelay(AFE_REGISTER_WRITE_DELAY); |
2004 | } else if (is_c0(pdev)) { | 1950 | } else if (is_c0(pdev)) { |
2005 | /* Configure transmitter SSC parameters */ | 1951 | /* Configure transmitter SSC parameters */ |
2006 | writel(0x00010202, &xcvr->afe_tx_ssc_control); | 1952 | writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); |
2007 | udelay(AFE_REGISTER_WRITE_DELAY); | 1953 | udelay(AFE_REGISTER_WRITE_DELAY); |
2008 | 1954 | ||
2009 | /* All defaults, except the Receive Word | 1955 | /* |
2010 | * Alignament/Comma Detect Enable....(0xe800) | 1956 | * All defaults, except the Receive Word Alignament/Comma Detect |
2011 | */ | 1957 | * Enable....(0xe800) */ |
2012 | writel(0x00014500, &xcvr->afe_xcvr_control0); | 1958 | writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); |
2013 | udelay(AFE_REGISTER_WRITE_DELAY); | 1959 | udelay(AFE_REGISTER_WRITE_DELAY); |
2014 | } else if (is_c1(pdev)) { | 1960 | } else { |
2015 | /* Configure transmitter SSC parameters */ | 1961 | /* |
2016 | writel(0x00010202, &xcvr->afe_tx_ssc_control); | 1962 | * All defaults, except the Receive Word Alignament/Comma Detect |
1963 | * Enable....(0xe800) */ | ||
1964 | writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); | ||
2017 | udelay(AFE_REGISTER_WRITE_DELAY); | 1965 | udelay(AFE_REGISTER_WRITE_DELAY); |
2018 | 1966 | ||
2019 | /* All defaults, except the Receive Word | 1967 | writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); |
2020 | * Alignament/Comma Detect Enable....(0xe800) | ||
2021 | */ | ||
2022 | writel(0x0001C500, &xcvr->afe_xcvr_control0); | ||
2023 | udelay(AFE_REGISTER_WRITE_DELAY); | 1968 | udelay(AFE_REGISTER_WRITE_DELAY); |
2024 | } | 1969 | } |
2025 | 1970 | ||
2026 | /* Power up TX and RX out from power down (PWRDNTX and | 1971 | /* |
2027 | * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c) | 1972 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) |
2028 | */ | 1973 | * & increase TX int & ext bias 20%....(0xe85c) */ |
2029 | if (is_a2(pdev)) | 1974 | if (is_a2(pdev)) |
2030 | writel(0x000003F0, &xcvr->afe_channel_control); | 1975 | writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2031 | else if (is_b0(pdev)) { | 1976 | else if (is_b0(pdev)) { |
2032 | writel(0x000003D7, &xcvr->afe_channel_control); | 1977 | /* Power down TX and RX (PWRDNTX and PWRDNRX) */ |
1978 | writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); | ||
2033 | udelay(AFE_REGISTER_WRITE_DELAY); | 1979 | udelay(AFE_REGISTER_WRITE_DELAY); |
2034 | 1980 | ||
2035 | writel(0x000003D4, &xcvr->afe_channel_control); | 1981 | /* |
2036 | } else if (is_c0(pdev)) { | 1982 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) |
2037 | writel(0x000001E7, &xcvr->afe_channel_control); | 1983 | * & increase TX int & ext bias 20%....(0xe85c) */ |
2038 | udelay(AFE_REGISTER_WRITE_DELAY); | 1984 | writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2039 | 1985 | } else { | |
2040 | writel(0x000001E4, &xcvr->afe_channel_control); | 1986 | writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2041 | } else if (is_c1(pdev)) { | ||
2042 | writel(cable_length_long ? 0x000002F7 : 0x000001F7, | ||
2043 | &xcvr->afe_channel_control); | ||
2044 | udelay(AFE_REGISTER_WRITE_DELAY); | 1987 | udelay(AFE_REGISTER_WRITE_DELAY); |
2045 | 1988 | ||
2046 | writel(cable_length_long ? 0x000002F4 : 0x000001F4, | 1989 | /* |
2047 | &xcvr->afe_channel_control); | 1990 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) |
1991 | * & increase TX int & ext bias 20%....(0xe85c) */ | ||
1992 | writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); | ||
2048 | } | 1993 | } |
2049 | udelay(AFE_REGISTER_WRITE_DELAY); | 1994 | udelay(AFE_REGISTER_WRITE_DELAY); |
2050 | 1995 | ||
2051 | if (is_a2(pdev)) { | 1996 | if (is_a2(pdev)) { |
2052 | /* Enable TX equalization (0xe824) */ | 1997 | /* Enable TX equalization (0xe824) */ |
2053 | writel(0x00040000, &xcvr->afe_tx_control); | 1998 | writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); |
2054 | udelay(AFE_REGISTER_WRITE_DELAY); | 1999 | udelay(AFE_REGISTER_WRITE_DELAY); |
2055 | } | 2000 | } |
2056 | 2001 | ||
2057 | if (is_a2(pdev) || is_b0(pdev)) | 2002 | /* |
2058 | /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, | 2003 | * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), |
2059 | * TPD=0x0(TX Power On), RDD=0x0(RX Detect | 2004 | * RDD=0x0(RX Detect Enabled) ....(0xe800) */ |
2060 | * Enabled) ....(0xe800) | 2005 | writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); |
2061 | */ | ||
2062 | writel(0x00004100, &xcvr->afe_xcvr_control0); | ||
2063 | else if (is_c0(pdev)) | ||
2064 | writel(0x00014100, &xcvr->afe_xcvr_control0); | ||
2065 | else if (is_c1(pdev)) | ||
2066 | writel(0x0001C100, &xcvr->afe_xcvr_control0); | ||
2067 | udelay(AFE_REGISTER_WRITE_DELAY); | 2006 | udelay(AFE_REGISTER_WRITE_DELAY); |
2068 | 2007 | ||
2069 | /* Leave DFE/FFE on */ | 2008 | /* Leave DFE/FFE on */ |
2070 | if (is_a2(pdev)) | 2009 | if (is_a2(pdev)) |
2071 | writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); | 2010 | writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); |
2072 | else if (is_b0(pdev)) { | 2011 | else if (is_b0(pdev)) { |
2073 | writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); | 2012 | writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); |
2074 | udelay(AFE_REGISTER_WRITE_DELAY); | 2013 | udelay(AFE_REGISTER_WRITE_DELAY); |
2075 | /* Enable TX equalization (0xe824) */ | 2014 | /* Enable TX equalization (0xe824) */ |
2076 | writel(0x00040000, &xcvr->afe_tx_control); | 2015 | writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); |
2077 | } else if (is_c0(pdev)) { | 2016 | } else { |
2078 | writel(0x01400C0F, &xcvr->afe_rx_ssc_control1); | 2017 | writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); |
2079 | udelay(AFE_REGISTER_WRITE_DELAY); | ||
2080 | |||
2081 | writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0); | ||
2082 | udelay(AFE_REGISTER_WRITE_DELAY); | ||
2083 | |||
2084 | /* Enable TX equalization (0xe824) */ | ||
2085 | writel(0x00040000, &xcvr->afe_tx_control); | ||
2086 | } else if (is_c1(pdev)) { | ||
2087 | writel(cable_length_long ? 0x01500C0C : | ||
2088 | cable_length_medium ? 0x01400C0D : 0x02400C0D, | ||
2089 | &xcvr->afe_xcvr_control1); | ||
2090 | udelay(AFE_REGISTER_WRITE_DELAY); | ||
2091 | |||
2092 | writel(0x000003E0, &xcvr->afe_dfx_rx_control1); | ||
2093 | udelay(AFE_REGISTER_WRITE_DELAY); | 2018 | udelay(AFE_REGISTER_WRITE_DELAY); |
2094 | 2019 | ||
2095 | writel(cable_length_long ? 0x33091C1F : | 2020 | writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); |
2096 | cable_length_medium ? 0x3315181F : 0x2B17161F, | ||
2097 | &xcvr->afe_rx_ssc_control0); | ||
2098 | udelay(AFE_REGISTER_WRITE_DELAY); | 2021 | udelay(AFE_REGISTER_WRITE_DELAY); |
2099 | 2022 | ||
2100 | /* Enable TX equalization (0xe824) */ | 2023 | /* Enable TX equalization (0xe824) */ |
2101 | writel(0x00040000, &xcvr->afe_tx_control); | 2024 | writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); |
2102 | } | 2025 | } |
2103 | 2026 | ||
2104 | udelay(AFE_REGISTER_WRITE_DELAY); | 2027 | udelay(AFE_REGISTER_WRITE_DELAY); |
2105 | 2028 | ||
2106 | writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0); | 2029 | writel(oem_phy->afe_tx_amp_control0, |
2030 | &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); | ||
2107 | udelay(AFE_REGISTER_WRITE_DELAY); | 2031 | udelay(AFE_REGISTER_WRITE_DELAY); |
2108 | 2032 | ||
2109 | writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1); | 2033 | writel(oem_phy->afe_tx_amp_control1, |
2034 | &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); | ||
2110 | udelay(AFE_REGISTER_WRITE_DELAY); | 2035 | udelay(AFE_REGISTER_WRITE_DELAY); |
2111 | 2036 | ||
2112 | writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2); | 2037 | writel(oem_phy->afe_tx_amp_control2, |
2038 | &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); | ||
2113 | udelay(AFE_REGISTER_WRITE_DELAY); | 2039 | udelay(AFE_REGISTER_WRITE_DELAY); |
2114 | 2040 | ||
2115 | writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3); | 2041 | writel(oem_phy->afe_tx_amp_control3, |
2042 | &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); | ||
2116 | udelay(AFE_REGISTER_WRITE_DELAY); | 2043 | udelay(AFE_REGISTER_WRITE_DELAY); |
2117 | } | 2044 | } |
2118 | 2045 | ||
2119 | /* Transfer control to the PEs */ | 2046 | /* Transfer control to the PEs */ |
2120 | writel(0x00010f00, &afe->afe_dfx_master_control0); | 2047 | writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); |
2121 | udelay(AFE_REGISTER_WRITE_DELAY); | 2048 | udelay(AFE_REGISTER_WRITE_DELAY); |
2122 | } | 2049 | } |
2123 | 2050 | ||
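Both columns of the hunk above repeat a single idiom: program an AFE transceiver register with writel(), then busy-wait udelay(AFE_REGISTER_WRITE_DELAY) before touching the next register; only the values and the xcvr addressing differ between the two versions. A minimal sketch of that write-then-settle step, assuming the driver's AFE_REGISTER_WRITE_DELAY constant is in scope (afe_write() is a hypothetical wrapper; both versions open-code the pair at every call site):

    #include <linux/io.h>
    #include <linux/delay.h>

    /* Hypothetical helper, not part of this patch: one settled AFE write. */
    static inline void afe_write(u32 val, void __iomem *reg)
    {
    	writel(val, reg);
    	udelay(AFE_REGISTER_WRITE_DELAY);
    }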
@@ -2139,8 +2066,9 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost) | |||
2139 | unsigned long i, state, val; | 2066 | unsigned long i, state, val; |
2140 | 2067 | ||
2141 | if (ihost->sm.current_state_id != SCIC_RESET) { | 2068 | if (ihost->sm.current_state_id != SCIC_RESET) { |
2142 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", | 2069 | dev_warn(&ihost->pdev->dev, |
2143 | __func__, ihost->sm.current_state_id); | 2070 | "SCIC Controller initialize operation requested " |
2071 | "in invalid state\n"); | ||
2144 | return SCI_FAILURE_INVALID_STATE; | 2072 | return SCI_FAILURE_INVALID_STATE; |
2145 | } | 2073 | } |
2146 | 2074 | ||
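The guard in this hunk is the template for several hunks below: an entry point refuses to run unless the controller state machine is in the expected state and returns SCI_FAILURE_INVALID_STATE otherwise; the two versions differ only in whether the warning is a hand-written sentence or logs __func__ plus the numeric state. A sketch of the check factored out (sci_state_check() is hypothetical; the driver repeats the if-block inline):

    /* Hypothetical reusable form of the inline guard; a GNU statement
     * expression, as is common in kernel code. */
    #define sci_state_check(ihost, expected)				\
    ({									\
    	bool ok = (ihost)->sm.current_state_id == (expected);		\
    	if (!ok)							\
    		dev_warn(&(ihost)->pdev->dev, "%s invalid state: %d\n",	\
    			 __func__, (ihost)->sm.current_state_id);	\
    	ok;								\
    })

    /* usage: if (!sci_state_check(ihost, SCIC_RESET))
     *		return SCI_FAILURE_INVALID_STATE; */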
@@ -2243,76 +2171,96 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost) | |||
2243 | return result; | 2171 | return result; |
2244 | } | 2172 | } |
2245 | 2173 | ||
2246 | static int sci_controller_dma_alloc(struct isci_host *ihost) | 2174 | static enum sci_status sci_user_parameters_set(struct isci_host *ihost, |
2175 | struct sci_user_parameters *sci_parms) | ||
2176 | { | ||
2177 | u32 state = ihost->sm.current_state_id; | ||
2178 | |||
2179 | if (state == SCIC_RESET || | ||
2180 | state == SCIC_INITIALIZING || | ||
2181 | state == SCIC_INITIALIZED) { | ||
2182 | u16 index; | ||
2183 | |||
2184 | /* | ||
2185 | * Validate the user parameters. If they are not legal, then | ||
2186 | * return a failure. | ||
2187 | */ | ||
2188 | for (index = 0; index < SCI_MAX_PHYS; index++) { | ||
2189 | struct sci_phy_user_params *user_phy; | ||
2190 | |||
2191 | user_phy = &sci_parms->phys[index]; | ||
2192 | |||
2193 | if (!((user_phy->max_speed_generation <= | ||
2194 | SCIC_SDS_PARM_MAX_SPEED) && | ||
2195 | (user_phy->max_speed_generation > | ||
2196 | SCIC_SDS_PARM_NO_SPEED))) | ||
2197 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
2198 | |||
2199 | if (user_phy->in_connection_align_insertion_frequency < | ||
2200 | 3) | ||
2201 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
2202 | |||
2203 | if ((user_phy->in_connection_align_insertion_frequency < | ||
2204 | 3) || | ||
2205 | (user_phy->align_insertion_frequency == 0) || | ||
2206 | (user_phy-> | ||
2207 | notify_enable_spin_up_insertion_frequency == | ||
2208 | 0)) | ||
2209 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
2210 | } | ||
2211 | |||
2212 | if ((sci_parms->stp_inactivity_timeout == 0) || | ||
2213 | (sci_parms->ssp_inactivity_timeout == 0) || | ||
2214 | (sci_parms->stp_max_occupancy_timeout == 0) || | ||
2215 | (sci_parms->ssp_max_occupancy_timeout == 0) || | ||
2216 | (sci_parms->no_outbound_task_timeout == 0)) | ||
2217 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
2218 | |||
2219 | memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); | ||
2220 | |||
2221 | return SCI_SUCCESS; | ||
2222 | } | ||
2223 | |||
2224 | return SCI_FAILURE_INVALID_STATE; | ||
2225 | } | ||
2226 | |||
2227 | static int sci_controller_mem_init(struct isci_host *ihost) | ||
2247 | { | 2228 | { |
2248 | struct device *dev = &ihost->pdev->dev; | 2229 | struct device *dev = &ihost->pdev->dev; |
2230 | dma_addr_t dma; | ||
2249 | size_t size; | 2231 | size_t size; |
2250 | int i; | 2232 | int err; |
2251 | |||
2252 | /* detect re-initialization */ | ||
2253 | if (ihost->completion_queue) | ||
2254 | return 0; | ||
2255 | 2233 | ||
2256 | size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); | 2234 | size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); |
2257 | ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma, | 2235 | ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); |
2258 | GFP_KERNEL); | ||
2259 | if (!ihost->completion_queue) | 2236 | if (!ihost->completion_queue) |
2260 | return -ENOMEM; | 2237 | return -ENOMEM; |
2261 | 2238 | ||
2239 | writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower); | ||
2240 | writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper); | ||
2241 | |||
2262 | size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); | 2242 | size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); |
2263 | ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma, | 2243 | ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, |
2264 | GFP_KERNEL); | 2244 | GFP_KERNEL); |
2265 | |||
2266 | if (!ihost->remote_node_context_table) | 2245 | if (!ihost->remote_node_context_table) |
2267 | return -ENOMEM; | 2246 | return -ENOMEM; |
2268 | 2247 | ||
2248 | writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower); | ||
2249 | writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper); | ||
2250 | |||
2269 | size = ihost->task_context_entries * sizeof(struct scu_task_context), | 2251 | size = ihost->task_context_entries * sizeof(struct scu_task_context), |
2270 | ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma, | 2252 | ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); |
2271 | GFP_KERNEL); | ||
2272 | if (!ihost->task_context_table) | 2253 | if (!ihost->task_context_table) |
2273 | return -ENOMEM; | 2254 | return -ENOMEM; |
2274 | 2255 | ||
2275 | size = SCI_UFI_TOTAL_SIZE; | 2256 | ihost->task_context_dma = dma; |
2276 | ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL); | 2257 | writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); |
2277 | if (!ihost->ufi_buf) | 2258 | writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); |
2278 | return -ENOMEM; | ||
2279 | |||
2280 | for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { | ||
2281 | struct isci_request *ireq; | ||
2282 | dma_addr_t dma; | ||
2283 | |||
2284 | ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL); | ||
2285 | if (!ireq) | ||
2286 | return -ENOMEM; | ||
2287 | |||
2288 | ireq->tc = &ihost->task_context_table[i]; | ||
2289 | ireq->owning_controller = ihost; | ||
2290 | ireq->request_daddr = dma; | ||
2291 | ireq->isci_host = ihost; | ||
2292 | ihost->reqs[i] = ireq; | ||
2293 | } | ||
2294 | |||
2295 | return 0; | ||
2296 | } | ||
2297 | |||
2298 | static int sci_controller_mem_init(struct isci_host *ihost) | ||
2299 | { | ||
2300 | int err = sci_controller_dma_alloc(ihost); | ||
2301 | 2259 | ||
2260 | err = sci_unsolicited_frame_control_construct(ihost); | ||
2302 | if (err) | 2261 | if (err) |
2303 | return err; | 2262 | return err; |
2304 | 2263 | ||
2305 | writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower); | ||
2306 | writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper); | ||
2307 | |||
2308 | writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower); | ||
2309 | writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper); | ||
2310 | |||
2311 | writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower); | ||
2312 | writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper); | ||
2313 | |||
2314 | sci_unsolicited_frame_control_construct(ihost); | ||
2315 | |||
2316 | /* | 2264 | /* |
2317 | * Inform the silicon as to the location of the UF headers and | 2265 | * Inform the silicon as to the location of the UF headers and |
2318 | * address table. | 2266 | * address table. |
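One side of this hunk allocates each coherent region and programs the hardware base registers immediately from a local dma_addr_t; the other keeps the handles (cq_dma, rnc_dma, tc_dma, ufi_dma) in struct isci_host and defers the register writes to sci_controller_mem_init(), so they can be replayed without reallocating. The shared allocate-and-publish pattern, sketched against a hypothetical register pair (dmam_alloc_coherent(), lower_32_bits() and upper_32_bits() are the real kernel APIs):

    #include <linux/dma-mapping.h>
    #include <linux/io.h>

    /* Hypothetical register block, for illustration only. */
    struct example_regs {
    	u32 queue_lower;
    	u32 queue_upper;
    };

    static int example_queue_setup(struct device *dev,
    			       struct example_regs __iomem *regs,
    			       size_t entries, u32 **queue, dma_addr_t *dma)
    {
    	/* device-managed coherent memory: freed automatically on detach */
    	*queue = dmam_alloc_coherent(dev, entries * sizeof(u32), dma,
    				     GFP_KERNEL);
    	if (!*queue)
    		return -ENOMEM;

    	/* publish the 64-bit bus address via split base registers */
    	writel(lower_32_bits(*dma), &regs->queue_lower);
    	writel(upper_32_bits(*dma), &regs->queue_upper);
    	return 0;
    }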
@@ -2330,22 +2278,22 @@ static int sci_controller_mem_init(struct isci_host *ihost) | |||
2330 | return 0; | 2278 | return 0; |
2331 | } | 2279 | } |
2332 | 2280 | ||
2333 | /** | ||
2334 | * isci_host_init - (re-)initialize hardware and internal (private) state | ||
2335 | * @ihost: host to init | ||
2336 | * | ||
2337 | * Any public facing objects (like asd_sas_port, and asd_sas_phys), or | ||
2338 | * one-time initialization objects like locks and waitqueues, are | ||
2339 | * not touched (they are initialized in isci_host_alloc) | ||
2340 | */ | ||
2341 | int isci_host_init(struct isci_host *ihost) | 2281 | int isci_host_init(struct isci_host *ihost) |
2342 | { | 2282 | { |
2343 | int i, err; | 2283 | int err = 0, i; |
2344 | enum sci_status status; | 2284 | enum sci_status status; |
2285 | struct sci_user_parameters sci_user_params; | ||
2286 | struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); | ||
2287 | |||
2288 | spin_lock_init(&ihost->state_lock); | ||
2289 | spin_lock_init(&ihost->scic_lock); | ||
2290 | init_waitqueue_head(&ihost->eventq); | ||
2291 | |||
2292 | isci_host_change_state(ihost, isci_starting); | ||
2293 | |||
2294 | status = sci_controller_construct(ihost, scu_base(ihost), | ||
2295 | smu_base(ihost)); | ||
2345 | 2296 | ||
2346 | spin_lock_irq(&ihost->scic_lock); | ||
2347 | status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost)); | ||
2348 | spin_unlock_irq(&ihost->scic_lock); | ||
2349 | if (status != SCI_SUCCESS) { | 2297 | if (status != SCI_SUCCESS) { |
2350 | dev_err(&ihost->pdev->dev, | 2298 | dev_err(&ihost->pdev->dev, |
2351 | "%s: sci_controller_construct failed - status = %x\n", | 2299 | "%s: sci_controller_construct failed - status = %x\n", |
@@ -2354,6 +2302,48 @@ int isci_host_init(struct isci_host *ihost) | |||
2354 | return -ENODEV; | 2302 | return -ENODEV; |
2355 | } | 2303 | } |
2356 | 2304 | ||
2305 | ihost->sas_ha.dev = &ihost->pdev->dev; | ||
2306 | ihost->sas_ha.lldd_ha = ihost; | ||
2307 | |||
2308 | /* | ||
2309 | * grab initial values stored in the controller object for OEM and USER | ||
2310 | * parameters | ||
2311 | */ | ||
2312 | isci_user_parameters_get(&sci_user_params); | ||
2313 | status = sci_user_parameters_set(ihost, &sci_user_params); | ||
2314 | if (status != SCI_SUCCESS) { | ||
2315 | dev_warn(&ihost->pdev->dev, | ||
2316 | "%s: sci_user_parameters_set failed\n", | ||
2317 | __func__); | ||
2318 | return -ENODEV; | ||
2319 | } | ||
2320 | |||
2321 | /* grab any OEM parameters specified in orom */ | ||
2322 | if (pci_info->orom) { | ||
2323 | status = isci_parse_oem_parameters(&ihost->oem_parameters, | ||
2324 | pci_info->orom, | ||
2325 | ihost->id); | ||
2326 | if (status != SCI_SUCCESS) { | ||
2327 | dev_warn(&ihost->pdev->dev, | ||
2328 | "parsing firmware oem parameters failed\n"); | ||
2329 | return -EINVAL; | ||
2330 | } | ||
2331 | } | ||
2332 | |||
2333 | status = sci_oem_parameters_set(ihost); | ||
2334 | if (status != SCI_SUCCESS) { | ||
2335 | dev_warn(&ihost->pdev->dev, | ||
2336 | "%s: sci_oem_parameters_set failed\n", | ||
2337 | __func__); | ||
2338 | return -ENODEV; | ||
2339 | } | ||
2340 | |||
2341 | tasklet_init(&ihost->completion_tasklet, | ||
2342 | isci_host_completion_routine, (unsigned long)ihost); | ||
2343 | |||
2344 | INIT_LIST_HEAD(&ihost->requests_to_complete); | ||
2345 | INIT_LIST_HEAD(&ihost->requests_to_errorback); | ||
2346 | |||
2357 | spin_lock_irq(&ihost->scic_lock); | 2347 | spin_lock_irq(&ihost->scic_lock); |
2358 | status = sci_controller_initialize(ihost); | 2348 | status = sci_controller_initialize(ihost); |
2359 | spin_unlock_irq(&ihost->scic_lock); | 2349 | spin_unlock_irq(&ihost->scic_lock); |
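The longer variant of isci_host_init() in this hunk also wires up the deferred-completion machinery inline: a tasklet bound to isci_host_completion_routine(), plus the two request lists it drains (field names as in the right-hand struct isci_host). The bottom-half wiring, sketched with my_completion_routine() standing in for the real handler:

    #include <linux/interrupt.h>
    #include <linux/list.h>

    /* Stand-in for isci_host_completion_routine(): runs in softirq
     * context once the hard IRQ handler schedules the tasklet. */
    static void my_completion_routine(unsigned long data)
    {
    	struct isci_host *ihost = (struct isci_host *)data;

    	/* drain the completion queue, then complete/errorback requests */
    	(void)ihost;
    }

    static void example_bottom_half_init(struct isci_host *ihost)
    {
    	tasklet_init(&ihost->completion_tasklet,
    		     my_completion_routine, (unsigned long)ihost);
    	INIT_LIST_HEAD(&ihost->requests_to_complete);
    	INIT_LIST_HEAD(&ihost->requests_to_errorback);
    }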
@@ -2369,11 +2359,36 @@ int isci_host_init(struct isci_host *ihost) | |||
2369 | if (err) | 2359 | if (err) |
2370 | return err; | 2360 | return err; |
2371 | 2361 | ||
2372 | /* enable sgpio */ | 2362 | for (i = 0; i < SCI_MAX_PORTS; i++) |
2373 | writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); | 2363 | isci_port_init(&ihost->ports[i], ihost, i); |
2374 | for (i = 0; i < isci_gpio_count(ihost); i++) | 2364 | |
2375 | writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); | 2365 | for (i = 0; i < SCI_MAX_PHYS; i++) |
2376 | writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); | 2366 | isci_phy_init(&ihost->phys[i], ihost, i); |
2367 | |||
2368 | for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { | ||
2369 | struct isci_remote_device *idev = &ihost->devices[i]; | ||
2370 | |||
2371 | INIT_LIST_HEAD(&idev->reqs_in_process); | ||
2372 | INIT_LIST_HEAD(&idev->node); | ||
2373 | } | ||
2374 | |||
2375 | for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { | ||
2376 | struct isci_request *ireq; | ||
2377 | dma_addr_t dma; | ||
2378 | |||
2379 | ireq = dmam_alloc_coherent(&ihost->pdev->dev, | ||
2380 | sizeof(struct isci_request), &dma, | ||
2381 | GFP_KERNEL); | ||
2382 | if (!ireq) | ||
2383 | return -ENOMEM; | ||
2384 | |||
2385 | ireq->tc = &ihost->task_context_table[i]; | ||
2386 | ireq->owning_controller = ihost; | ||
2387 | spin_lock_init(&ireq->state_lock); | ||
2388 | ireq->request_daddr = dma; | ||
2389 | ireq->isci_host = ihost; | ||
2390 | ihost->reqs[i] = ireq; | ||
2391 | } | ||
2377 | 2392 | ||
2378 | return 0; | 2393 | return 0; |
2379 | } | 2394 | } |
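Both trees fill the request pool the same way, whether in sci_controller_dma_alloc() or at the tail of isci_host_init(): every isci_request is carved from coherent memory so the hardware can DMA into it, its tc pointer is aimed at slot i of the shared task-context table, and the bus address is kept in request_daddr. One iteration restated as a helper (example_alloc_request() is illustrative; the field names are from the hunk):

    /* Illustrative restatement of one pool-filling iteration. */
    static int example_alloc_request(struct isci_host *ihost, int i)
    {
    	struct isci_request *ireq;
    	dma_addr_t dma;

    	ireq = dmam_alloc_coherent(&ihost->pdev->dev, sizeof(*ireq),
    				   &dma, GFP_KERNEL);
    	if (!ireq)
    		return -ENOMEM;

    	ireq->tc = &ihost->task_context_table[i];	/* shared TC slot */
    	ireq->owning_controller = ihost;
    	ireq->request_daddr = dma;			/* bus address for HW */
    	ihost->reqs[i] = ireq;
    	return 0;
    }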
@@ -2420,7 +2435,7 @@ void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, | |||
2420 | } | 2435 | } |
2421 | } | 2436 | } |
2422 | 2437 | ||
2423 | bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) | 2438 | static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) |
2424 | { | 2439 | { |
2425 | u32 index; | 2440 | u32 index; |
2426 | 2441 | ||
@@ -2446,7 +2461,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost, | |||
2446 | } | 2461 | } |
2447 | 2462 | ||
2448 | if (!sci_controller_has_remote_devices_stopping(ihost)) | 2463 | if (!sci_controller_has_remote_devices_stopping(ihost)) |
2449 | isci_host_stop_complete(ihost); | 2464 | sci_change_state(&ihost->sm, SCIC_STOPPED); |
2450 | } | 2465 | } |
2451 | 2466 | ||
2452 | void sci_controller_post_request(struct isci_host *ihost, u32 request) | 2467 | void sci_controller_post_request(struct isci_host *ihost, u32 request) |
@@ -2608,8 +2623,7 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost, | |||
2608 | enum sci_status status; | 2623 | enum sci_status status; |
2609 | 2624 | ||
2610 | if (ihost->sm.current_state_id != SCIC_READY) { | 2625 | if (ihost->sm.current_state_id != SCIC_READY) { |
2611 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", | 2626 | dev_warn(&ihost->pdev->dev, "invalid state to start I/O"); |
2612 | __func__, ihost->sm.current_state_id); | ||
2613 | return SCI_FAILURE_INVALID_STATE; | 2627 | return SCI_FAILURE_INVALID_STATE; |
2614 | } | 2628 | } |
2615 | 2629 | ||
@@ -2633,26 +2647,22 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost, | |||
2633 | enum sci_status status; | 2647 | enum sci_status status; |
2634 | 2648 | ||
2635 | if (ihost->sm.current_state_id != SCIC_READY) { | 2649 | if (ihost->sm.current_state_id != SCIC_READY) { |
2636 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", | 2650 | dev_warn(&ihost->pdev->dev, |
2637 | __func__, ihost->sm.current_state_id); | 2651 | "invalid state to terminate request\n"); |
2638 | return SCI_FAILURE_INVALID_STATE; | 2652 | return SCI_FAILURE_INVALID_STATE; |
2639 | } | 2653 | } |
2640 | status = sci_io_request_terminate(ireq); | ||
2641 | 2654 | ||
2642 | dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n", | 2655 | status = sci_io_request_terminate(ireq); |
2643 | __func__, status, ireq, ireq->flags); | 2656 | if (status != SCI_SUCCESS) |
2657 | return status; | ||
2644 | 2658 | ||
2645 | if ((status == SCI_SUCCESS) && | 2659 | /* |
2646 | !test_bit(IREQ_PENDING_ABORT, &ireq->flags) && | 2660 | * Utilize the original post context command and or in the POST_TC_ABORT |
2647 | !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) { | 2661 | * request sub-type. |
2648 | /* Utilize the original post context command and or in the | 2662 | */ |
2649 | * POST_TC_ABORT request sub-type. | 2663 | sci_controller_post_request(ihost, |
2650 | */ | 2664 | ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); |
2651 | sci_controller_post_request( | 2665 | return SCI_SUCCESS; |
2652 | ihost, ireq->post_context | | ||
2653 | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); | ||
2654 | } | ||
2655 | return status; | ||
2656 | } | 2666 | } |
2657 | 2667 | ||
2658 | /** | 2668 | /** |
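The two terminate paths above differ in abort-posting discipline: one posts the TC abort whenever sci_io_request_terminate() succeeds, while the other additionally requires IREQ_PENDING_ABORT to be clear and uses test_and_set_bit() on IREQ_TC_ABORT_POSTED so racing callers post at most once. The guarded form, condensed into a helper (example_post_abort_once() is illustrative; flags and constants are from the hunk):

    #include <linux/bitops.h>

    static void example_post_abort_once(struct isci_host *ihost,
    				    struct isci_request *ireq)
    {
    	if (test_bit(IREQ_PENDING_ABORT, &ireq->flags))
    		return;		/* an abort is already in flight */
    	if (test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags))
    		return;		/* someone else already posted it */

    	/* OR the abort sub-type into the original post context */
    	sci_controller_post_request(ihost, ireq->post_context |
    				    SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
    }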
@@ -2686,8 +2696,7 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost, | |||
2686 | clear_bit(IREQ_ACTIVE, &ireq->flags); | 2696 | clear_bit(IREQ_ACTIVE, &ireq->flags); |
2687 | return SCI_SUCCESS; | 2697 | return SCI_SUCCESS; |
2688 | default: | 2698 | default: |
2689 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", | 2699 | dev_warn(&ihost->pdev->dev, "invalid state to complete I/O"); |
2690 | __func__, ihost->sm.current_state_id); | ||
2691 | return SCI_FAILURE_INVALID_STATE; | 2700 | return SCI_FAILURE_INVALID_STATE; |
2692 | } | 2701 | } |
2693 | 2702 | ||
@@ -2698,8 +2707,7 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq) | |||
2698 | struct isci_host *ihost = ireq->owning_controller; | 2707 | struct isci_host *ihost = ireq->owning_controller; |
2699 | 2708 | ||
2700 | if (ihost->sm.current_state_id != SCIC_READY) { | 2709 | if (ihost->sm.current_state_id != SCIC_READY) { |
2701 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", | 2710 | dev_warn(&ihost->pdev->dev, "invalid state to continue I/O"); |
2702 | __func__, ihost->sm.current_state_id); | ||
2703 | return SCI_FAILURE_INVALID_STATE; | 2711 | return SCI_FAILURE_INVALID_STATE; |
2704 | } | 2712 | } |
2705 | 2713 | ||
@@ -2752,56 +2760,3 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost, | |||
2752 | 2760 | ||
2753 | return status; | 2761 | return status; |
2754 | } | 2762 | } |
2755 | |||
2756 | static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data) | ||
2757 | { | ||
2758 | int d; | ||
2759 | |||
2760 | /* no support for TX_GP_CFG */ | ||
2761 | if (reg_index == 0) | ||
2762 | return -EINVAL; | ||
2763 | |||
2764 | for (d = 0; d < isci_gpio_count(ihost); d++) { | ||
2765 | u32 val = 0x444; /* all ODx.n clear */ | ||
2766 | int i; | ||
2767 | |||
2768 | for (i = 0; i < 3; i++) { | ||
2769 | int bit = (i << 2) + 2; | ||
2770 | |||
2771 | bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i), | ||
2772 | write_data, reg_index, | ||
2773 | reg_count); | ||
2774 | if (bit < 0) | ||
2775 | break; | ||
2776 | |||
2777 | /* if od is set, clear the 'invert' bit */ | ||
2778 | val &= ~(bit << ((i << 2) + 2)); | ||
2779 | } | ||
2780 | |||
2781 | if (i < 3) | ||
2782 | break; | ||
2783 | writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]); | ||
2784 | } | ||
2785 | |||
2786 | /* unless reg_index is > 1, we should always be able to write at | ||
2787 | * least one register | ||
2788 | */ | ||
2789 | return d > 0; | ||
2790 | } | ||
2791 | |||
2792 | int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index, | ||
2793 | u8 reg_count, u8 *write_data) | ||
2794 | { | ||
2795 | struct isci_host *ihost = sas_ha->lldd_ha; | ||
2796 | int written; | ||
2797 | |||
2798 | switch (reg_type) { | ||
2799 | case SAS_GPIO_REG_TX_GP: | ||
2800 | written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data); | ||
2801 | break; | ||
2802 | default: | ||
2803 | written = -EINVAL; | ||
2804 | } | ||
2805 | |||
2806 | return written; | ||
2807 | } | ||
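The removed SGPIO path becomes clear once the register layout is spelled out: each output_data_select register carries three OD fields one nibble apart, with the control bit at nibble offset 2, so 0x444 means all ODx.n clear and driving an OD clears its 'invert' bit. The encoding from sci_write_gpio_tx_gp(), restated over a hypothetical on[] input:

    /* Bits 2, 6 and 10 select the three ODs of one register; on[] is a
     * hypothetical input for illustration. */
    static u32 example_encode_od_select(const bool on[3])
    {
    	u32 val = 0x444;	/* all ODx.n clear (the default state) */
    	int i;

    	for (i = 0; i < 3; i++)
    		if (on[i])
    			val &= ~(1u << ((i << 2) + 2));
    	return val;
    }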
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 4911310a38f..9f33831a2f0 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h | |||
@@ -55,7 +55,6 @@ | |||
55 | #ifndef _SCI_HOST_H_ | 55 | #ifndef _SCI_HOST_H_ |
56 | #define _SCI_HOST_H_ | 56 | #define _SCI_HOST_H_ |
57 | 57 | ||
58 | #include <scsi/sas_ata.h> | ||
59 | #include "remote_device.h" | 58 | #include "remote_device.h" |
60 | #include "phy.h" | 59 | #include "phy.h" |
61 | #include "isci.h" | 60 | #include "isci.h" |
@@ -109,8 +108,6 @@ struct sci_port_configuration_agent; | |||
109 | typedef void (*port_config_fn)(struct isci_host *, | 108 | typedef void (*port_config_fn)(struct isci_host *, |
110 | struct sci_port_configuration_agent *, | 109 | struct sci_port_configuration_agent *, |
111 | struct isci_port *, struct isci_phy *); | 110 | struct isci_port *, struct isci_phy *); |
112 | bool is_port_config_apc(struct isci_host *ihost); | ||
113 | bool is_controller_start_complete(struct isci_host *ihost); | ||
114 | 111 | ||
115 | struct sci_port_configuration_agent { | 112 | struct sci_port_configuration_agent { |
116 | u16 phy_configured_mask; | 113 | u16 phy_configured_mask; |
@@ -160,17 +157,13 @@ struct isci_host { | |||
160 | struct sci_power_control power_control; | 157 | struct sci_power_control power_control; |
161 | u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; | 158 | u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; |
162 | struct scu_task_context *task_context_table; | 159 | struct scu_task_context *task_context_table; |
163 | dma_addr_t tc_dma; | 160 | dma_addr_t task_context_dma; |
164 | union scu_remote_node_context *remote_node_context_table; | 161 | union scu_remote_node_context *remote_node_context_table; |
165 | dma_addr_t rnc_dma; | ||
166 | u32 *completion_queue; | 162 | u32 *completion_queue; |
167 | dma_addr_t cq_dma; | ||
168 | u32 completion_queue_get; | 163 | u32 completion_queue_get; |
169 | u32 logical_port_entries; | 164 | u32 logical_port_entries; |
170 | u32 remote_node_entries; | 165 | u32 remote_node_entries; |
171 | u32 task_context_entries; | 166 | u32 task_context_entries; |
172 | void *ufi_buf; | ||
173 | dma_addr_t ufi_dma; | ||
174 | struct sci_unsolicited_frame_control uf_control; | 167 | struct sci_unsolicited_frame_control uf_control; |
175 | 168 | ||
176 | /* phy startup */ | 169 | /* phy startup */ |
@@ -194,16 +187,19 @@ struct isci_host { | |||
194 | int id; /* unique within a given pci device */ | 187 | int id; /* unique within a given pci device */ |
195 | struct isci_phy phys[SCI_MAX_PHYS]; | 188 | struct isci_phy phys[SCI_MAX_PHYS]; |
196 | struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ | 189 | struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ |
197 | struct asd_sas_port sas_ports[SCI_MAX_PORTS]; | ||
198 | struct sas_ha_struct sas_ha; | 190 | struct sas_ha_struct sas_ha; |
199 | 191 | ||
192 | spinlock_t state_lock; | ||
200 | struct pci_dev *pdev; | 193 | struct pci_dev *pdev; |
194 | enum isci_status status; | ||
201 | #define IHOST_START_PENDING 0 | 195 | #define IHOST_START_PENDING 0 |
202 | #define IHOST_STOP_PENDING 1 | 196 | #define IHOST_STOP_PENDING 1 |
203 | #define IHOST_IRQ_ENABLED 2 | ||
204 | unsigned long flags; | 197 | unsigned long flags; |
205 | wait_queue_head_t eventq; | 198 | wait_queue_head_t eventq; |
199 | struct Scsi_Host *shost; | ||
206 | struct tasklet_struct completion_tasklet; | 200 | struct tasklet_struct completion_tasklet; |
201 | struct list_head requests_to_complete; | ||
202 | struct list_head requests_to_errorback; | ||
207 | spinlock_t scic_lock; | 203 | spinlock_t scic_lock; |
208 | struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; | 204 | struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; |
209 | struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; | 205 | struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; |
@@ -277,6 +273,13 @@ enum sci_controller_states { | |||
277 | SCIC_STOPPING, | 273 | SCIC_STOPPING, |
278 | 274 | ||
279 | /** | 275 | /** |
276 | * This state indicates that the controller has successfully been stopped. | ||
277 | * In this state no new IO operations are permitted. | ||
278 | * This state is entered from the STOPPING state. | ||
279 | */ | ||
280 | SCIC_STOPPED, | ||
281 | |||
282 | /** | ||
280 | * This state indicates that the controller could not successfully be | 283 | * This state indicates that the controller could not successfully be |
281 | * initialized. In this state no new IO operations are permitted. | 284 | * initialized. In this state no new IO operations are permitted. |
282 | * This state is entered from the INITIALIZING state. | 285 | * This state is entered from the INITIALIZING state. |
@@ -305,16 +308,32 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev) | |||
305 | return pci_get_drvdata(pdev); | 308 | return pci_get_drvdata(pdev); |
306 | } | 309 | } |
307 | 310 | ||
308 | static inline struct Scsi_Host *to_shost(struct isci_host *ihost) | ||
309 | { | ||
310 | return ihost->sas_ha.core.shost; | ||
311 | } | ||
312 | |||
313 | #define for_each_isci_host(id, ihost, pdev) \ | 311 | #define for_each_isci_host(id, ihost, pdev) \ |
314 | for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ | 312 | for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ |
315 | id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ | 313 | id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ |
316 | ihost = to_pci_info(pdev)->hosts[++id]) | 314 | ihost = to_pci_info(pdev)->hosts[++id]) |
317 | 315 | ||
316 | static inline enum isci_status isci_host_get_state(struct isci_host *isci_host) | ||
317 | { | ||
318 | return isci_host->status; | ||
319 | } | ||
320 | |||
321 | static inline void isci_host_change_state(struct isci_host *isci_host, | ||
322 | enum isci_status status) | ||
323 | { | ||
324 | unsigned long flags; | ||
325 | |||
326 | dev_dbg(&isci_host->pdev->dev, | ||
327 | "%s: isci_host = %p, state = 0x%x", | ||
328 | __func__, | ||
329 | isci_host, | ||
330 | status); | ||
331 | spin_lock_irqsave(&isci_host->state_lock, flags); | ||
332 | isci_host->status = status; | ||
333 | spin_unlock_irqrestore(&isci_host->state_lock, flags); | ||
334 | |||
335 | } | ||
336 | |||
318 | static inline void wait_for_start(struct isci_host *ihost) | 337 | static inline void wait_for_start(struct isci_host *ihost) |
319 | { | 338 | { |
320 | wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); | 339 | wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); |
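isci_host_change_state() in this hunk is an irq-safe setter: log the transition, take state_lock with spin_lock_irqsave() so the helper works from both process and interrupt context, store, release. The same shape for a generic field (example_set_status() is illustrative):

    #include <linux/spinlock.h>

    static void example_set_status(struct isci_host *ihost,
    			       enum isci_status status)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&ihost->state_lock, flags);
    	ihost->status = status;
    	spin_unlock_irqrestore(&ihost->state_lock, flags);
    }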
@@ -340,11 +359,6 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) | |||
340 | return dev->port->ha->lldd_ha; | 359 | return dev->port->ha->lldd_ha; |
341 | } | 360 | } |
342 | 361 | ||
343 | static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev) | ||
344 | { | ||
345 | return dev_to_ihost(idev->domain_dev); | ||
346 | } | ||
347 | |||
348 | /* we always use protocol engine group zero */ | 362 | /* we always use protocol engine group zero */ |
349 | #define ISCI_PEG 0 | 363 | #define ISCI_PEG 0 |
350 | 364 | ||
@@ -363,7 +377,8 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev) | |||
363 | { | 377 | { |
364 | struct domain_device *dev = idev->domain_dev; | 378 | struct domain_device *dev = idev->domain_dev; |
365 | 379 | ||
366 | if (dev_is_sata(dev) && dev->parent) | 380 | if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && |
381 | !idev->is_direct_attached) | ||
367 | return SCU_STP_REMOTE_NODE_COUNT; | 382 | return SCU_STP_REMOTE_NODE_COUNT; |
368 | return SCU_SSP_REMOTE_NODE_COUNT; | 383 | return SCU_SSP_REMOTE_NODE_COUNT; |
369 | } | 384 | } |
@@ -378,6 +393,24 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev) | |||
378 | #define sci_controller_clear_invalid_phy(controller, phy) \ | 393 | #define sci_controller_clear_invalid_phy(controller, phy) \ |
379 | ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) | 394 | ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) |
380 | 395 | ||
396 | static inline struct device *sciphy_to_dev(struct isci_phy *iphy) | ||
397 | { | ||
398 | |||
399 | if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host) | ||
400 | return NULL; | ||
401 | |||
402 | return &iphy->isci_port->isci_host->pdev->dev; | ||
403 | } | ||
404 | |||
405 | static inline struct device *sciport_to_dev(struct isci_port *iport) | ||
406 | { | ||
407 | |||
408 | if (!iport || !iport->isci_host) | ||
409 | return NULL; | ||
410 | |||
411 | return &iport->isci_host->pdev->dev; | ||
412 | } | ||
413 | |||
381 | static inline struct device *scirdev_to_dev(struct isci_remote_device *idev) | 414 | static inline struct device *scirdev_to_dev(struct isci_remote_device *idev) |
382 | { | 415 | { |
383 | if (!idev || !idev->isci_port || !idev->isci_port->isci_host) | 416 | if (!idev || !idev->isci_port || !idev->isci_port->isci_host) |
@@ -402,48 +435,11 @@ static inline bool is_b0(struct pci_dev *pdev) | |||
402 | 435 | ||
403 | static inline bool is_c0(struct pci_dev *pdev) | 436 | static inline bool is_c0(struct pci_dev *pdev) |
404 | { | 437 | { |
405 | if (pdev->revision == 5) | 438 | if (pdev->revision >= 5) |
406 | return true; | ||
407 | return false; | ||
408 | } | ||
409 | |||
410 | static inline bool is_c1(struct pci_dev *pdev) | ||
411 | { | ||
412 | if (pdev->revision >= 6) | ||
413 | return true; | 439 | return true; |
414 | return false; | 440 | return false; |
415 | } | 441 | } |
416 | 442 | ||
417 | enum cable_selections { | ||
418 | short_cable = 0, | ||
419 | long_cable = 1, | ||
420 | medium_cable = 2, | ||
421 | undefined_cable = 3 | ||
422 | }; | ||
423 | |||
424 | #define CABLE_OVERRIDE_DISABLED (0x10000) | ||
425 | |||
426 | static inline int is_cable_select_overridden(void) | ||
427 | { | ||
428 | return cable_selection_override < CABLE_OVERRIDE_DISABLED; | ||
429 | } | ||
430 | |||
431 | enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy); | ||
432 | void validate_cable_selections(struct isci_host *ihost); | ||
433 | char *lookup_cable_names(enum cable_selections); | ||
434 | |||
435 | /* set hw control for 'activity', even though active enclosures seem to drive | ||
436 | * the activity led on their own. Skip setting FSENG control on 'status' due | ||
437 | * to unexpected operation and 'error' due to not being a supported automatic | ||
438 | * FSENG output | ||
439 | */ | ||
440 | #define SGPIO_HW_CONTROL 0x00000443 | ||
441 | |||
442 | static inline int isci_gpio_count(struct isci_host *ihost) | ||
443 | { | ||
444 | return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select); | ||
445 | } | ||
446 | |||
447 | void sci_controller_post_request(struct isci_host *ihost, | 443 | void sci_controller_post_request(struct isci_host *ihost, |
448 | u32 request); | 444 | u32 request); |
449 | void sci_controller_release_frame(struct isci_host *ihost, | 445 | void sci_controller_release_frame(struct isci_host *ihost, |
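The revision helpers in this hunk map PCI revision IDs onto silicon steppings, and the two headers disagree: one treats revision 5 as C0 and adds is_c1() for revision >= 6 (plus the cable_selections machinery that the C1 AFE tables key off), while the other folds every revision >= 5 into C0. Under the four-stepping mapping, the AFE dispatch seen earlier in host.c reduces to:

    /* Stepping dispatch, assuming the is_a2/is_b0/is_c0/is_c1 mapping;
     * the constants are the afe_xcvr_control0 values from the host.c hunk. */
    static u32 example_xcvr_control0(struct pci_dev *pdev)
    {
    	if (is_a2(pdev) || is_b0(pdev))
    		return 0x00004100;
    	if (is_c0(pdev))
    		return 0x00014100;
    	return 0x0001C100;	/* is_c1() */
    }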
@@ -459,32 +455,66 @@ void sci_controller_free_remote_node_context( | |||
459 | struct isci_remote_device *idev, | 455 | struct isci_remote_device *idev, |
460 | u16 node_id); | 456 | u16 node_id); |
461 | 457 | ||
462 | struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag); | 458 | struct isci_request *sci_request_by_tag(struct isci_host *ihost, |
463 | void sci_controller_power_control_queue_insert(struct isci_host *ihost, | 459 | u16 io_tag); |
464 | struct isci_phy *iphy); | 460 | |
465 | void sci_controller_power_control_queue_remove(struct isci_host *ihost, | 461 | void sci_controller_power_control_queue_insert( |
466 | struct isci_phy *iphy); | 462 | struct isci_host *ihost, |
467 | void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, | 463 | struct isci_phy *iphy); |
468 | struct isci_phy *iphy); | 464 | |
469 | void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, | 465 | void sci_controller_power_control_queue_remove( |
470 | struct isci_phy *iphy); | 466 | struct isci_host *ihost, |
471 | void sci_controller_remote_device_stopped(struct isci_host *ihost, | 467 | struct isci_phy *iphy); |
472 | struct isci_remote_device *idev); | 468 | |
469 | void sci_controller_link_up( | ||
470 | struct isci_host *ihost, | ||
471 | struct isci_port *iport, | ||
472 | struct isci_phy *iphy); | ||
473 | |||
474 | void sci_controller_link_down( | ||
475 | struct isci_host *ihost, | ||
476 | struct isci_port *iport, | ||
477 | struct isci_phy *iphy); | ||
478 | |||
479 | void sci_controller_remote_device_stopped( | ||
480 | struct isci_host *ihost, | ||
481 | struct isci_remote_device *idev); | ||
482 | |||
483 | void sci_controller_copy_task_context( | ||
484 | struct isci_host *ihost, | ||
485 | struct isci_request *ireq); | ||
486 | |||
487 | void sci_controller_register_setup(struct isci_host *ihost); | ||
473 | 488 | ||
474 | enum sci_status sci_controller_continue_io(struct isci_request *ireq); | 489 | enum sci_status sci_controller_continue_io(struct isci_request *ireq); |
475 | int isci_host_scan_finished(struct Scsi_Host *, unsigned long); | 490 | int isci_host_scan_finished(struct Scsi_Host *, unsigned long); |
476 | void isci_host_start(struct Scsi_Host *); | 491 | void isci_host_scan_start(struct Scsi_Host *); |
477 | u16 isci_alloc_tag(struct isci_host *ihost); | 492 | u16 isci_alloc_tag(struct isci_host *ihost); |
478 | enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); | 493 | enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); |
479 | void isci_tci_free(struct isci_host *ihost, u16 tci); | 494 | void isci_tci_free(struct isci_host *ihost, u16 tci); |
480 | void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task); | ||
481 | 495 | ||
482 | int isci_host_init(struct isci_host *); | 496 | int isci_host_init(struct isci_host *); |
483 | void isci_host_completion_routine(unsigned long data); | 497 | |
484 | void isci_host_deinit(struct isci_host *); | 498 | void isci_host_init_controller_names( |
485 | void sci_controller_disable_interrupts(struct isci_host *ihost); | 499 | struct isci_host *isci_host, |
486 | bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost); | 500 | unsigned int controller_idx); |
487 | void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status); | 501 | |
502 | void isci_host_deinit( | ||
503 | struct isci_host *); | ||
504 | |||
505 | void isci_host_port_link_up( | ||
506 | struct isci_host *, | ||
507 | struct isci_port *, | ||
508 | struct isci_phy *); | ||
509 | int isci_host_dev_found(struct domain_device *); | ||
510 | |||
511 | void isci_host_remote_device_start_complete( | ||
512 | struct isci_host *, | ||
513 | struct isci_remote_device *, | ||
514 | enum sci_status); | ||
515 | |||
516 | void sci_controller_disable_interrupts( | ||
517 | struct isci_host *ihost); | ||
488 | 518 | ||
489 | enum sci_status sci_controller_start_io( | 519 | enum sci_status sci_controller_start_io( |
490 | struct isci_host *ihost, | 520 | struct isci_host *ihost, |
@@ -512,7 +542,4 @@ void sci_port_configuration_agent_construct( | |||
512 | enum sci_status sci_port_configuration_agent_initialize( | 542 | enum sci_status sci_port_configuration_agent_initialize( |
513 | struct isci_host *ihost, | 543 | struct isci_host *ihost, |
514 | struct sci_port_configuration_agent *port_agent); | 544 | struct sci_port_configuration_agent *port_agent); |
515 | |||
516 | int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index, | ||
517 | u8 reg_count, u8 *write_data); | ||
518 | #endif | 545 | #endif |
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index d73fdcfeb45..29aa34efb0f 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
@@ -60,13 +60,12 @@ | |||
60 | #include <linux/efi.h> | 60 | #include <linux/efi.h> |
61 | #include <asm/string.h> | 61 | #include <asm/string.h> |
62 | #include <scsi/scsi_host.h> | 62 | #include <scsi/scsi_host.h> |
63 | #include "host.h" | ||
64 | #include "isci.h" | 63 | #include "isci.h" |
65 | #include "task.h" | 64 | #include "task.h" |
66 | #include "probe_roms.h" | 65 | #include "probe_roms.h" |
67 | 66 | ||
68 | #define MAJ 1 | 67 | #define MAJ 1 |
69 | #define MIN 1 | 68 | #define MIN 0 |
70 | #define BUILD 0 | 69 | #define BUILD 0 |
71 | #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ | 70 | #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ |
72 | __stringify(BUILD) | 71 | __stringify(BUILD) |
@@ -95,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table); | |||
95 | 94 | ||
96 | /* linux isci specific settings */ | 95 | /* linux isci specific settings */ |
97 | 96 | ||
98 | unsigned char no_outbound_task_to = 2; | 97 | unsigned char no_outbound_task_to = 20; |
99 | module_param(no_outbound_task_to, byte, 0); | 98 | module_param(no_outbound_task_to, byte, 0); |
100 | MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); | 99 | MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); |
101 | 100 | ||
@@ -115,22 +114,14 @@ u16 stp_inactive_to = 5; | |||
115 | module_param(stp_inactive_to, ushort, 0); | 114 | module_param(stp_inactive_to, ushort, 0); |
116 | MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); | 115 | MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); |
117 | 116 | ||
118 | unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED; | 117 | unsigned char phy_gen = 3; |
119 | module_param(phy_gen, byte, 0); | 118 | module_param(phy_gen, byte, 0); |
120 | MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); | 119 | MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); |
121 | 120 | ||
122 | unsigned char max_concurr_spinup; | 121 | unsigned char max_concurr_spinup = 1; |
123 | module_param(max_concurr_spinup, byte, 0); | 122 | module_param(max_concurr_spinup, byte, 0); |
124 | MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); | 123 | MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); |
125 | 124 | ||
126 | uint cable_selection_override = CABLE_OVERRIDE_DISABLED; | ||
127 | module_param(cable_selection_override, uint, 0); | ||
128 | |||
129 | MODULE_PARM_DESC(cable_selection_override, | ||
130 | "This field indicates length of the SAS/SATA cable between " | ||
131 | "host and device. If any bits > 15 are set (default) " | ||
132 | "indicates \"use platform defaults\""); | ||
133 | |||
134 | static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) | 125 | static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) |
135 | { | 126 | { |
136 | struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); | 127 | struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); |
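The tunables above use the stock module_param()/MODULE_PARM_DESC() pairing; only the defaults differ between the two trees (no_outbound_task_to 2 versus 20, phy_gen the SCIC_SDS_PARM_GEN2_SPEED constant versus a bare 3, max_concurr_spinup 0 versus 1). Declaring one such knob looks like this (example_tunable is illustrative):

    #include <linux/module.h>

    /* Illustrative knob, declared the same way as phy_gen and friends. */
    static unsigned char example_tunable = 2;
    module_param(example_tunable, byte, 0);
    MODULE_PARM_DESC(example_tunable, "Example knob (document the unit here)");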
@@ -155,8 +146,9 @@ static struct scsi_host_template isci_sht = { | |||
155 | .queuecommand = sas_queuecommand, | 146 | .queuecommand = sas_queuecommand, |
156 | .target_alloc = sas_target_alloc, | 147 | .target_alloc = sas_target_alloc, |
157 | .slave_configure = sas_slave_configure, | 148 | .slave_configure = sas_slave_configure, |
149 | .slave_destroy = sas_slave_destroy, | ||
158 | .scan_finished = isci_host_scan_finished, | 150 | .scan_finished = isci_host_scan_finished, |
159 | .scan_start = isci_host_start, | 151 | .scan_start = isci_host_scan_start, |
160 | .change_queue_depth = sas_change_queue_depth, | 152 | .change_queue_depth = sas_change_queue_depth, |
161 | .change_queue_type = sas_change_queue_type, | 153 | .change_queue_type = sas_change_queue_type, |
162 | .bios_param = sas_bios_param, | 154 | .bios_param = sas_bios_param, |
@@ -166,9 +158,9 @@ static struct scsi_host_template isci_sht = { | |||
166 | .sg_tablesize = SG_ALL, | 158 | .sg_tablesize = SG_ALL, |
167 | .max_sectors = SCSI_DEFAULT_MAX_SECTORS, | 159 | .max_sectors = SCSI_DEFAULT_MAX_SECTORS, |
168 | .use_clustering = ENABLE_CLUSTERING, | 160 | .use_clustering = ENABLE_CLUSTERING, |
169 | .eh_abort_handler = sas_eh_abort_handler, | 161 | .eh_device_reset_handler = sas_eh_device_reset_handler, |
170 | .eh_device_reset_handler = sas_eh_device_reset_handler, | 162 | .eh_bus_reset_handler = isci_bus_reset_handler, |
171 | .eh_bus_reset_handler = sas_eh_bus_reset_handler, | 163 | .slave_alloc = sas_slave_alloc, |
172 | .target_destroy = sas_target_destroy, | 164 | .target_destroy = sas_target_destroy, |
173 | .ioctl = sas_ioctl, | 165 | .ioctl = sas_ioctl, |
174 | .shost_attrs = isci_host_attrs, | 166 | .shost_attrs = isci_host_attrs, |
@@ -194,18 +186,12 @@ static struct sas_domain_function_template isci_transport_ops = { | |||
194 | .lldd_lu_reset = isci_task_lu_reset, | 186 | .lldd_lu_reset = isci_task_lu_reset, |
195 | .lldd_query_task = isci_task_query_task, | 187 | .lldd_query_task = isci_task_query_task, |
196 | 188 | ||
197 | /* ata recovery called from ata-eh */ | ||
198 | .lldd_ata_check_ready = isci_ata_check_ready, | ||
199 | |||
200 | /* Port and Adapter management */ | 189 | /* Port and Adapter management */ |
201 | .lldd_clear_nexus_port = isci_task_clear_nexus_port, | 190 | .lldd_clear_nexus_port = isci_task_clear_nexus_port, |
202 | .lldd_clear_nexus_ha = isci_task_clear_nexus_ha, | 191 | .lldd_clear_nexus_ha = isci_task_clear_nexus_ha, |
203 | 192 | ||
204 | /* Phy management */ | 193 | /* Phy management */ |
205 | .lldd_control_phy = isci_phy_control, | 194 | .lldd_control_phy = isci_phy_control, |
206 | |||
207 | /* GPIO support */ | ||
208 | .lldd_write_gpio = isci_gpio_write, | ||
209 | }; | 195 | }; |
210 | 196 | ||
211 | 197 | ||
@@ -222,7 +208,7 @@ static struct sas_domain_function_template isci_transport_ops = { | |||
222 | * @isci_host: This parameter specifies the lldd specific wrapper for the | 208 | * @isci_host: This parameter specifies the lldd specific wrapper for the |
223 | * libsas sas_ha struct. | 209 | * libsas sas_ha struct. |
224 | * | 210 | * |
225 | * This method returns an error code indicating success or failure. The user | 211 | This method returns an error code indicating success or failure. The user
226 | * should check for possible memory allocation error return otherwise, a zero | 212 | * should check for possible memory allocation error return otherwise, a zero |
227 | * indicates success. | 213 | * indicates success. |
228 | */ | 214 | */ |
@@ -245,13 +231,18 @@ static int isci_register_sas_ha(struct isci_host *isci_host) | |||
245 | if (!sas_ports) | 231 | if (!sas_ports) |
246 | return -ENOMEM; | 232 | return -ENOMEM; |
247 | 233 | ||
234 | /*----------------- Libsas Initialization Stuff---------------------- | ||
235 | * Set various fields in the sas_ha struct: | ||
236 | */ | ||
237 | |||
248 | sas_ha->sas_ha_name = DRV_NAME; | 238 | sas_ha->sas_ha_name = DRV_NAME; |
249 | sas_ha->lldd_module = THIS_MODULE; | 239 | sas_ha->lldd_module = THIS_MODULE; |
250 | sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0]; | 240 | sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0]; |
251 | 241 | ||
242 | /* set the array of phy and port structs. */ | ||
252 | for (i = 0; i < SCI_MAX_PHYS; i++) { | 243 | for (i = 0; i < SCI_MAX_PHYS; i++) { |
253 | sas_phys[i] = &isci_host->phys[i].sas_phy; | 244 | sas_phys[i] = &isci_host->phys[i].sas_phy; |
254 | sas_ports[i] = &isci_host->sas_ports[i]; | 245 | sas_ports[i] = &isci_host->ports[i].sas_port; |
255 | } | 246 | } |
256 | 247 | ||
257 | sas_ha->sas_phy = sas_phys; | 248 | sas_ha->sas_phy = sas_phys; |
@@ -274,15 +265,16 @@ static void isci_unregister(struct isci_host *isci_host) | |||
274 | if (!isci_host) | 265 | if (!isci_host) |
275 | return; | 266 | return; |
276 | 267 | ||
268 | shost = isci_host->shost; | ||
269 | |||
277 | sas_unregister_ha(&isci_host->sas_ha); | 270 | sas_unregister_ha(&isci_host->sas_ha); |
278 | 271 | ||
279 | shost = to_shost(isci_host); | 272 | sas_remove_host(isci_host->shost); |
280 | sas_remove_host(shost); | 273 | scsi_remove_host(isci_host->shost); |
281 | scsi_remove_host(shost); | 274 | scsi_host_put(isci_host->shost); |
282 | scsi_host_put(shost); | ||
283 | } | 275 | } |
284 | 276 | ||
285 | static int isci_pci_init(struct pci_dev *pdev) | 277 | static int __devinit isci_pci_init(struct pci_dev *pdev) |
286 | { | 278 | { |
287 | int err, bar_num, bar_mask = 0; | 279 | int err, bar_num, bar_mask = 0; |
288 | void __iomem * const *iomap; | 280 | void __iomem * const *iomap; |
@@ -399,199 +391,30 @@ static int isci_setup_interrupts(struct pci_dev *pdev) | |||
399 | return err; | 391 | return err; |
400 | } | 392 | } |
401 | 393 | ||
402 | static void isci_user_parameters_get(struct sci_user_parameters *u) | ||
403 | { | ||
404 | int i; | ||
405 | |||
406 | for (i = 0; i < SCI_MAX_PHYS; i++) { | ||
407 | struct sci_phy_user_params *u_phy = &u->phys[i]; | ||
408 | |||
409 | u_phy->max_speed_generation = phy_gen; | ||
410 | |||
411 | /* we are not exporting these for now */ | ||
412 | u_phy->align_insertion_frequency = 0x7f; | ||
413 | u_phy->in_connection_align_insertion_frequency = 0xff; | ||
414 | u_phy->notify_enable_spin_up_insertion_frequency = 0x33; | ||
415 | } | ||
416 | |||
417 | u->stp_inactivity_timeout = stp_inactive_to; | ||
418 | u->ssp_inactivity_timeout = ssp_inactive_to; | ||
419 | u->stp_max_occupancy_timeout = stp_max_occ_to; | ||
420 | u->ssp_max_occupancy_timeout = ssp_max_occ_to; | ||
421 | u->no_outbound_task_timeout = no_outbound_task_to; | ||
422 | u->max_concurr_spinup = max_concurr_spinup; | ||
423 | } | ||
424 | |||
425 | static enum sci_status sci_user_parameters_set(struct isci_host *ihost, | ||
426 | struct sci_user_parameters *sci_parms) | ||
427 | { | ||
428 | u16 index; | ||
429 | |||
430 | /* | ||
431 | * Validate the user parameters. If they are not legal, then | ||
432 | * return a failure. | ||
433 | */ | ||
434 | for (index = 0; index < SCI_MAX_PHYS; index++) { | ||
435 | struct sci_phy_user_params *u; | ||
436 | |||
437 | u = &sci_parms->phys[index]; | ||
438 | |||
439 | if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) && | ||
440 | (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED))) | ||
441 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
442 | |||
443 | if (u->in_connection_align_insertion_frequency < 3) | ||
444 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
445 | |||
446 | if ((u->in_connection_align_insertion_frequency < 3) || | ||
447 | (u->align_insertion_frequency == 0) || | ||
448 | (u->notify_enable_spin_up_insertion_frequency == 0)) | ||
449 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
450 | } | ||
451 | |||
452 | if ((sci_parms->stp_inactivity_timeout == 0) || | ||
453 | (sci_parms->ssp_inactivity_timeout == 0) || | ||
454 | (sci_parms->stp_max_occupancy_timeout == 0) || | ||
455 | (sci_parms->ssp_max_occupancy_timeout == 0) || | ||
456 | (sci_parms->no_outbound_task_timeout == 0)) | ||
457 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
458 | |||
459 | memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); | ||
460 | |||
461 | return SCI_SUCCESS; | ||
462 | } | ||
463 | |||
464 | static void sci_oem_defaults(struct isci_host *ihost) | ||
465 | { | ||
466 | /* these defaults are overridden by the platform / firmware */ | ||
467 | struct sci_user_parameters *user = &ihost->user_parameters; | ||
468 | struct sci_oem_params *oem = &ihost->oem_parameters; | ||
469 | int i; | ||
470 | |||
471 | /* Default to APC mode. */ | ||
472 | oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; | ||
473 | |||
474 | /* Default to APC mode. */ | ||
475 | oem->controller.max_concurr_spin_up = 1; | ||
476 | |||
477 | /* Default to no SSC operation. */ | ||
478 | oem->controller.do_enable_ssc = false; | ||
479 | |||
480 | /* Default to short cables on all phys. */ | ||
481 | oem->controller.cable_selection_mask = 0; | ||
482 | |||
483 | /* Initialize all of the port parameter information to narrow ports. */ | ||
484 | for (i = 0; i < SCI_MAX_PORTS; i++) | ||
485 | oem->ports[i].phy_mask = 0; | ||
486 | |||
487 | /* Initialize all of the phy parameter information. */ | ||
488 | for (i = 0; i < SCI_MAX_PHYS; i++) { | ||
489 | /* Default to 3G (i.e. Gen 2). */ | ||
490 | user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED; | ||
491 | |||
492 | /* the frequencies cannot be 0 */ | ||
493 | user->phys[i].align_insertion_frequency = 0x7f; | ||
494 | user->phys[i].in_connection_align_insertion_frequency = 0xff; | ||
495 | user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33; | ||
496 | |||
497 | /* Previous Vitesse based expanders had an arbitration issue that | ||
498 | * is worked around by having the upper 32-bits of SAS address | ||
499 | * with a value greater than the Vitesse company identifier. | ||
500 | * Hence, usage of 0x5FCFFFFF. | ||
501 | */ | ||
502 | oem->phys[i].sas_address.low = 0x1 + ihost->id; | ||
503 | oem->phys[i].sas_address.high = 0x5FCFFFFF; | ||
504 | } | ||
505 | |||
506 | user->stp_inactivity_timeout = 5; | ||
507 | user->ssp_inactivity_timeout = 5; | ||
508 | user->stp_max_occupancy_timeout = 5; | ||
509 | user->ssp_max_occupancy_timeout = 20; | ||
510 | user->no_outbound_task_timeout = 2; | ||
511 | } | ||
512 | |||
513 | static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | 394 | static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) |
514 | { | 395 | { |
515 | struct isci_orom *orom = to_pci_info(pdev)->orom; | 396 | struct isci_host *isci_host; |
516 | struct sci_user_parameters sci_user_params; | ||
517 | u8 oem_version = ISCI_ROM_VER_1_0; | ||
518 | struct isci_host *ihost; | ||
519 | struct Scsi_Host *shost; | 397 | struct Scsi_Host *shost; |
520 | int err, i; | 398 | int err; |
521 | |||
522 | ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL); | ||
523 | if (!ihost) | ||
524 | return NULL; | ||
525 | |||
526 | ihost->pdev = pdev; | ||
527 | ihost->id = id; | ||
528 | spin_lock_init(&ihost->scic_lock); | ||
529 | init_waitqueue_head(&ihost->eventq); | ||
530 | ihost->sas_ha.dev = &ihost->pdev->dev; | ||
531 | ihost->sas_ha.lldd_ha = ihost; | ||
532 | tasklet_init(&ihost->completion_tasklet, | ||
533 | isci_host_completion_routine, (unsigned long)ihost); | ||
534 | |||
535 | /* validate module parameters */ | ||
536 | /* TODO: kill struct sci_user_parameters and reference directly */ | ||
537 | sci_oem_defaults(ihost); | ||
538 | isci_user_parameters_get(&sci_user_params); | ||
539 | if (sci_user_parameters_set(ihost, &sci_user_params)) { | ||
540 | dev_warn(&pdev->dev, | ||
541 | "%s: sci_user_parameters_set failed\n", __func__); | ||
542 | return NULL; | ||
543 | } | ||
544 | |||
545 | /* sanity check platform (or 'firmware') oem parameters */ | ||
546 | if (orom) { | ||
547 | if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) { | ||
548 | dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n"); | ||
549 | return NULL; | ||
550 | } | ||
551 | ihost->oem_parameters = orom->ctrl[id]; | ||
552 | oem_version = orom->hdr.version; | ||
553 | } | ||
554 | 399 | ||
555 | /* validate oem parameters (platform, firmware, or built-in defaults) */ | 400 | isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL); |
556 | if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) { | 401 | if (!isci_host) |
557 | dev_warn(&pdev->dev, "oem parameter validation failed\n"); | ||
558 | return NULL; | 402 | return NULL; |
559 | } | ||
560 | |||
561 | for (i = 0; i < SCI_MAX_PORTS; i++) { | ||
562 | struct isci_port *iport = &ihost->ports[i]; | ||
563 | 403 | ||
564 | INIT_LIST_HEAD(&iport->remote_dev_list); | 404 | isci_host->pdev = pdev; |
565 | iport->isci_host = ihost; | 405 | isci_host->id = id; |
566 | } | ||
567 | |||
568 | for (i = 0; i < SCI_MAX_PHYS; i++) | ||
569 | isci_phy_init(&ihost->phys[i], ihost, i); | ||
570 | |||
571 | for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { | ||
572 | struct isci_remote_device *idev = &ihost->devices[i]; | ||
573 | |||
574 | INIT_LIST_HEAD(&idev->node); | ||
575 | } | ||
576 | 406 | ||
577 | shost = scsi_host_alloc(&isci_sht, sizeof(void *)); | 407 | shost = scsi_host_alloc(&isci_sht, sizeof(void *)); |
578 | if (!shost) | 408 | if (!shost) |
579 | return NULL; | 409 | return NULL; |
410 | isci_host->shost = shost; | ||
580 | 411 | ||
581 | dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: " | 412 | err = isci_host_init(isci_host); |
582 | "{%s, %s, %s, %s}\n", | ||
583 | (is_cable_select_overridden() ? "* " : ""), ihost->id, | ||
584 | lookup_cable_names(decode_cable_selection(ihost, 3)), | ||
585 | lookup_cable_names(decode_cable_selection(ihost, 2)), | ||
586 | lookup_cable_names(decode_cable_selection(ihost, 1)), | ||
587 | lookup_cable_names(decode_cable_selection(ihost, 0))); | ||
588 | |||
589 | err = isci_host_init(ihost); | ||
590 | if (err) | 413 | if (err) |
591 | goto err_shost; | 414 | goto err_shost; |
592 | 415 | ||
593 | SHOST_TO_SAS_HA(shost) = &ihost->sas_ha; | 416 | SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha; |
594 | ihost->sas_ha.core.shost = shost; | 417 | isci_host->sas_ha.core.shost = shost; |
595 | shost->transportt = isci_transport_template; | 418 | shost->transportt = isci_transport_template; |
596 | 419 | ||
597 | shost->max_id = ~0; | 420 | shost->max_id = ~0; |
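sci_user_parameters_set() enforces the same envelope in both trees before the block is memcpy()'d into the host: per phy, max_speed_generation must lie in (SCIC_SDS_PARM_NO_SPEED, SCIC_SDS_PARM_MAX_SPEED] and the align-insertion frequencies must be nonzero, with the in-connection one at least 3; globally, all five timeouts must be nonzero. The per-phy check condensed (the duplicated < 3 test in the source collapses into one):

    /* Condensed per-phy validation from sci_user_parameters_set(). */
    static bool example_phy_params_ok(const struct sci_phy_user_params *u)
    {
    	if (u->max_speed_generation > SCIC_SDS_PARM_MAX_SPEED ||
    	    u->max_speed_generation <= SCIC_SDS_PARM_NO_SPEED)
    		return false;
    	if (u->in_connection_align_insertion_frequency < 3)
    		return false;
    	return u->align_insertion_frequency != 0 &&
    	       u->notify_enable_spin_up_insertion_frequency != 0;
    }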
@@ -602,11 +425,11 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | |||
602 | if (err) | 425 | if (err) |
603 | goto err_shost; | 426 | goto err_shost; |
604 | 427 | ||
605 | err = isci_register_sas_ha(ihost); | 428 | err = isci_register_sas_ha(isci_host); |
606 | if (err) | 429 | if (err) |
607 | goto err_shost_remove; | 430 | goto err_shost_remove; |
608 | 431 | ||
609 | return ihost; | 432 | return isci_host; |
610 | 433 | ||
611 | err_shost_remove: | 434 | err_shost_remove: |
612 | scsi_remove_host(shost); | 435 | scsi_remove_host(shost); |
@@ -616,7 +439,7 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | |||
616 | return NULL; | 439 | return NULL; |
617 | } | 440 | } |
618 | 441 | ||
619 | static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 442 | static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
620 | { | 443 | { |
621 | struct isci_pci_info *pci_info; | 444 | struct isci_pci_info *pci_info; |
622 | int err, i; | 445 | int err, i; |
@@ -639,11 +462,11 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
639 | if (!orom) | 462 | if (!orom) |
640 | orom = isci_request_oprom(pdev); | 463 | orom = isci_request_oprom(pdev); |
641 | 464 | ||
642 | for (i = 0; orom && i < num_controllers(pdev); i++) { | 465 | for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { |
643 | if (sci_oem_parameters_validate(&orom->ctrl[i], | 466 | if (sci_oem_parameters_validate(&orom->ctrl[i])) { |
644 | orom->hdr.version)) { | ||
645 | dev_warn(&pdev->dev, | 467 | dev_warn(&pdev->dev, |
646 | "[%d]: invalid oem parameters detected, falling back to firmware\n", i); | 468 | "[%d]: invalid oem parameters detected, falling back to firmware\n", i); |
469 | devm_kfree(&pdev->dev, orom); | ||
647 | orom = NULL; | 470 | orom = NULL; |
648 | break; | 471 | break; |
649 | } | 472 | } |
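
Both sides of this hunk enforce the same fallback policy: if any controller's OEM parameter block fails validation, the whole option-ROM image is discarded so later probe code falls back to firmware or built-in defaults. The two versions differ only in how many slots they check (num_controllers(pdev) vs ARRAY_SIZE(orom->ctrl)) and in whether the header version is passed to the validator. A minimal sketch of the pattern, with hypothetical types and a placeholder validator standing in for the real sci_oem_parameters_validate():

#include <stdbool.h>

/* Stand-ins for the driver's real types; everything below is a
 * hypothetical sketch, not isci's actual definitions. */
struct oem_ctrl { int phys; };
struct oem_rom {
	unsigned int version;
	struct oem_ctrl ctrl[2];
};

static bool oem_params_valid(const struct oem_ctrl *c, unsigned int ver)
{
	return ver != 0 && c->phys > 0;	/* placeholder check */
}

/* Validate each controller's OEM block; one bad entry invalidates the
 * whole ROM image so the caller falls back to firmware defaults. */
static struct oem_rom *validate_or_drop(struct oem_rom *rom, int num_ctrls)
{
	for (int i = 0; rom && i < num_ctrls; i++) {
		if (!oem_params_valid(&rom->ctrl[i], rom->version)) {
			rom = NULL;	/* fall back to firmware */
			break;
		}
	}
	return rom;
}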
@@ -685,13 +508,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
685 | goto err_host_alloc; | 508 | goto err_host_alloc; |
686 | } | 509 | } |
687 | pci_info->hosts[i] = h; | 510 | pci_info->hosts[i] = h; |
688 | |||
689 | /* turn on DIF support */ | ||
690 | scsi_host_set_prot(to_shost(h), | ||
691 | SHOST_DIF_TYPE1_PROTECTION | | ||
692 | SHOST_DIF_TYPE2_PROTECTION | | ||
693 | SHOST_DIF_TYPE3_PROTECTION); | ||
694 | scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); | ||
695 | } | 511 | } |
696 | 512 | ||
697 | err = isci_setup_interrupts(pdev); | 513 | err = isci_setup_interrupts(pdev); |
@@ -699,7 +515,7 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
699 | goto err_host_alloc; | 515 | goto err_host_alloc; |
700 | 516 | ||
701 | for_each_isci_host(i, isci_host, pdev) | 517 | for_each_isci_host(i, isci_host, pdev) |
702 | scsi_scan_host(to_shost(isci_host)); | 518 | scsi_scan_host(isci_host->shost); |
703 | 519 | ||
704 | return 0; | 520 | return 0; |
705 | 521 | ||
@@ -709,79 +525,23 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
709 | return err; | 525 | return err; |
710 | } | 526 | } |
711 | 527 | ||
712 | static void isci_pci_remove(struct pci_dev *pdev) | 528 | static void __devexit isci_pci_remove(struct pci_dev *pdev) |
713 | { | 529 | { |
714 | struct isci_host *ihost; | 530 | struct isci_host *ihost; |
715 | int i; | 531 | int i; |
716 | 532 | ||
717 | for_each_isci_host(i, ihost, pdev) { | 533 | for_each_isci_host(i, ihost, pdev) { |
718 | wait_for_start(ihost); | ||
719 | isci_unregister(ihost); | 534 | isci_unregister(ihost); |
720 | isci_host_deinit(ihost); | 535 | isci_host_deinit(ihost); |
536 | sci_controller_disable_interrupts(ihost); | ||
721 | } | 537 | } |
722 | } | 538 | } |
723 | 539 | ||
724 | #ifdef CONFIG_PM | ||
725 | static int isci_suspend(struct device *dev) | ||
726 | { | ||
727 | struct pci_dev *pdev = to_pci_dev(dev); | ||
728 | struct isci_host *ihost; | ||
729 | int i; | ||
730 | |||
731 | for_each_isci_host(i, ihost, pdev) { | ||
732 | sas_suspend_ha(&ihost->sas_ha); | ||
733 | isci_host_deinit(ihost); | ||
734 | } | ||
735 | |||
736 | pci_save_state(pdev); | ||
737 | pci_disable_device(pdev); | ||
738 | pci_set_power_state(pdev, PCI_D3hot); | ||
739 | |||
740 | return 0; | ||
741 | } | ||
742 | |||
743 | static int isci_resume(struct device *dev) | ||
744 | { | ||
745 | struct pci_dev *pdev = to_pci_dev(dev); | ||
746 | struct isci_host *ihost; | ||
747 | int rc, i; | ||
748 | |||
749 | pci_set_power_state(pdev, PCI_D0); | ||
750 | pci_restore_state(pdev); | ||
751 | |||
752 | rc = pcim_enable_device(pdev); | ||
753 | if (rc) { | ||
754 | dev_err(&pdev->dev, | ||
755 | "enabling device failure after resume(%d)\n", rc); | ||
756 | return rc; | ||
757 | } | ||
758 | |||
759 | pci_set_master(pdev); | ||
760 | |||
761 | for_each_isci_host(i, ihost, pdev) { | ||
762 | sas_prep_resume_ha(&ihost->sas_ha); | ||
763 | |||
764 | isci_host_init(ihost); | ||
765 | isci_host_start(ihost->sas_ha.core.shost); | ||
766 | wait_for_start(ihost); | ||
767 | |||
768 | sas_resume_ha(&ihost->sas_ha); | ||
769 | } | ||
770 | |||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume); | ||
775 | #endif | ||
776 | |||
777 | static struct pci_driver isci_pci_driver = { | 540 | static struct pci_driver isci_pci_driver = { |
778 | .name = DRV_NAME, | 541 | .name = DRV_NAME, |
779 | .id_table = isci_id_table, | 542 | .id_table = isci_id_table, |
780 | .probe = isci_pci_probe, | 543 | .probe = isci_pci_probe, |
781 | .remove = isci_pci_remove, | 544 | .remove = __devexit_p(isci_pci_remove), |
782 | #ifdef CONFIG_PM | ||
783 | .driver.pm = &isci_pm_ops, | ||
784 | #endif | ||
785 | }; | 545 | }; |
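
For context on the .driver.pm line being removed above: SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops whose system-sleep slots point at the two callbacks, and the PCI core finds it through pci_driver.driver.pm. A hedged sketch of that wiring (names are illustrative and callback bodies elided; this is the general pattern, not this driver's exact code):

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int example_suspend(struct device *dev)
{
	/* quiesce the adapter before the system enters S3/S4 */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore config space, re-enable and reinit the adapter */
	return 0;
}

/* Fills the suspend/resume (and freeze/thaw/poweroff/restore) slots. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#endif

static struct pci_driver example_pci_driver = {
	.name = "example",
#ifdef CONFIG_PM
	.driver.pm = &example_pm_ops,	/* invoked by the PCI core */
#endif
};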
786 | 546 | ||
787 | static __init int isci_init(void) | 547 | static __init int isci_init(void) |
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 234ab46fce3..8efeb6b0832 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h | |||
@@ -480,7 +480,6 @@ extern u16 ssp_inactive_to; | |||
480 | extern u16 stp_inactive_to; | 480 | extern u16 stp_inactive_to; |
481 | extern unsigned char phy_gen; | 481 | extern unsigned char phy_gen; |
482 | extern unsigned char max_concurr_spinup; | 482 | extern unsigned char max_concurr_spinup; |
483 | extern uint cable_selection_override; | ||
484 | 483 | ||
485 | irqreturn_t isci_msix_isr(int vec, void *data); | 484 | irqreturn_t isci_msix_isr(int vec, void *data); |
486 | irqreturn_t isci_intx_isr(int vec, void *data); | 485 | irqreturn_t isci_intx_isr(int vec, void *data); |
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index cb87b2ef7c9..430fc8ff014 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c | |||
@@ -59,16 +59,6 @@ | |||
59 | #include "scu_event_codes.h" | 59 | #include "scu_event_codes.h" |
60 | #include "probe_roms.h" | 60 | #include "probe_roms.h" |
61 | 61 | ||
62 | #undef C | ||
63 | #define C(a) (#a) | ||
64 | static const char *phy_state_name(enum sci_phy_states state) | ||
65 | { | ||
66 | static const char * const strings[] = PHY_STATES; | ||
67 | |||
68 | return strings[state]; | ||
69 | } | ||
70 | #undef C | ||
71 | |||
72 | /* Maximum arbitration wait time in micro-seconds */ | 62 | /* Maximum arbitration wait time in micro-seconds */ |
73 | #define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700) | 63 | #define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700) |
74 | 64 | ||
@@ -77,19 +67,6 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy) | |||
77 | return iphy->max_negotiated_speed; | 67 | return iphy->max_negotiated_speed; |
78 | } | 68 | } |
79 | 69 | ||
80 | static struct isci_host *phy_to_host(struct isci_phy *iphy) | ||
81 | { | ||
82 | struct isci_phy *table = iphy - iphy->phy_index; | ||
83 | struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]); | ||
84 | |||
85 | return ihost; | ||
86 | } | ||
87 | |||
88 | static struct device *sciphy_to_dev(struct isci_phy *iphy) | ||
89 | { | ||
90 | return &phy_to_host(iphy)->pdev->dev; | ||
91 | } | ||
92 | |||
93 | static enum sci_status | 70 | static enum sci_status |
94 | sci_phy_transport_layer_initialization(struct isci_phy *iphy, | 71 | sci_phy_transport_layer_initialization(struct isci_phy *iphy, |
95 | struct scu_transport_layer_registers __iomem *reg) | 72 | struct scu_transport_layer_registers __iomem *reg) |
@@ -114,23 +91,22 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy, | |||
114 | 91 | ||
115 | static enum sci_status | 92 | static enum sci_status |
116 | sci_phy_link_layer_initialization(struct isci_phy *iphy, | 93 | sci_phy_link_layer_initialization(struct isci_phy *iphy, |
117 | struct scu_link_layer_registers __iomem *llr) | 94 | struct scu_link_layer_registers __iomem *reg) |
118 | { | 95 | { |
119 | struct isci_host *ihost = iphy->owning_port->owning_controller; | 96 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
120 | struct sci_phy_user_params *phy_user; | ||
121 | struct sci_phy_oem_params *phy_oem; | ||
122 | int phy_idx = iphy->phy_index; | 97 | int phy_idx = iphy->phy_index; |
123 | struct sci_phy_cap phy_cap; | 98 | struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx]; |
99 | struct sci_phy_oem_params *phy_oem = | ||
100 | &ihost->oem_parameters.phys[phy_idx]; | ||
124 | u32 phy_configuration; | 101 | u32 phy_configuration; |
102 | struct sci_phy_cap phy_cap; | ||
125 | u32 parity_check = 0; | 103 | u32 parity_check = 0; |
126 | u32 parity_count = 0; | 104 | u32 parity_count = 0; |
127 | u32 llctl, link_rate; | 105 | u32 llctl, link_rate; |
128 | u32 clksm_value = 0; | 106 | u32 clksm_value = 0; |
129 | u32 sp_timeouts = 0; | 107 | u32 sp_timeouts = 0; |
130 | 108 | ||
131 | phy_user = &ihost->user_parameters.phys[phy_idx]; | 109 | iphy->link_layer_registers = reg; |
132 | phy_oem = &ihost->oem_parameters.phys[phy_idx]; | ||
133 | iphy->link_layer_registers = llr; | ||
134 | 110 | ||
135 | /* Set our IDENTIFY frame data */ | 111 | /* Set our IDENTIFY frame data */ |
136 | #define SCI_END_DEVICE 0x01 | 112 | #define SCI_END_DEVICE 0x01 |
@@ -140,26 +116,32 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, | |||
140 | SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | | 116 | SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | |
141 | SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | | 117 | SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | |
142 | SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), | 118 | SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), |
143 | &llr->transmit_identification); | 119 | &iphy->link_layer_registers->transmit_identification); |
144 | 120 | ||
145 | /* Write the device SAS Address */ | 121 | /* Write the device SAS Address */ |
146 | writel(0xFEDCBA98, &llr->sas_device_name_high); | 122 | writel(0xFEDCBA98, |
147 | writel(phy_idx, &llr->sas_device_name_low); | 123 | &iphy->link_layer_registers->sas_device_name_high); |
124 | writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low); | ||
148 | 125 | ||
149 | /* Write the source SAS Address */ | 126 | /* Write the source SAS Address */ |
150 | writel(phy_oem->sas_address.high, &llr->source_sas_address_high); | 127 | writel(phy_oem->sas_address.high, |
151 | writel(phy_oem->sas_address.low, &llr->source_sas_address_low); | 128 | &iphy->link_layer_registers->source_sas_address_high); |
129 | writel(phy_oem->sas_address.low, | ||
130 | &iphy->link_layer_registers->source_sas_address_low); | ||
152 | 131 | ||
153 | /* Clear and Set the PHY Identifier */ | 132 | /* Clear and Set the PHY Identifier */ |
154 | writel(0, &llr->identify_frame_phy_id); | 133 | writel(0, &iphy->link_layer_registers->identify_frame_phy_id); |
155 | writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id); | 134 | writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), |
135 | &iphy->link_layer_registers->identify_frame_phy_id); | ||
156 | 136 | ||
157 | /* Change the initial state of the phy configuration register */ | 137 | /* Change the initial state of the phy configuration register */ |
158 | phy_configuration = readl(&llr->phy_configuration); | 138 | phy_configuration = |
139 | readl(&iphy->link_layer_registers->phy_configuration); | ||
159 | 140 | ||
160 | /* Hold OOB state machine in reset */ | 141 | /* Hold OOB state machine in reset */ |
161 | phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); | 142 | phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); |
162 | writel(phy_configuration, &llr->phy_configuration); | 143 | writel(phy_configuration, |
144 | &iphy->link_layer_registers->phy_configuration); | ||
163 | 145 | ||
164 | /* Configure the SNW capabilities */ | 146 | /* Configure the SNW capabilities */ |
165 | phy_cap.all = 0; | 147 | phy_cap.all = 0; |
@@ -167,64 +149,15 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, | |||
167 | phy_cap.gen3_no_ssc = 1; | 149 | phy_cap.gen3_no_ssc = 1; |
168 | phy_cap.gen2_no_ssc = 1; | 150 | phy_cap.gen2_no_ssc = 1; |
169 | phy_cap.gen1_no_ssc = 1; | 151 | phy_cap.gen1_no_ssc = 1; |
170 | if (ihost->oem_parameters.controller.do_enable_ssc) { | 152 | if (ihost->oem_parameters.controller.do_enable_ssc == true) { |
171 | struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; | ||
172 | struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_idx]; | ||
173 | struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); | ||
174 | bool en_sas = false; | ||
175 | bool en_sata = false; | ||
176 | u32 sas_type = 0; | ||
177 | u32 sata_spread = 0x2; | ||
178 | u32 sas_spread = 0x2; | ||
179 | |||
180 | phy_cap.gen3_ssc = 1; | 153 | phy_cap.gen3_ssc = 1; |
181 | phy_cap.gen2_ssc = 1; | 154 | phy_cap.gen2_ssc = 1; |
182 | phy_cap.gen1_ssc = 1; | 155 | phy_cap.gen1_ssc = 1; |
183 | |||
184 | if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1) | ||
185 | en_sas = en_sata = true; | ||
186 | else { | ||
187 | sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level; | ||
188 | sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level; | ||
189 | |||
190 | if (sata_spread) | ||
191 | en_sata = true; | ||
192 | |||
193 | if (sas_spread) { | ||
194 | en_sas = true; | ||
195 | sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type; | ||
196 | } | ||
197 | |||
198 | } | ||
199 | |||
200 | if (en_sas) { | ||
201 | u32 reg; | ||
202 | |||
203 | reg = readl(&xcvr->afe_xcvr_control0); | ||
204 | reg |= (0x00100000 | (sas_type << 19)); | ||
205 | writel(reg, &xcvr->afe_xcvr_control0); | ||
206 | |||
207 | reg = readl(&xcvr->afe_tx_ssc_control); | ||
208 | reg |= sas_spread << 8; | ||
209 | writel(reg, &xcvr->afe_tx_ssc_control); | ||
210 | } | ||
211 | |||
212 | if (en_sata) { | ||
213 | u32 reg; | ||
214 | |||
215 | reg = readl(&xcvr->afe_tx_ssc_control); | ||
216 | reg |= sata_spread; | ||
217 | writel(reg, &xcvr->afe_tx_ssc_control); | ||
218 | |||
219 | reg = readl(&llr->stp_control); | ||
220 | reg |= 1 << 12; | ||
221 | writel(reg, &llr->stp_control); | ||
222 | } | ||
223 | } | 156 | } |
224 | 157 | ||
225 | /* The SAS specification indicates that the phy_capabilities that | 158 | /* |
226 | * are transmitted shall have an even parity. Calculate the parity. | 159 | * The SAS specification indicates that the phy_capabilities that |
227 | */ | 160 | * are transmitted shall have an even parity. Calculate the parity. */ |
228 | parity_check = phy_cap.all; | 161 | parity_check = phy_cap.all; |
229 | while (parity_check != 0) { | 162 | while (parity_check != 0) { |
230 | if (parity_check & 0x1) | 163 | if (parity_check & 0x1) |
@@ -232,20 +165,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, | |||
232 | parity_check >>= 1; | 165 | parity_check >>= 1; |
233 | } | 166 | } |
234 | 167 | ||
235 | /* If parity indicates there are an odd number of bits set, then | 168 | /* |
236 | * set the parity bit to 1 in the phy capabilities. | 169 | * If parity indicates there are an odd number of bits set, then |
237 | */ | 170 | * set the parity bit to 1 in the phy capabilities. */ |
238 | if ((parity_count % 2) != 0) | 171 | if ((parity_count % 2) != 0) |
239 | phy_cap.parity = 1; | 172 | phy_cap.parity = 1; |
240 | 173 | ||
241 | writel(phy_cap.all, &llr->phy_capabilities); | 174 | writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities); |
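
The loop above implements the SAS requirement that the transmitted capabilities word carry even parity: count the set bits and, if the count is odd, set the parity bit so the total becomes even. The same computation in a self-contained form (the parity bit's position here is an assumption for illustration; in the driver it is the parity bitfield of struct sci_phy_cap):

#include <stdint.h>

/* Return cap with bit 0 (assumed parity position) set if needed so
 * that the total number of 1 bits in the word is even. */
static uint32_t apply_even_parity(uint32_t cap)
{
	uint32_t bits = cap, count = 0;

	while (bits) {			/* same walk as the driver's loop */
		count += bits & 1;
		bits >>= 1;
	}
	if (count & 1)			/* odd population: flip parity */
		cap |= 1;
	return cap;
}

On GCC/Clang the loop could be replaced by __builtin_popcount(cap) & 1, but the explicit walk mirrors what the driver does.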
242 | 175 | ||
243 | /* Set the enable spinup period but disable the ability to send | 176 | /* Set the enable spinup period but disable the ability to send |
244 | * notify enable spinup | 177 | * notify enable spinup |
245 | */ | 178 | */ |
246 | writel(SCU_ENSPINUP_GEN_VAL(COUNT, | 179 | writel(SCU_ENSPINUP_GEN_VAL(COUNT, |
247 | phy_user->notify_enable_spin_up_insertion_frequency), | 180 | phy_user->notify_enable_spin_up_insertion_frequency), |
248 | &llr->notify_enable_spinup_control); | 181 | &iphy->link_layer_registers->notify_enable_spinup_control); |
249 | 182 | ||
250 | /* Write the ALIGN Insertion Frequency for connected phy and | 183 | /* Write the ALIGN Insertion Frequency for connected phy and
251 | * independent of connected state | 184 | * independent of connected state
@@ -256,13 +189,11 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, | |||
256 | clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, | 189 | clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, |
257 | phy_user->align_insertion_frequency); | 190 | phy_user->align_insertion_frequency); |
258 | 191 | ||
259 | writel(clksm_value, &llr->clock_skew_management); | 192 | writel(clksm_value, &iphy->link_layer_registers->clock_skew_management); |
260 | 193 | ||
261 | if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) { | 194 | /* @todo Provide a way to write this register correctly */ |
262 | writel(0x04210400, &llr->afe_lookup_table_control); | 195 | writel(0x02108421, |
263 | writel(0x020A7C05, &llr->sas_primitive_timeout); | 196 | &iphy->link_layer_registers->afe_lookup_table_control); |
264 | } else | ||
265 | writel(0x02108421, &llr->afe_lookup_table_control); | ||
266 | 197 | ||
267 | llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, | 198 | llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, |
268 | (u8)ihost->user_parameters.no_outbound_task_timeout); | 199 | (u8)ihost->user_parameters.no_outbound_task_timeout); |
@@ -279,9 +210,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, | |||
279 | break; | 210 | break; |
280 | } | 211 | } |
281 | llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); | 212 | llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); |
282 | writel(llctl, &llr->link_layer_control); | 213 | writel(llctl, &iphy->link_layer_registers->link_layer_control); |
283 | 214 | ||
284 | sp_timeouts = readl(&llr->sas_phy_timeouts); | 215 | sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); |
285 | 216 | ||
286 | /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ | 217 | /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ |
287 | sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); | 218 | sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); |
@@ -291,23 +222,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, | |||
291 | */ | 222 | */ |
292 | sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); | 223 | sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); |
293 | 224 | ||
294 | writel(sp_timeouts, &llr->sas_phy_timeouts); | 225 | writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); |
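
The RATE_CHANGE update just above is the standard mask-then-set idiom: SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF) produces a full-width field mask used to clear the hardware's 0x36 default before OR-ing in 0x3B. A generic sketch of the idiom, with made-up shift and width values:

#include <stdint.h>

/* Hypothetical layout: RATE_CHANGE occupies bits 7:0 of the register. */
#define RATE_CHANGE_SHIFT	0
#define RATE_CHANGE_MASK	(0xFFu << RATE_CHANGE_SHIFT)
#define RATE_CHANGE_VAL(v)	(((uint32_t)(v) << RATE_CHANGE_SHIFT) & \
				 RATE_CHANGE_MASK)

static uint32_t set_rate_change(uint32_t timeouts, uint8_t val)
{
	timeouts &= ~RATE_CHANGE_MASK;		/* clear the old field */
	timeouts |= RATE_CHANGE_VAL(val);	/* install the new value */
	return timeouts;
}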
295 | 226 | ||
296 | if (is_a2(ihost->pdev)) { | 227 | if (is_a2(ihost->pdev)) { |
297 | /* Program the max ARB time for the PHY to 700us so we | 228 | /* Program the max ARB time for the PHY to 700us so we inter-operate with |
298 | * inter-operate with the PMC expander which shuts down | 229 | * the PMC expander which shuts down PHYs if the expander PHY generates too |
299 | * PHYs if the expander PHY generates too many breaks. | 230 | * many breaks. This time value will guarantee that the initiator PHY will |
300 | * This time value will guarantee that the initiator PHY | 231 | * generate the break. |
301 | * will generate the break. | ||
302 | */ | 232 | */ |
303 | writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, | 233 | writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, |
304 | &llr->maximum_arbitration_wait_timer_timeout); | 234 | &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout); |
305 | } | 235 | } |
306 | 236 | ||
307 | /* Disable link layer hang detection, rely on the OS timeout for | 237 | /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */ |
308 | * I/O timeouts. | 238 | writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout); |
309 | */ | ||
310 | writel(0, &llr->link_layer_hang_detection_timeout); | ||
311 | 239 | ||
312 | /* We can exit the initial state to the stopped state */ | 240 | /* We can exit the initial state to the stopped state */ |
313 | sci_change_state(&iphy->sm, SCI_PHY_STOPPED); | 241 | sci_change_state(&iphy->sm, SCI_PHY_STOPPED); |
@@ -469,8 +397,8 @@ enum sci_status sci_phy_start(struct isci_phy *iphy) | |||
469 | enum sci_phy_states state = iphy->sm.current_state_id; | 397 | enum sci_phy_states state = iphy->sm.current_state_id; |
470 | 398 | ||
471 | if (state != SCI_PHY_STOPPED) { | 399 | if (state != SCI_PHY_STOPPED) { |
472 | dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", | 400 | dev_dbg(sciphy_to_dev(iphy), |
473 | __func__, phy_state_name(state)); | 401 | "%s: in wrong state: %d\n", __func__, state); |
474 | return SCI_FAILURE_INVALID_STATE; | 402 | return SCI_FAILURE_INVALID_STATE; |
475 | } | 403 | } |
476 | 404 | ||
@@ -495,8 +423,8 @@ enum sci_status sci_phy_stop(struct isci_phy *iphy) | |||
495 | case SCI_PHY_READY: | 423 | case SCI_PHY_READY: |
496 | break; | 424 | break; |
497 | default: | 425 | default: |
498 | dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", | 426 | dev_dbg(sciphy_to_dev(iphy), |
499 | __func__, phy_state_name(state)); | 427 | "%s: in wrong state: %d\n", __func__, state); |
500 | return SCI_FAILURE_INVALID_STATE; | 428 | return SCI_FAILURE_INVALID_STATE; |
501 | } | 429 | } |
502 | 430 | ||
@@ -509,8 +437,8 @@ enum sci_status sci_phy_reset(struct isci_phy *iphy) | |||
509 | enum sci_phy_states state = iphy->sm.current_state_id; | 437 | enum sci_phy_states state = iphy->sm.current_state_id; |
510 | 438 | ||
511 | if (state != SCI_PHY_READY) { | 439 | if (state != SCI_PHY_READY) { |
512 | dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", | 440 | dev_dbg(sciphy_to_dev(iphy), |
513 | __func__, phy_state_name(state)); | 441 | "%s: in wrong state: %d\n", __func__, state); |
514 | return SCI_FAILURE_INVALID_STATE; | 442 | return SCI_FAILURE_INVALID_STATE; |
515 | } | 443 | } |
516 | 444 | ||
@@ -559,8 +487,8 @@ enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy) | |||
559 | return SCI_SUCCESS; | 487 | return SCI_SUCCESS; |
560 | } | 488 | } |
561 | default: | 489 | default: |
562 | dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", | 490 | dev_dbg(sciphy_to_dev(iphy), |
563 | __func__, phy_state_name(state)); | 491 | "%s: in wrong state: %d\n", __func__, state); |
564 | return SCI_FAILURE_INVALID_STATE; | 492 | return SCI_FAILURE_INVALID_STATE; |
565 | } | 493 | } |
566 | } | 494 | } |
@@ -580,7 +508,7 @@ static void sci_phy_start_sas_link_training(struct isci_phy *iphy) | |||
580 | 508 | ||
581 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); | 509 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); |
582 | 510 | ||
583 | iphy->protocol = SAS_PROTOCOL_SSP; | 511 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; |
584 | } | 512 | } |
585 | 513 | ||
586 | static void sci_phy_start_sata_link_training(struct isci_phy *iphy) | 514 | static void sci_phy_start_sata_link_training(struct isci_phy *iphy) |
@@ -591,7 +519,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy) | |||
591 | */ | 519 | */ |
592 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); | 520 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); |
593 | 521 | ||
594 | iphy->protocol = SAS_PROTOCOL_SATA; | 522 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; |
595 | } | 523 | } |
596 | 524 | ||
597 | /** | 525 | /** |
@@ -614,73 +542,6 @@ static void sci_phy_complete_link_training(struct isci_phy *iphy, | |||
614 | sci_change_state(&iphy->sm, next_state); | 542 | sci_change_state(&iphy->sm, next_state); |
615 | } | 543 | } |
616 | 544 | ||
617 | static const char *phy_event_name(u32 event_code) | ||
618 | { | ||
619 | switch (scu_get_event_code(event_code)) { | ||
620 | case SCU_EVENT_PORT_SELECTOR_DETECTED: | ||
621 | return "port selector"; | ||
622 | case SCU_EVENT_SENT_PORT_SELECTION: | ||
623 | return "port selection"; | ||
624 | case SCU_EVENT_HARD_RESET_TRANSMITTED: | ||
625 | return "tx hard reset"; | ||
626 | case SCU_EVENT_HARD_RESET_RECEIVED: | ||
627 | return "rx hard reset"; | ||
628 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: | ||
629 | return "identify timeout"; | ||
630 | case SCU_EVENT_LINK_FAILURE: | ||
631 | return "link fail"; | ||
632 | case SCU_EVENT_SATA_SPINUP_HOLD: | ||
633 | return "sata spinup hold"; | ||
634 | case SCU_EVENT_SAS_15_SSC: | ||
635 | case SCU_EVENT_SAS_15: | ||
636 | return "sas 1.5"; | ||
637 | case SCU_EVENT_SAS_30_SSC: | ||
638 | case SCU_EVENT_SAS_30: | ||
639 | return "sas 3.0"; | ||
640 | case SCU_EVENT_SAS_60_SSC: | ||
641 | case SCU_EVENT_SAS_60: | ||
642 | return "sas 6.0"; | ||
643 | case SCU_EVENT_SATA_15_SSC: | ||
644 | case SCU_EVENT_SATA_15: | ||
645 | return "sata 1.5"; | ||
646 | case SCU_EVENT_SATA_30_SSC: | ||
647 | case SCU_EVENT_SATA_30: | ||
648 | return "sata 3.0"; | ||
649 | case SCU_EVENT_SATA_60_SSC: | ||
650 | case SCU_EVENT_SATA_60: | ||
651 | return "sata 6.0"; | ||
652 | case SCU_EVENT_SAS_PHY_DETECTED: | ||
653 | return "sas detect"; | ||
654 | case SCU_EVENT_SATA_PHY_DETECTED: | ||
655 | return "sata detect"; | ||
656 | default: | ||
657 | return "unknown"; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | #define phy_event_dbg(iphy, state, code) \ | ||
662 | dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \ | ||
663 | phy_to_host(iphy)->id, iphy->phy_index, \ | ||
664 | phy_state_name(state), phy_event_name(code), code) | ||
665 | |||
666 | #define phy_event_warn(iphy, state, code) \ | ||
667 | dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \ | ||
668 | phy_to_host(iphy)->id, iphy->phy_index, \ | ||
669 | phy_state_name(state), phy_event_name(code), code) | ||
670 | |||
671 | |||
672 | void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout) | ||
673 | { | ||
674 | u32 val; | ||
675 | |||
676 | /* Extend timeout */ | ||
677 | val = readl(&iphy->link_layer_registers->transmit_comsas_signal); | ||
678 | val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK); | ||
679 | val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout); | ||
680 | |||
681 | writel(val, &iphy->link_layer_registers->transmit_comsas_signal); | ||
682 | } | ||
683 | |||
684 | enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | 545 | enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) |
685 | { | 546 | { |
686 | enum sci_phy_states state = iphy->sm.current_state_id; | 547 | enum sci_phy_states state = iphy->sm.current_state_id; |
@@ -696,15 +557,12 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
696 | sci_phy_start_sata_link_training(iphy); | 557 | sci_phy_start_sata_link_training(iphy); |
697 | iphy->is_in_link_training = true; | 558 | iphy->is_in_link_training = true; |
698 | break; | 559 | break; |
699 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: | ||
700 | /* Extend timeout value */ | ||
701 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); | ||
702 | |||
703 | /* Start the oob/sn state machine over again */ | ||
704 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | ||
705 | break; | ||
706 | default: | 560 | default: |
707 | phy_event_dbg(iphy, state, event_code); | 561 | dev_dbg(sciphy_to_dev(iphy), |
562 | "%s: PHY starting substate machine received " | ||
563 | "unexpected event_code %x\n", | ||
564 | __func__, | ||
565 | event_code); | ||
708 | return SCI_FAILURE; | 566 | return SCI_FAILURE; |
709 | } | 567 | } |
710 | return SCI_SUCCESS; | 568 | return SCI_SUCCESS; |
@@ -737,21 +595,15 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
737 | sci_phy_start_sata_link_training(iphy); | 595 | sci_phy_start_sata_link_training(iphy); |
738 | break; | 596 | break; |
739 | case SCU_EVENT_LINK_FAILURE: | 597 | case SCU_EVENT_LINK_FAILURE: |
740 | /* Change the timeout value to default */ | ||
741 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
742 | |||
743 | /* Link failure change state back to the starting state */ | 598 | /* Link failure change state back to the starting state */ |
744 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 599 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
745 | break; | 600 | break; |
746 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: | ||
747 | /* Extend the timeout value */ | ||
748 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); | ||
749 | |||
750 | /* Start the oob/sn state machine over again */ | ||
751 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | ||
752 | break; | ||
753 | default: | 601 | default: |
754 | phy_event_warn(iphy, state, event_code); | 602 | dev_warn(sciphy_to_dev(iphy), |
603 | "%s: PHY starting substate machine received " | ||
604 | "unexpected event_code %x\n", | ||
605 | __func__, event_code); | ||
606 | |||
755 | return SCI_FAILURE; | 607 | return SCI_FAILURE; |
756 | break; | 608 | break; |
757 | } | 609 | } |
@@ -770,43 +622,37 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
770 | sci_phy_start_sata_link_training(iphy); | 622 | sci_phy_start_sata_link_training(iphy); |
771 | break; | 623 | break; |
772 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: | 624 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: |
773 | /* Extend the timeout value */ | ||
774 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); | ||
775 | |||
776 | /* Start the oob/sn state machine over again */ | ||
777 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | ||
778 | break; | ||
779 | case SCU_EVENT_LINK_FAILURE: | 625 | case SCU_EVENT_LINK_FAILURE: |
780 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
781 | case SCU_EVENT_HARD_RESET_RECEIVED: | 626 | case SCU_EVENT_HARD_RESET_RECEIVED: |
782 | /* Start the oob/sn state machine over again */ | 627 | /* Start the oob/sn state machine over again */ |
783 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 628 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
784 | break; | 629 | break; |
785 | default: | 630 | default: |
786 | phy_event_warn(iphy, state, event_code); | 631 | dev_warn(sciphy_to_dev(iphy), |
632 | "%s: PHY starting substate machine received " | ||
633 | "unexpected event_code %x\n", | ||
634 | __func__, event_code); | ||
787 | return SCI_FAILURE; | 635 | return SCI_FAILURE; |
788 | } | 636 | } |
789 | return SCI_SUCCESS; | 637 | return SCI_SUCCESS; |
790 | case SCI_PHY_SUB_AWAIT_SAS_POWER: | 638 | case SCI_PHY_SUB_AWAIT_SAS_POWER: |
791 | switch (scu_get_event_code(event_code)) { | 639 | switch (scu_get_event_code(event_code)) { |
792 | case SCU_EVENT_LINK_FAILURE: | 640 | case SCU_EVENT_LINK_FAILURE: |
793 | /* Change the timeout value to default */ | ||
794 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
795 | |||
796 | /* Link failure change state back to the starting state */ | 641 | /* Link failure change state back to the starting state */ |
797 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 642 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
798 | break; | 643 | break; |
799 | default: | 644 | default: |
800 | phy_event_warn(iphy, state, event_code); | 645 | dev_warn(sciphy_to_dev(iphy), |
646 | "%s: PHY starting substate machine received unexpected " | ||
647 | "event_code %x\n", | ||
648 | __func__, | ||
649 | event_code); | ||
801 | return SCI_FAILURE; | 650 | return SCI_FAILURE; |
802 | } | 651 | } |
803 | return SCI_SUCCESS; | 652 | return SCI_SUCCESS; |
804 | case SCI_PHY_SUB_AWAIT_SATA_POWER: | 653 | case SCI_PHY_SUB_AWAIT_SATA_POWER: |
805 | switch (scu_get_event_code(event_code)) { | 654 | switch (scu_get_event_code(event_code)) { |
806 | case SCU_EVENT_LINK_FAILURE: | 655 | case SCU_EVENT_LINK_FAILURE: |
807 | /* Change the timeout value to default */ | ||
808 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
809 | |||
810 | /* Link failure change state back to the starting state */ | 656 | /* Link failure change state back to the starting state */ |
811 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 657 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
812 | break; | 658 | break; |
@@ -824,16 +670,17 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
824 | break; | 670 | break; |
825 | 671 | ||
826 | default: | 672 | default: |
827 | phy_event_warn(iphy, state, event_code); | 673 | dev_warn(sciphy_to_dev(iphy), |
674 | "%s: PHY starting substate machine received " | ||
675 | "unexpected event_code %x\n", | ||
676 | __func__, event_code); | ||
677 | |||
828 | return SCI_FAILURE; | 678 | return SCI_FAILURE; |
829 | } | 679 | } |
830 | return SCI_SUCCESS; | 680 | return SCI_SUCCESS; |
831 | case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: | 681 | case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: |
832 | switch (scu_get_event_code(event_code)) { | 682 | switch (scu_get_event_code(event_code)) { |
833 | case SCU_EVENT_LINK_FAILURE: | 683 | case SCU_EVENT_LINK_FAILURE: |
834 | /* Change the timeout value to default */ | ||
835 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
836 | |||
837 | /* Link failure change state back to the starting state */ | 684 | /* Link failure change state back to the starting state */ |
838 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 685 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
839 | break; | 686 | break; |
@@ -843,7 +690,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
843 | */ | 690 | */ |
844 | break; | 691 | break; |
845 | case SCU_EVENT_SATA_PHY_DETECTED: | 692 | case SCU_EVENT_SATA_PHY_DETECTED: |
846 | iphy->protocol = SAS_PROTOCOL_SATA; | 693 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; |
847 | 694 | ||
848 | /* We have received the SATA PHY notification change state */ | 695 | /* We have received the SATA PHY notification change state */ |
849 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); | 696 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); |
@@ -855,8 +702,13 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
855 | sci_phy_start_sas_link_training(iphy); | 702 | sci_phy_start_sas_link_training(iphy); |
856 | break; | 703 | break; |
857 | default: | 704 | default: |
858 | phy_event_warn(iphy, state, event_code); | 705 | dev_warn(sciphy_to_dev(iphy), |
859 | return SCI_FAILURE; | 706 | "%s: PHY starting substate machine received " |
707 | "unexpected event_code %x\n", | ||
708 | __func__, | ||
709 | event_code); | ||
710 | |||
711 | return SCI_FAILURE; | ||
860 | } | 712 | } |
861 | return SCI_SUCCESS; | 713 | return SCI_SUCCESS; |
862 | case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: | 714 | case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: |
@@ -882,9 +734,6 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
882 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); | 734 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); |
883 | break; | 735 | break; |
884 | case SCU_EVENT_LINK_FAILURE: | 736 | case SCU_EVENT_LINK_FAILURE: |
885 | /* Change the timeout value to default */ | ||
886 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
887 | |||
888 | /* Link failure change state back to the starting state */ | 737 | /* Link failure change state back to the starting state */ |
889 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 738 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
890 | break; | 739 | break; |
@@ -895,7 +744,11 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
895 | sci_phy_start_sas_link_training(iphy); | 744 | sci_phy_start_sas_link_training(iphy); |
896 | break; | 745 | break; |
897 | default: | 746 | default: |
898 | phy_event_warn(iphy, state, event_code); | 747 | dev_warn(sciphy_to_dev(iphy), |
748 | "%s: PHY starting substate machine received " | ||
749 | "unexpected event_code %x\n", | ||
750 | __func__, event_code); | ||
751 | |||
899 | return SCI_FAILURE; | 752 | return SCI_FAILURE; |
900 | } | 753 | } |
901 | 754 | ||
@@ -908,43 +761,38 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
908 | break; | 761 | break; |
909 | 762 | ||
910 | case SCU_EVENT_LINK_FAILURE: | 763 | case SCU_EVENT_LINK_FAILURE: |
911 | /* Change the timeout value to default */ | ||
912 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
913 | |||
914 | /* Link failure change state back to the starting state */ | 764 | /* Link failure change state back to the starting state */ |
915 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 765 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
916 | break; | 766 | break; |
917 | 767 | ||
918 | default: | 768 | default: |
919 | phy_event_warn(iphy, state, event_code); | 769 | dev_warn(sciphy_to_dev(iphy), |
770 | "%s: PHY starting substate machine received " | ||
771 | "unexpected event_code %x\n", | ||
772 | __func__, | ||
773 | event_code); | ||
774 | |||
920 | return SCI_FAILURE; | 775 | return SCI_FAILURE; |
921 | } | 776 | } |
922 | return SCI_SUCCESS; | 777 | return SCI_SUCCESS; |
923 | case SCI_PHY_READY: | 778 | case SCI_PHY_READY: |
924 | switch (scu_get_event_code(event_code)) { | 779 | switch (scu_get_event_code(event_code)) { |
925 | case SCU_EVENT_LINK_FAILURE: | 780 | case SCU_EVENT_LINK_FAILURE: |
926 | /* Set default timeout */ | ||
927 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
928 | |||
929 | /* Link failure change state back to the starting state */ | 781 | /* Link failure change state back to the starting state */ |
930 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 782 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
931 | break; | 783 | break; |
932 | case SCU_EVENT_BROADCAST_CHANGE: | 784 | case SCU_EVENT_BROADCAST_CHANGE: |
933 | case SCU_EVENT_BROADCAST_SES: | ||
934 | case SCU_EVENT_BROADCAST_RESERVED0: | ||
935 | case SCU_EVENT_BROADCAST_RESERVED1: | ||
936 | case SCU_EVENT_BROADCAST_EXPANDER: | ||
937 | case SCU_EVENT_BROADCAST_AEN: | ||
938 | /* Broadcast change received. Notify the port. */ | 785 | /* Broadcast change received. Notify the port. */ |
939 | if (phy_get_non_dummy_port(iphy) != NULL) | 786 | if (phy_get_non_dummy_port(iphy) != NULL) |
940 | sci_port_broadcast_change_received(iphy->owning_port, iphy); | 787 | sci_port_broadcast_change_received(iphy->owning_port, iphy); |
941 | else | 788 | else |
942 | iphy->bcn_received_while_port_unassigned = true; | 789 | iphy->bcn_received_while_port_unassigned = true; |
943 | break; | 790 | break; |
944 | case SCU_EVENT_BROADCAST_RESERVED3: | ||
945 | case SCU_EVENT_BROADCAST_RESERVED4: | ||
946 | default: | 791 | default: |
947 | phy_event_warn(iphy, state, event_code); | 792 | dev_warn(sciphy_to_dev(iphy), |
793 | "%sP SCIC PHY 0x%p ready state machine received " | ||
794 | "unexpected event_code %x\n", | ||
795 | __func__, iphy, event_code); | ||
948 | return SCI_FAILURE_INVALID_STATE; | 796 | return SCI_FAILURE_INVALID_STATE; |
949 | } | 797 | } |
950 | return SCI_SUCCESS; | 798 | return SCI_SUCCESS; |
@@ -955,14 +803,18 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
955 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 803 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
956 | break; | 804 | break; |
957 | default: | 805 | default: |
958 | phy_event_warn(iphy, state, event_code); | 806 | dev_warn(sciphy_to_dev(iphy), |
807 | "%s: SCIC PHY 0x%p resetting state machine received " | ||
808 | "unexpected event_code %x\n", | ||
809 | __func__, iphy, event_code); | ||
810 | |||
959 | return SCI_FAILURE_INVALID_STATE; | 811 | return SCI_FAILURE_INVALID_STATE; |
960 | break; | 812 | break; |
961 | } | 813 | } |
962 | return SCI_SUCCESS; | 814 | return SCI_SUCCESS; |
963 | default: | 815 | default: |
964 | dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", | 816 | dev_dbg(sciphy_to_dev(iphy), |
965 | __func__, phy_state_name(state)); | 817 | "%s: in wrong state: %d\n", __func__, state); |
966 | return SCI_FAILURE_INVALID_STATE; | 818 | return SCI_FAILURE_INVALID_STATE; |
967 | } | 819 | } |
968 | } | 820 | } |
@@ -1055,8 +907,8 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) | |||
1055 | return result; | 907 | return result; |
1056 | } | 908 | } |
1057 | default: | 909 | default: |
1058 | dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", | 910 | dev_dbg(sciphy_to_dev(iphy), |
1059 | __func__, phy_state_name(state)); | 911 | "%s: in wrong state: %d\n", __func__, state); |
1060 | return SCI_FAILURE_INVALID_STATE; | 912 | return SCI_FAILURE_INVALID_STATE; |
1061 | } | 913 | } |
1062 | 914 | ||
@@ -1197,26 +1049,24 @@ static void scu_link_layer_stop_protocol_engine( | |||
1197 | writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); | 1049 | writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); |
1198 | } | 1050 | } |
1199 | 1051 | ||
1200 | static void scu_link_layer_start_oob(struct isci_phy *iphy) | 1052 | /** |
1053 | * | ||
1054 | * | ||
1055 | * This method will start the OOB/SN state machine for this struct isci_phy object. | ||
1056 | */ | ||
1057 | static void scu_link_layer_start_oob( | ||
1058 | struct isci_phy *iphy) | ||
1201 | { | 1059 | { |
1202 | struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers; | 1060 | u32 scu_sas_pcfg_value; |
1203 | u32 val; | 1061 | |
1204 | 1062 | scu_sas_pcfg_value = | |
1205 | /** Reset OOB sequence - start */ | 1063 | readl(&iphy->link_layer_registers->phy_configuration); |
1206 | val = readl(&ll->phy_configuration); | 1064 | scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); |
1207 | val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | | 1065 | scu_sas_pcfg_value &= |
1208 | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) | | 1066 | ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | |
1209 | SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); | 1067 | SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); |
1210 | writel(val, &ll->phy_configuration); | 1068 | writel(scu_sas_pcfg_value, |
1211 | readl(&ll->phy_configuration); /* flush */ | 1069 | &iphy->link_layer_registers->phy_configuration); |
1212 | /** Reset OOB sequence - end */ | ||
1213 | |||
1214 | /** Start OOB sequence - start */ | ||
1215 | val = readl(&ll->phy_configuration); | ||
1216 | val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); | ||
1217 | writel(val, &ll->phy_configuration); | ||
1218 | readl(&ll->phy_configuration); /* flush */ | ||
1219 | /** Start OOB sequence - end */ | ||
1220 | } | 1070 | } |
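
The left-hand rewrite splits scu_link_layer_start_oob() into an explicit reset phase and a start phase, and follows each writel() with a throwaway readl() so the posted MMIO write reaches the device before the next step. A generic sketch of that read-modify-write-then-flush pattern (the bit names are made up for illustration):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical phy-configuration bits, for illustration only. */
#define CFG_OOB_RESET	(1u << 0)
#define CFG_OOB_ENABLE	(1u << 1)
#define CFG_HARD_RESET	(1u << 2)

static void start_oob_sketch(void __iomem *cfg)
{
	u32 val;

	/* Phase 1: clear reset/enable bits, flush the posted write. */
	val = readl(cfg);
	val &= ~(CFG_OOB_RESET | CFG_OOB_ENABLE | CFG_HARD_RESET);
	writel(val, cfg);
	(void)readl(cfg);		/* read back to flush */

	/* Phase 2: raise OOB_ENABLE as a separate, ordered step. */
	val = readl(cfg);
	val |= CFG_OOB_ENABLE;
	writel(val, cfg);
	(void)readl(cfg);		/* read back to flush */
}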
1221 | 1071 | ||
1222 | /** | 1072 | /** |
@@ -1237,7 +1087,6 @@ static void scu_link_layer_tx_hard_reset( | |||
1237 | * to the starting state. */ | 1087 | * to the starting state. */ |
1238 | phy_configuration_value = | 1088 | phy_configuration_value = |
1239 | readl(&iphy->link_layer_registers->phy_configuration); | 1089 | readl(&iphy->link_layer_registers->phy_configuration); |
1240 | phy_configuration_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE)); | ||
1241 | phy_configuration_value |= | 1090 | phy_configuration_value |= |
1242 | (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) | | 1091 | (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) | |
1243 | SCU_SAS_PCFG_GEN_BIT(OOB_RESET)); | 1092 | SCU_SAS_PCFG_GEN_BIT(OOB_RESET)); |
@@ -1279,7 +1128,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) | |||
1279 | scu_link_layer_start_oob(iphy); | 1128 | scu_link_layer_start_oob(iphy); |
1280 | 1129 | ||
1281 | /* We don't know what kind of phy we are going to be just yet */ | 1130 | /* We don't know what kind of phy we are going to be just yet */ |
1282 | iphy->protocol = SAS_PROTOCOL_NONE; | 1131 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; |
1283 | iphy->bcn_received_while_port_unassigned = false; | 1132 | iphy->bcn_received_while_port_unassigned = false; |
1284 | 1133 | ||
1285 | if (iphy->sm.previous_state_id == SCI_PHY_READY) | 1134 | if (iphy->sm.previous_state_id == SCI_PHY_READY) |
@@ -1314,7 +1163,7 @@ static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) | |||
1314 | */ | 1163 | */ |
1315 | sci_port_deactivate_phy(iphy->owning_port, iphy, false); | 1164 | sci_port_deactivate_phy(iphy->owning_port, iphy, false); |
1316 | 1165 | ||
1317 | if (iphy->protocol == SAS_PROTOCOL_SSP) { | 1166 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { |
1318 | scu_link_layer_tx_hard_reset(iphy); | 1167 | scu_link_layer_tx_hard_reset(iphy); |
1319 | } else { | 1168 | } else { |
1320 | /* The SCU does not need to have a discrete reset state so | 1169 | /* The SCU does not need to have a discrete reset state so |
@@ -1380,7 +1229,7 @@ void sci_phy_construct(struct isci_phy *iphy, | |||
1380 | iphy->owning_port = iport; | 1229 | iphy->owning_port = iport; |
1381 | iphy->phy_index = phy_index; | 1230 | iphy->phy_index = phy_index; |
1382 | iphy->bcn_received_while_port_unassigned = false; | 1231 | iphy->bcn_received_while_port_unassigned = false; |
1383 | iphy->protocol = SAS_PROTOCOL_NONE; | 1232 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; |
1384 | iphy->link_layer_registers = NULL; | 1233 | iphy->link_layer_registers = NULL; |
1385 | iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; | 1234 | iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; |
1386 | 1235 | ||
@@ -1400,6 +1249,7 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) | |||
1400 | sas_addr = cpu_to_be64(sci_sas_addr); | 1249 | sas_addr = cpu_to_be64(sci_sas_addr); |
1401 | memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); | 1250 | memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); |
1402 | 1251 | ||
1252 | iphy->isci_port = NULL; | ||
1403 | iphy->sas_phy.enabled = 0; | 1253 | iphy->sas_phy.enabled = 0; |
1404 | iphy->sas_phy.id = index; | 1254 | iphy->sas_phy.id = index; |
1405 | iphy->sas_phy.sas_addr = &iphy->sas_addr[0]; | 1255 | iphy->sas_phy.sas_addr = &iphy->sas_addr[0]; |
@@ -1433,48 +1283,36 @@ int isci_phy_control(struct asd_sas_phy *sas_phy, | |||
1433 | { | 1283 | { |
1434 | int ret = 0; | 1284 | int ret = 0; |
1435 | struct isci_phy *iphy = sas_phy->lldd_phy; | 1285 | struct isci_phy *iphy = sas_phy->lldd_phy; |
1436 | struct asd_sas_port *port = sas_phy->port; | 1286 | struct isci_port *iport = iphy->isci_port; |
1437 | struct isci_host *ihost = sas_phy->ha->lldd_ha; | 1287 | struct isci_host *ihost = sas_phy->ha->lldd_ha; |
1438 | unsigned long flags; | 1288 | unsigned long flags; |
1439 | 1289 | ||
1440 | dev_dbg(&ihost->pdev->dev, | 1290 | dev_dbg(&ihost->pdev->dev, |
1441 | "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n", | 1291 | "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n", |
1442 | __func__, sas_phy, func, buf, iphy, port); | 1292 | __func__, sas_phy, func, buf, iphy, iport); |
1443 | 1293 | ||
1444 | switch (func) { | 1294 | switch (func) { |
1445 | case PHY_FUNC_DISABLE: | 1295 | case PHY_FUNC_DISABLE: |
1446 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1296 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1447 | scu_link_layer_start_oob(iphy); | ||
1448 | sci_phy_stop(iphy); | 1297 | sci_phy_stop(iphy); |
1449 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1298 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1450 | break; | 1299 | break; |
1451 | 1300 | ||
1452 | case PHY_FUNC_LINK_RESET: | 1301 | case PHY_FUNC_LINK_RESET: |
1453 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1302 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1454 | scu_link_layer_start_oob(iphy); | ||
1455 | sci_phy_stop(iphy); | 1303 | sci_phy_stop(iphy); |
1456 | sci_phy_start(iphy); | 1304 | sci_phy_start(iphy); |
1457 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1305 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1458 | break; | 1306 | break; |
1459 | 1307 | ||
1460 | case PHY_FUNC_HARD_RESET: | 1308 | case PHY_FUNC_HARD_RESET: |
1461 | if (!port) | 1309 | if (!iport) |
1462 | return -ENODEV; | 1310 | return -ENODEV; |
1463 | 1311 | ||
1464 | ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy); | 1312 | /* Perform the port reset. */ |
1313 | ret = isci_port_perform_hard_reset(ihost, iport, iphy); | ||
1465 | 1314 | ||
1466 | break; | 1315 | break; |
1467 | case PHY_FUNC_GET_EVENTS: { | ||
1468 | struct scu_link_layer_registers __iomem *r; | ||
1469 | struct sas_phy *phy = sas_phy->phy; | ||
1470 | |||
1471 | r = iphy->link_layer_registers; | ||
1472 | phy->running_disparity_error_count = readl(&r->running_disparity_error_count); | ||
1473 | phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count); | ||
1474 | phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count); | ||
1475 | phy->invalid_dword_count = readl(&r->invalid_dword_counter); | ||
1476 | break; | ||
1477 | } | ||
1478 | 1316 | ||
1479 | default: | 1317 | default: |
1480 | dev_dbg(&ihost->pdev->dev, | 1318 | dev_dbg(&ihost->pdev->dev, |
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index 45fecfa36a9..67699c8e321 100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h | |||
@@ -76,6 +76,13 @@ | |||
76 | */ | 76 | */ |
77 | #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 | 77 | #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 |
78 | 78 | ||
79 | enum sci_phy_protocol { | ||
80 | SCIC_SDS_PHY_PROTOCOL_UNKNOWN, | ||
81 | SCIC_SDS_PHY_PROTOCOL_SAS, | ||
82 | SCIC_SDS_PHY_PROTOCOL_SATA, | ||
83 | SCIC_SDS_MAX_PHY_PROTOCOLS | ||
84 | }; | ||
85 | |||
79 | /** | 86 | /** |
80 | * isci_phy - hba local phy infrastructure | 87 | * isci_phy - hba local phy infrastructure |
81 | * @sm: | 88 | * @sm: |
@@ -88,7 +95,7 @@ struct isci_phy { | |||
88 | struct sci_base_state_machine sm; | 95 | struct sci_base_state_machine sm; |
89 | struct isci_port *owning_port; | 96 | struct isci_port *owning_port; |
90 | enum sas_linkrate max_negotiated_speed; | 97 | enum sas_linkrate max_negotiated_speed; |
91 | enum sas_protocol protocol; | 98 | enum sci_phy_protocol protocol; |
92 | u8 phy_index; | 99 | u8 phy_index; |
93 | bool bcn_received_while_port_unassigned; | 100 | bool bcn_received_while_port_unassigned; |
94 | bool is_in_link_training; | 101 | bool is_in_link_training; |
@@ -96,6 +103,7 @@ struct isci_phy { | |||
96 | struct scu_transport_layer_registers __iomem *transport_layer_registers; | 103 | struct scu_transport_layer_registers __iomem *transport_layer_registers; |
97 | struct scu_link_layer_registers __iomem *link_layer_registers; | 104 | struct scu_link_layer_registers __iomem *link_layer_registers; |
98 | struct asd_sas_phy sas_phy; | 105 | struct asd_sas_phy sas_phy; |
106 | struct isci_port *isci_port; | ||
99 | u8 sas_addr[SAS_ADDR_SIZE]; | 107 | u8 sas_addr[SAS_ADDR_SIZE]; |
100 | union { | 108 | union { |
101 | struct sas_identify_frame iaf; | 109 | struct sas_identify_frame iaf; |
@@ -336,65 +344,101 @@ enum sci_phy_counter_id { | |||
336 | SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR | 344 | SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR |
337 | }; | 345 | }; |
338 | 346 | ||
339 | /** | 347 | enum sci_phy_states { |
340 | * enum sci_phy_states - phy state machine states | 348 | /** |
341 | * @SCI_PHY_INITIAL: Simply the initial state for the base domain state | 349 | * Simply the initial state for the base domain state machine. |
342 | * machine. | 350 | */ |
343 | * @SCI_PHY_STOPPED: phy has successfully been stopped. In this state | 351 | SCI_PHY_INITIAL, |
344 | * no new IO operations are permitted on this phy. | 352 | |
345 | * @SCI_PHY_STARTING: the phy is in the process of becoming ready. In | 353 | /**
346 | * this state no new IO operations are permitted on | 354 | * This state indicates that the phy has successfully been stopped. |
347 | * this phy. | 355 | * In this state no new IO operations are permitted on this phy. |
348 | * @SCI_PHY_SUB_INITIAL: Initial state | 356 | * This state is entered from the INITIAL state. |
349 | * @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event | 357 | * This state is entered from the STARTING state. |
350 | * type notification | 358 | * This state is entered from the READY state. |
351 | * @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed | 359 | * This state is entered from the RESETTING state. |
352 | * notification | 360 | */ |
353 | * @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame | 361 | SCI_PHY_STOPPED, |
354 | * notification | 362 | |
355 | * @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume | 363 | /** |
356 | * power | 364 | * This state indicates that the phy is in the process of becoming
357 | * @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume | 365 | * ready. In this state no new IO operations are permitted on this phy. |
358 | * power | 366 | * This state is entered from the STOPPED state. |
359 | * @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY | 367 | * This state is entered from the READY state. |
360 | * notification | 368 | * This state is entered from the RESETTING state. |
361 | * @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed | 369 | */ |
362 | * notification | 370 | SCI_PHY_STARTING, |
363 | * @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS | 371 | |
364 | * unsolicited frame notification | 372 | /** |
365 | * @SCI_PHY_SUB_FINAL: Exit state for this state machine | 373 | * Initial state |
366 | * @SCI_PHY_READY: phy is now ready. Thus, the user is able to perform | 374 | */ |
367 | * IO operations utilizing this phy as long as it is | 375 | SCI_PHY_SUB_INITIAL, |
368 | * currently part of a valid port. This state is | 376 | |
369 | * entered from the STARTING state. | 377 | /** |
370 | * @SCI_PHY_RESETTING: phy is in the process of being reset. In this | 378 | * Wait state for the hardware OSSP event type notification |
371 | * state no new IO operations are permitted on this | 379 | */ |
372 | * phy. This state is entered from the READY state. | 380 | SCI_PHY_SUB_AWAIT_OSSP_EN, |
373 | * @SCI_PHY_FINAL: Simply the final state for the base phy state | 381 | |
374 | * machine. | 382 | /** |
375 | */ | 383 | * Wait state for the PHY speed notification |
376 | #define PHY_STATES {\ | 384 | */ |
377 | C(PHY_INITIAL),\ | 385 | SCI_PHY_SUB_AWAIT_SAS_SPEED_EN, |
378 | C(PHY_STOPPED),\ | 386 | |
379 | C(PHY_STARTING),\ | 387 | /** |
380 | C(PHY_SUB_INITIAL),\ | 388 | * Wait state for the IAF Unsolicited frame notification |
381 | C(PHY_SUB_AWAIT_OSSP_EN),\ | 389 | */ |
382 | C(PHY_SUB_AWAIT_SAS_SPEED_EN),\ | 390 | SCI_PHY_SUB_AWAIT_IAF_UF, |
383 | C(PHY_SUB_AWAIT_IAF_UF),\ | 391 | |
384 | C(PHY_SUB_AWAIT_SAS_POWER),\ | 392 | /** |
385 | C(PHY_SUB_AWAIT_SATA_POWER),\ | 393 | * Wait state for the request to consume power |
386 | C(PHY_SUB_AWAIT_SATA_PHY_EN),\ | 394 | */ |
387 | C(PHY_SUB_AWAIT_SATA_SPEED_EN),\ | 395 | SCI_PHY_SUB_AWAIT_SAS_POWER, |
388 | C(PHY_SUB_AWAIT_SIG_FIS_UF),\ | 396 | |
389 | C(PHY_SUB_FINAL),\ | 397 | /** |
390 | C(PHY_READY),\ | 398 | * Wait state for request to consume power |
391 | C(PHY_RESETTING),\ | 399 | */ |
392 | C(PHY_FINAL),\ | 400 | SCI_PHY_SUB_AWAIT_SATA_POWER, |
393 | } | 401 | |
394 | #undef C | 402 | /** |
395 | #define C(a) SCI_##a | 403 | * Wait state for the SATA PHY notification |
396 | enum sci_phy_states PHY_STATES; | 404 | */ |
397 | #undef C | 405 | SCI_PHY_SUB_AWAIT_SATA_PHY_EN, |
406 | |||
407 | /** | ||
408 | * Wait for the SATA PHY speed notification | ||
409 | */ | ||
410 | SCI_PHY_SUB_AWAIT_SATA_SPEED_EN, | ||
411 | |||
412 | /** | ||
413 | * Wait state for the SIGNATURE FIS unsolicited frame notification | ||
414 | */ | ||
415 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF, | ||
416 | |||
417 | /** | ||
418 | * Exit state for this state machine | ||
419 | */ | ||
420 | SCI_PHY_SUB_FINAL, | ||
421 | |||
422 | /** | ||
423 | * This state indicates that the phy is now ready. Thus, the user
424 | * is able to perform IO operations utilizing this phy as long as it | ||
425 | * is currently part of a valid port. | ||
426 | * This state is entered from the STARTING state. | ||
427 | */ | ||
428 | SCI_PHY_READY, | ||
429 | |||
430 | /** | ||
431 | * This state indicates that the phy is in the process of being reset. | ||
432 | * In this state no new IO operations are permitted on this phy. | ||
433 | * This state is entered from the READY state. | ||
434 | */ | ||
435 | SCI_PHY_RESETTING, | ||
436 | |||
437 | /** | ||
438 | * Simply the final state for the base phy state machine. | ||
439 | */ | ||
440 | SCI_PHY_FINAL, | ||
441 | }; | ||
398 | 442 | ||
399 | void sci_phy_construct( | 443 | void sci_phy_construct( |
400 | struct isci_phy *iphy, | 444 | struct isci_phy *iphy, |
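
The left column of this phy.h hunk replaces the hand-written enum with an X-macro list: PHY_STATES is defined once with each entry wrapped in C(...), and C is redefined to #a (stringify) in phy.c to build the name table and to SCI_##a in phy.h to build the enum, so the two can never drift apart. A standalone sketch of the idiom with a shortened, hypothetical state list:

#include <stdio.h>

#define STATES { \
	C(STATE_INITIAL), \
	C(STATE_READY),   \
	C(STATE_FINAL),   \
}

/* Expansion 1: the enum (C pastes a prefix onto each name). */
#define C(a) DEMO_##a
enum demo_states STATES;
#undef C

/* Expansion 2: the matching string table (C stringifies each name). */
#define C(a) #a
static const char * const demo_state_names[] = STATES;
#undef C

int main(void)
{
	/* an enum value indexes straight into the name table */
	printf("%s\n", demo_state_names[DEMO_STATE_READY]);
	return 0;
}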
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index 13098b09a82..8f6f9b77e41 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c | |||
@@ -60,29 +60,18 @@ | |||
60 | #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) | 60 | #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) |
61 | #define SCU_DUMMY_INDEX (0xFFFF) | 61 | #define SCU_DUMMY_INDEX (0xFFFF) |
62 | 62 | ||
63 | #undef C | 63 | static void isci_port_change_state(struct isci_port *iport, enum isci_status status) |
64 | #define C(a) (#a) | ||
65 | const char *port_state_name(enum sci_port_states state) | ||
66 | { | 64 | { |
67 | static const char * const strings[] = PORT_STATES; | 65 | unsigned long flags; |
68 | |||
69 | return strings[state]; | ||
70 | } | ||
71 | #undef C | ||
72 | |||
73 | static struct device *sciport_to_dev(struct isci_port *iport) | ||
74 | { | ||
75 | int i = iport->physical_port_index; | ||
76 | struct isci_port *table; | ||
77 | struct isci_host *ihost; | ||
78 | |||
79 | if (i == SCIC_SDS_DUMMY_PORT) | ||
80 | i = SCI_MAX_PORTS+1; | ||
81 | 66 | ||
82 | table = iport - i; | 67 | dev_dbg(&iport->isci_host->pdev->dev, |
83 | ihost = container_of(table, typeof(*ihost), ports[0]); | 68 | "%s: iport = %p, state = 0x%x\n", |
69 | __func__, iport, status); | ||
84 | 70 | ||
85 | return &ihost->pdev->dev; | 71 | /* XXX pointless lock */ |
72 | spin_lock_irqsave(&iport->state_lock, flags); | ||
73 | iport->status = status; | ||
74 | spin_unlock_irqrestore(&iport->state_lock, flags); | ||
86 | } | 75 | } |
87 | 76 | ||
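
The removed sciport_to_dev() recovers a logging device with pointer arithmetic: it steps back from the port to ports[0] using the port's own index, then uses container_of() to reach the owning isci_host. A self-contained sketch of that idiom, with hypothetical struct names standing in for the driver's types and a simplified container_of (the kernel's adds type checking):

    #include <stddef.h>
    #include <stdio.h>

    #define N_PORTS 4

    struct port { int index; };

    struct host {
            const char *name;
            struct port ports[N_PORTS];
    };

    /* Recover the enclosing structure from a pointer to one member. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Step from any element back to ports[0], then up to the host --
     * the same trick sciport_to_dev() plays with physical_port_index. */
    static struct host *port_to_host(struct port *p)
    {
            struct port *base = p - p->index;

            return container_of(base, struct host, ports[0]);
    }

    int main(void)
    {
            struct host h = { .name = "host0" };
            int i;

            for (i = 0; i < N_PORTS; i++)
                    h.ports[i].index = i;

            printf("%s\n", port_to_host(&h.ports[2])->name); /* host0 */
            return 0;
    }
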
88 | static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) | 77 | static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) |
@@ -125,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport) | |||
125 | * value is returned if the specified port is not valid. When this value is | 114 | * value is returned if the specified port is not valid. When this value is |
126 | * returned, no data is copied to the properties output parameter. | 115 | * returned, no data is copied to the properties output parameter. |
127 | */ | 116 | */ |
128 | enum sci_status sci_port_get_properties(struct isci_port *iport, | 117 | static enum sci_status sci_port_get_properties(struct isci_port *iport, |
129 | struct sci_port_properties *prop) | 118 | struct sci_port_properties *prop) |
130 | { | 119 | { |
131 | if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) | 120 | if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) |
@@ -156,15 +145,48 @@ static void sci_port_bcn_enable(struct isci_port *iport) | |||
156 | } | 145 | } |
157 | } | 146 | } |
158 | 147 | ||
148 | /* called under sci_lock to stabilize phy:port associations */ | ||
149 | void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport) | ||
150 | { | ||
151 | int i; | ||
152 | |||
153 | clear_bit(IPORT_BCN_BLOCKED, &iport->flags); | ||
154 | wake_up(&ihost->eventq); | ||
155 | |||
156 | if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags)) | ||
157 | return; | ||
158 | |||
159 | for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) { | ||
160 | struct isci_phy *iphy = iport->phy_table[i]; | ||
161 | |||
162 | if (!iphy) | ||
163 | continue; | ||
164 | |||
165 | ihost->sas_ha.notify_port_event(&iphy->sas_phy, | ||
166 | PORTE_BROADCAST_RCVD); | ||
167 | break; | ||
168 | } | ||
169 | } | ||
170 | |||
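
isci_port_bcn_enable() is the replay half of a defer-and-coalesce scheme: while IPORT_BCN_BLOCKED is set, broadcast change notifications only latch IPORT_BCN_PENDING; unblocking replays at most one PORTE_BROADCAST_RCVD. A userspace sketch of the same pattern with C11 atomics (the kernel side uses set_bit/test_and_clear_bit, and the races here are closed by the driver's locking):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { BCN_BLOCKED = 1 << 0, BCN_PENDING = 1 << 1 };

    static _Atomic unsigned long flags;

    static void bc_change_received(void)
    {
            if (atomic_load(&flags) & BCN_BLOCKED) {
                    atomic_fetch_or(&flags, BCN_PENDING); /* defer */
                    return;
            }
            puts("notify: PORTE_BROADCAST_RCVD");
    }

    static void bcn_enable(void)
    {
            atomic_fetch_and(&flags, ~(unsigned long)BCN_BLOCKED);

            /* test_and_clear_bit(IPORT_BCN_PENDING, ...) equivalent */
            if (atomic_fetch_and(&flags, ~(unsigned long)BCN_PENDING)
                & BCN_PENDING)
                    puts("notify: replayed PORTE_BROADCAST_RCVD");
    }

    int main(void)
    {
            atomic_fetch_or(&flags, BCN_BLOCKED);
            bc_change_received();   /* deferred */
            bc_change_received();   /* coalesced with the first */
            bcn_enable();           /* exactly one replayed event */
            return 0;
    }
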
159 | static void isci_port_bc_change_received(struct isci_host *ihost, | 171 | static void isci_port_bc_change_received(struct isci_host *ihost, |
160 | struct isci_port *iport, | 172 | struct isci_port *iport, |
161 | struct isci_phy *iphy) | 173 | struct isci_phy *iphy) |
162 | { | 174 | { |
163 | dev_dbg(&ihost->pdev->dev, | 175 | if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) { |
164 | "%s: isci_phy = %p, sas_phy = %p\n", | 176 | dev_dbg(&ihost->pdev->dev, |
165 | __func__, iphy, &iphy->sas_phy); | 177 | "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n", |
178 | __func__, iphy, &iphy->sas_phy); | ||
179 | set_bit(IPORT_BCN_PENDING, &iport->flags); | ||
180 | atomic_inc(&iport->event); | ||
181 | wake_up(&ihost->eventq); | ||
182 | } else { | ||
183 | dev_dbg(&ihost->pdev->dev, | ||
184 | "%s: isci_phy = %p, sas_phy = %p\n", | ||
185 | __func__, iphy, &iphy->sas_phy); | ||
166 | 186 | ||
167 | ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD); | 187 | ihost->sas_ha.notify_port_event(&iphy->sas_phy, |
188 | PORTE_BROADCAST_RCVD); | ||
189 | } | ||
168 | sci_port_bcn_enable(iport); | 190 | sci_port_bcn_enable(iport); |
169 | } | 191 | } |
170 | 192 | ||
@@ -176,15 +198,21 @@ static void isci_port_link_up(struct isci_host *isci_host, | |||
176 | struct sci_port_properties properties; | 198 | struct sci_port_properties properties; |
177 | unsigned long success = true; | 199 | unsigned long success = true; |
178 | 200 | ||
201 | BUG_ON(iphy->isci_port != NULL); | ||
202 | |||
203 | iphy->isci_port = iport; | ||
204 | |||
179 | dev_dbg(&isci_host->pdev->dev, | 205 | dev_dbg(&isci_host->pdev->dev, |
180 | "%s: isci_port = %p\n", | 206 | "%s: isci_port = %p\n", |
181 | __func__, iport); | 207 | __func__, iport); |
182 | 208 | ||
183 | spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); | 209 | spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); |
184 | 210 | ||
211 | isci_port_change_state(iphy->isci_port, isci_starting); | ||
212 | |||
185 | sci_port_get_properties(iport, &properties); | 213 | sci_port_get_properties(iport, &properties); |
186 | 214 | ||
187 | if (iphy->protocol == SAS_PROTOCOL_SATA) { | 215 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { |
188 | u64 attached_sas_address; | 216 | u64 attached_sas_address; |
189 | 217 | ||
190 | iphy->sas_phy.oob_mode = SATA_OOB_MODE; | 218 | iphy->sas_phy.oob_mode = SATA_OOB_MODE; |
@@ -204,7 +232,7 @@ static void isci_port_link_up(struct isci_host *isci_host, | |||
204 | 232 | ||
205 | memcpy(&iphy->sas_phy.attached_sas_addr, | 233 | memcpy(&iphy->sas_phy.attached_sas_addr, |
206 | &attached_sas_address, sizeof(attached_sas_address)); | 234 | &attached_sas_address, sizeof(attached_sas_address)); |
207 | } else if (iphy->protocol == SAS_PROTOCOL_SSP) { | 235 | } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { |
208 | iphy->sas_phy.oob_mode = SAS_OOB_MODE; | 236 | iphy->sas_phy.oob_mode = SAS_OOB_MODE; |
209 | iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); | 237 | iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); |
210 | 238 | ||
@@ -212,7 +240,7 @@ static void isci_port_link_up(struct isci_host *isci_host, | |||
212 | memcpy(iphy->sas_phy.attached_sas_addr, | 240 | memcpy(iphy->sas_phy.attached_sas_addr, |
213 | iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE); | 241 | iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE); |
214 | } else { | 242 | } else { |
215 | dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__); | 243 | dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__); |
216 | success = false; | 244 | success = false; |
217 | } | 245 | } |
218 | 246 | ||
@@ -250,11 +278,14 @@ static void isci_port_link_down(struct isci_host *isci_host, | |||
250 | /* check to see if this is the last phy on this port. */ | 278 | /* check to see if this is the last phy on this port. */ |
251 | if (isci_phy->sas_phy.port && | 279 | if (isci_phy->sas_phy.port && |
252 | isci_phy->sas_phy.port->num_phys == 1) { | 280 | isci_phy->sas_phy.port->num_phys == 1) { |
281 | atomic_inc(&isci_port->event); | ||
282 | isci_port_bcn_enable(isci_host, isci_port); | ||
283 | |||
253 | /* change the state for all devices on this port. The | 284 | /* change the state for all devices on this port. The |
254 | * next task sent to this device will be returned as | 285 | * next task sent to this device will be returned as |
255 | * SAS_TASK_UNDELIVERED, and the scsi mid layer will | 286 | * SAS_TASK_UNDELIVERED, and the scsi mid layer will |
256 | * remove the target | 287 | * remove the target |
257 | */ | 288 | */ |
258 | list_for_each_entry(isci_device, | 289 | list_for_each_entry(isci_device, |
259 | &isci_port->remote_dev_list, | 290 | &isci_port->remote_dev_list, |
260 | node) { | 291 | node) { |
@@ -264,6 +295,7 @@ static void isci_port_link_down(struct isci_host *isci_host, | |||
264 | set_bit(IDEV_GONE, &isci_device->flags); | 295 | set_bit(IDEV_GONE, &isci_device->flags); |
265 | } | 296 | } |
266 | } | 297 | } |
298 | isci_port_change_state(isci_port, isci_stopping); | ||
267 | } | 299 | } |
268 | 300 | ||
269 | /* Notify libsas of the broken link; this will trigger calls to our | 301 |
@@ -273,35 +305,49 @@ static void isci_port_link_down(struct isci_host *isci_host, | |||
273 | isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy, | 305 | isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy, |
274 | PHYE_LOSS_OF_SIGNAL); | 306 | PHYE_LOSS_OF_SIGNAL); |
275 | 307 | ||
308 | isci_phy->isci_port = NULL; | ||
309 | |||
276 | dev_dbg(&isci_host->pdev->dev, | 310 | dev_dbg(&isci_host->pdev->dev, |
277 | "%s: isci_port = %p - Done\n", __func__, isci_port); | 311 | "%s: isci_port = %p - Done\n", __func__, isci_port); |
278 | } | 312 | } |
279 | 313 | ||
280 | static bool is_port_ready_state(enum sci_port_states state) | 314 | |
315 | /** | ||
316 | * isci_port_ready() - This function is called by the sci core when a link | ||
317 | * becomes ready. | ||
318 | * @isci_host: This parameter specifies the isci host object. | ||
319 | * @port: This parameter specifies the sci port with the active link. | ||
320 | * | ||
321 | */ | ||
322 | static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port) | ||
281 | { | 323 | { |
282 | switch (state) { | 324 | dev_dbg(&isci_host->pdev->dev, |
283 | case SCI_PORT_READY: | 325 | "%s: isci_port = %p\n", __func__, isci_port); |
284 | case SCI_PORT_SUB_WAITING: | 326 | |
285 | case SCI_PORT_SUB_OPERATIONAL: | 327 | complete_all(&isci_port->start_complete); |
286 | case SCI_PORT_SUB_CONFIGURING: | 328 | isci_port_change_state(isci_port, isci_ready); |
287 | return true; | 329 | return; |
288 | default: | ||
289 | return false; | ||
290 | } | ||
291 | } | 330 | } |
292 | 331 | ||
293 | /* flag dummy rnc handling when exiting a ready state */ | 332 | /** |
294 | static void port_state_machine_change(struct isci_port *iport, | 333 | * isci_port_not_ready() - This function is called by the sci core when a link |
295 | enum sci_port_states state) | 334 | * is not ready. All remote devices on this link will be removed if they are |
335 | * in the stopping state. | ||
336 | * @isci_host: This parameter specifies the isci host object. | ||
337 | * @isci_port: This parameter specifies the sci port with the link that is not ready. | ||
338 | * | ||
339 | */ | ||
340 | static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port) | ||
296 | { | 341 | { |
297 | struct sci_base_state_machine *sm = &iport->sm; | 342 | dev_dbg(&isci_host->pdev->dev, |
298 | enum sci_port_states old_state = sm->current_state_id; | 343 | "%s: isci_port = %p\n", __func__, isci_port); |
299 | 344 | } | |
300 | if (is_port_ready_state(old_state) && !is_port_ready_state(state)) | ||
301 | iport->ready_exit = true; | ||
302 | 345 | ||
303 | sci_change_state(sm, state); | 346 | static void isci_port_stop_complete(struct isci_host *ihost, |
304 | iport->ready_exit = false; | 347 | struct isci_port *iport, |
348 | enum sci_status completion_status) | ||
349 | { | ||
350 | dev_dbg(&ihost->pdev->dev, "Port stop complete\n"); | ||
305 | } | 351 | } |
306 | 352 | ||
307 | /** | 353 | /** |
@@ -315,37 +361,14 @@ static void port_state_machine_change(struct isci_port *iport, | |||
315 | static void isci_port_hard_reset_complete(struct isci_port *isci_port, | 361 | static void isci_port_hard_reset_complete(struct isci_port *isci_port, |
316 | enum sci_status completion_status) | 362 | enum sci_status completion_status) |
317 | { | 363 | { |
318 | struct isci_host *ihost = isci_port->owning_controller; | 364 | dev_dbg(&isci_port->isci_host->pdev->dev, |
319 | |||
320 | dev_dbg(&ihost->pdev->dev, | ||
321 | "%s: isci_port = %p, completion_status=%x\n", | 365 | "%s: isci_port = %p, completion_status=%x\n", |
322 | __func__, isci_port, completion_status); | 366 | __func__, isci_port, completion_status); |
323 | 367 | ||
324 | /* Save the status of the hard reset from the port. */ | 368 | /* Save the status of the hard reset from the port. */ |
325 | isci_port->hard_reset_status = completion_status; | 369 | isci_port->hard_reset_status = completion_status; |
326 | 370 | ||
327 | if (completion_status != SCI_SUCCESS) { | 371 | complete_all(&isci_port->hard_reset_complete); |
328 | |||
329 | /* The reset failed. The port state is now SCI_PORT_FAILED. */ | ||
330 | if (isci_port->active_phy_mask == 0) { | ||
331 | int phy_idx = isci_port->last_active_phy; | ||
332 | struct isci_phy *iphy = &ihost->phys[phy_idx]; | ||
333 | |||
334 | /* Generate the link down now to the host, since it | ||
335 | * was intercepted by the hard reset state machine when | ||
336 | * it really happened. | ||
337 | */ | ||
338 | isci_port_link_down(ihost, iphy, isci_port); | ||
339 | } | ||
340 | /* Advance the port state so that link state changes will be | ||
341 | * noticed. | ||
342 | */ | ||
343 | port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING); | ||
344 | |||
345 | } | ||
346 | clear_bit(IPORT_RESET_PENDING, &isci_port->state); | ||
347 | wake_up(&ihost->eventq); | ||
348 | |||
349 | } | 372 | } |
350 | 373 | ||
351 | /* This method will return a true value if the specified phy can be assigned to | 374 | /* This method will return a true value if the specified phy can be assigned to |
@@ -517,7 +540,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a | |||
517 | */ | 540 | */ |
518 | iphy = sci_port_get_a_connected_phy(iport); | 541 | iphy = sci_port_get_a_connected_phy(iport); |
519 | if (iphy) { | 542 | if (iphy) { |
520 | if (iphy->protocol != SAS_PROTOCOL_SATA) { | 543 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { |
521 | sci_phy_get_attached_sas_address(iphy, sas); | 544 | sci_phy_get_attached_sas_address(iphy, sas); |
522 | } else { | 545 | } else { |
523 | sci_phy_get_sas_address(iphy, sas); | 546 | sci_phy_get_sas_address(iphy, sas); |
@@ -612,26 +635,19 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id) | |||
612 | } | 635 | } |
613 | } | 636 | } |
614 | 637 | ||
615 | static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy) | 638 | static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, |
616 | { | 639 | bool do_notify_user) |
617 | sci_phy_resume(iphy); | ||
618 | iport->enabled_phy_mask |= 1 << iphy->phy_index; | ||
619 | } | ||
620 | |||
621 | static void sci_port_activate_phy(struct isci_port *iport, | ||
622 | struct isci_phy *iphy, | ||
623 | u8 flags) | ||
624 | { | 640 | { |
625 | struct isci_host *ihost = iport->owning_controller; | 641 | struct isci_host *ihost = iport->owning_controller; |
626 | 642 | ||
627 | if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME)) | 643 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) |
628 | sci_phy_resume(iphy); | 644 | sci_phy_resume(iphy); |
629 | 645 | ||
630 | iport->active_phy_mask |= 1 << iphy->phy_index; | 646 | iport->active_phy_mask |= 1 << iphy->phy_index; |
631 | 647 | ||
632 | sci_controller_clear_invalid_phy(ihost, iphy); | 648 | sci_controller_clear_invalid_phy(ihost, iphy); |
633 | 649 | ||
634 | if (flags & PF_NOTIFY) | 650 | if (do_notify_user == true) |
635 | isci_port_link_up(ihost, iport, iphy); | 651 | isci_port_link_up(ihost, iport, iphy); |
636 | } | 652 | } |
637 | 653 | ||
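
The two sides differ in how sci_port_activate_phy() is parameterized: the left column packs independent options into one flags byte (PF_NOTIFY, PF_RESUME, defined in port.h on that side), while the right collapses them into a single do_notify_user bool and resumes every non-SATA phy unconditionally. A small sketch of the flag-bit style, under the assumption that callers may want any combination:

    #include <stdio.h>

    /* The two flag bits from port.h (left-column variant of this code). */
    #define PF_NOTIFY (1 << 0)
    #define PF_RESUME (1 << 1)

    /* One flags argument lets callers pick any combination -- notify
     * only, resume only, both, or neither -- without a bool per option. */
    static void activate_phy(int phy_index, unsigned char flags)
    {
            if (flags & PF_RESUME)
                    printf("phy%d: resumed\n", phy_index);
            if (flags & PF_NOTIFY)
                    printf("phy%d: link-up reported\n", phy_index);
    }

    int main(void)
    {
            activate_phy(0, PF_NOTIFY | PF_RESUME); /* normal link up */
            activate_phy(1, PF_RESUME);             /* resetting: stay quiet */
            return 0;
    }
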
@@ -641,19 +657,12 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, | |||
641 | struct isci_host *ihost = iport->owning_controller; | 657 | struct isci_host *ihost = iport->owning_controller; |
642 | 658 | ||
643 | iport->active_phy_mask &= ~(1 << iphy->phy_index); | 659 | iport->active_phy_mask &= ~(1 << iphy->phy_index); |
644 | iport->enabled_phy_mask &= ~(1 << iphy->phy_index); | ||
645 | if (!iport->active_phy_mask) | ||
646 | iport->last_active_phy = iphy->phy_index; | ||
647 | 660 | ||
648 | iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; | 661 | iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; |
649 | 662 | ||
650 | /* Re-assign the phy back to the LP as if it were a narrow port for APC | 663 | /* Re-assign the phy back to the LP as if it were a narrow port */ |
651 | * mode. For MPC mode, the phy will remain in the port. | 664 | writel(iphy->phy_index, |
652 | */ | 665 | &iport->port_pe_configuration_register[iphy->phy_index]); |
653 | if (iport->owning_controller->oem_parameters.controller.mode_type == | ||
654 | SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) | ||
655 | writel(iphy->phy_index, | ||
656 | &iport->port_pe_configuration_register[iphy->phy_index]); | ||
657 | 666 | ||
658 | if (do_notify_user == true) | 667 | if (do_notify_user == true) |
659 | isci_port_link_down(ihost, iphy, iport); | 668 | isci_port_link_down(ihost, iphy, iport); |
@@ -674,20 +683,49 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i | |||
674 | } | 683 | } |
675 | } | 684 | } |
676 | 685 | ||
686 | static bool is_port_ready_state(enum sci_port_states state) | ||
687 | { | ||
688 | switch (state) { | ||
689 | case SCI_PORT_READY: | ||
690 | case SCI_PORT_SUB_WAITING: | ||
691 | case SCI_PORT_SUB_OPERATIONAL: | ||
692 | case SCI_PORT_SUB_CONFIGURING: | ||
693 | return true; | ||
694 | default: | ||
695 | return false; | ||
696 | } | ||
697 | } | ||
698 | |||
699 | /* flag dummy rnc handling when exiting a ready state */ | ||
700 | static void port_state_machine_change(struct isci_port *iport, | ||
701 | enum sci_port_states state) | ||
702 | { | ||
703 | struct sci_base_state_machine *sm = &iport->sm; | ||
704 | enum sci_port_states old_state = sm->current_state_id; | ||
705 | |||
706 | if (is_port_ready_state(old_state) && !is_port_ready_state(state)) | ||
707 | iport->ready_exit = true; | ||
708 | |||
709 | sci_change_state(sm, state); | ||
710 | iport->ready_exit = false; | ||
711 | } | ||
712 | |||
677 | /** | 713 | /** |
678 | * sci_port_general_link_up_handler - phy can be assigned to port? | 714 | * sci_port_general_link_up_handler - phy can be assigned to port? |
679 | * @sci_port: sci_port object which has a phy that has gone link up. | 715 | * @sci_port: sci_port object which has a phy that has gone link up. |
680 | * @sci_phy: This is the struct isci_phy object that has gone link up. | 716 | * @sci_phy: This is the struct isci_phy object that has gone link up. |
681 | * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy | 717 | * @do_notify_user: This parameter specifies whether to inform the user (via |
718 | * sci_port_link_up()) as to the fact that a new phy has become ready. | ||
682 | * | 719 | * |
683 | * Determine if this phy can be assigned to this port. If the phy is | 720 | * Determine if this phy can be assigned to this |
684 | * not a valid PHY for this port then the function will notify the user. | 721 | * port. If the phy is not a valid PHY for |
685 | * A PHY can only be part of a port if its attached SAS ADDRESS is the | 722 | * this port then the function will notify the user. A PHY can only be |
686 | * same as all other PHYs in the same port. | 723 | * part of a port if its attached SAS ADDRESS is the same as all other PHYs in |
687 | */ | 724 | * the same port. |
687 | */ | 725 | */ |
688 | static void sci_port_general_link_up_handler(struct isci_port *iport, | 726 | static void sci_port_general_link_up_handler(struct isci_port *iport, |
689 | struct isci_phy *iphy, | 727 | struct isci_phy *iphy, |
690 | u8 flags) | 728 | bool do_notify_user) |
691 | { | 729 | { |
692 | struct sci_sas_address port_sas_address; | 730 | struct sci_sas_address port_sas_address; |
693 | struct sci_sas_address phy_sas_address; | 731 | struct sci_sas_address phy_sas_address; |
@@ -705,7 +743,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport, | |||
705 | iport->active_phy_mask == 0) { | 743 | iport->active_phy_mask == 0) { |
706 | struct sci_base_state_machine *sm = &iport->sm; | 744 | struct sci_base_state_machine *sm = &iport->sm; |
707 | 745 | ||
708 | sci_port_activate_phy(iport, iphy, flags); | 746 | sci_port_activate_phy(iport, iphy, do_notify_user); |
709 | if (sm->current_state_id == SCI_PORT_RESETTING) | 747 | if (sm->current_state_id == SCI_PORT_RESETTING) |
710 | port_state_machine_change(iport, SCI_PORT_READY); | 748 | port_state_machine_change(iport, SCI_PORT_READY); |
711 | } else | 749 | } else |
@@ -751,19 +789,16 @@ static bool sci_port_is_wide(struct isci_port *iport) | |||
751 | * wide ports and direct attached phys. Since there are no wide ported SATA | 789 | * wide ports and direct attached phys. Since there are no wide ported SATA |
752 | * devices this could become an invalid port configuration. | 790 | * devices this could become an invalid port configuration. |
753 | */ | 791 | */ |
754 | bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy) | 792 | bool sci_port_link_detected( |
793 | struct isci_port *iport, | ||
794 | struct isci_phy *iphy) | ||
755 | { | 795 | { |
756 | if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && | 796 | if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && |
757 | (iphy->protocol == SAS_PROTOCOL_SATA)) { | 797 | (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && |
758 | if (sci_port_is_wide(iport)) { | 798 | sci_port_is_wide(iport)) { |
759 | sci_port_invalid_link_up(iport, iphy); | 799 | sci_port_invalid_link_up(iport, iphy); |
760 | return false; | 800 | |
761 | } else { | 801 | return false; |
762 | struct isci_host *ihost = iport->owning_controller; | ||
763 | struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]); | ||
764 | writel(iphy->phy_index, | ||
765 | &dst_port->port_pe_configuration_register[iphy->phy_index]); | ||
766 | } | ||
767 | } | 802 | } |
768 | 803 | ||
769 | return true; | 804 | return true; |
@@ -798,9 +833,10 @@ static void port_timeout(unsigned long data) | |||
798 | __func__, | 833 | __func__, |
799 | iport); | 834 | iport); |
800 | } else if (current_state == SCI_PORT_STOPPING) { | 835 | } else if (current_state == SCI_PORT_STOPPING) { |
801 | dev_dbg(sciport_to_dev(iport), | 836 | /* if the port is still stopping then the stop has not completed */ |
802 | "%s: port%d: stop complete timeout\n", | 837 | isci_port_stop_complete(iport->owning_controller, |
803 | __func__, iport->physical_port_index); | 838 | iport, |
839 | SCI_FAILURE_TIMEOUT); | ||
804 | } else { | 840 | } else { |
805 | /* The port is in the ready state and we have a timer | 841 | /* The port is in the ready state and we have a timer |
806 | * reporting a timeout this should not happen. | 842 | * reporting a timeout this should not happen. |
@@ -952,34 +988,26 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine | |||
952 | } | 988 | } |
953 | } | 989 | } |
954 | 990 | ||
955 | static void scic_sds_port_ready_substate_waiting_exit( | ||
956 | struct sci_base_state_machine *sm) | ||
957 | { | ||
958 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | ||
959 | sci_port_resume_port_task_scheduler(iport); | ||
960 | } | ||
961 | |||
962 | static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) | 991 | static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) |
963 | { | 992 | { |
964 | u32 index; | 993 | u32 index; |
965 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 994 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
966 | struct isci_host *ihost = iport->owning_controller; | 995 | struct isci_host *ihost = iport->owning_controller; |
967 | 996 | ||
968 | dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n", | 997 | isci_port_ready(ihost, iport); |
969 | __func__, iport->physical_port_index); | ||
970 | 998 | ||
971 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 999 | for (index = 0; index < SCI_MAX_PHYS; index++) { |
972 | if (iport->phy_table[index]) { | 1000 | if (iport->phy_table[index]) { |
973 | writel(iport->physical_port_index, | 1001 | writel(iport->physical_port_index, |
974 | &iport->port_pe_configuration_register[ | 1002 | &iport->port_pe_configuration_register[ |
975 | iport->phy_table[index]->phy_index]); | 1003 | iport->phy_table[index]->phy_index]); |
976 | if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0) | ||
977 | sci_port_resume_phy(iport, iport->phy_table[index]); | ||
978 | } | 1004 | } |
979 | } | 1005 | } |
980 | 1006 | ||
981 | sci_port_update_viit_entry(iport); | 1007 | sci_port_update_viit_entry(iport); |
982 | 1008 | ||
1009 | sci_port_resume_port_task_scheduler(iport); | ||
1010 | |||
983 | /* | 1011 | /* |
984 | * Post the dummy task for the port so the hardware can schedule | 1012 | * Post the dummy task for the port so the hardware can schedule |
985 | * io correctly | 1013 | * io correctly |
@@ -1032,8 +1060,7 @@ static void sci_port_ready_substate_operational_exit(struct sci_base_state_machi | |||
1032 | */ | 1060 | */ |
1033 | sci_port_abort_dummy_request(iport); | 1061 | sci_port_abort_dummy_request(iport); |
1034 | 1062 | ||
1035 | dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", | 1063 | isci_port_not_ready(ihost, iport); |
1036 | __func__, iport->physical_port_index); | ||
1037 | 1064 | ||
1038 | if (iport->ready_exit) | 1065 | if (iport->ready_exit) |
1039 | sci_port_invalidate_dummy_remote_node(iport); | 1066 | sci_port_invalidate_dummy_remote_node(iport); |
@@ -1045,12 +1072,22 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach | |||
1045 | struct isci_host *ihost = iport->owning_controller; | 1072 | struct isci_host *ihost = iport->owning_controller; |
1046 | 1073 | ||
1047 | if (iport->active_phy_mask == 0) { | 1074 | if (iport->active_phy_mask == 0) { |
1048 | dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", | 1075 | isci_port_not_ready(ihost, iport); |
1049 | __func__, iport->physical_port_index); | ||
1050 | 1076 | ||
1051 | port_state_machine_change(iport, SCI_PORT_SUB_WAITING); | 1077 | port_state_machine_change(iport, |
1052 | } else | 1078 | SCI_PORT_SUB_WAITING); |
1053 | port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); | 1079 | } else if (iport->started_request_count == 0) |
1080 | port_state_machine_change(iport, | ||
1081 | SCI_PORT_SUB_OPERATIONAL); | ||
1082 | } | ||
1083 | |||
1084 | static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) | ||
1085 | { | ||
1086 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | ||
1087 | |||
1088 | sci_port_suspend_port_task_scheduler(iport); | ||
1089 | if (iport->ready_exit) | ||
1090 | sci_port_invalidate_dummy_remote_node(iport); | ||
1054 | } | 1091 | } |
1055 | 1092 | ||
1056 | enum sci_status sci_port_start(struct isci_port *iport) | 1093 | enum sci_status sci_port_start(struct isci_port *iport) |
@@ -1062,8 +1099,8 @@ enum sci_status sci_port_start(struct isci_port *iport) | |||
1062 | 1099 | ||
1063 | state = iport->sm.current_state_id; | 1100 | state = iport->sm.current_state_id; |
1064 | if (state != SCI_PORT_STOPPED) { | 1101 | if (state != SCI_PORT_STOPPED) { |
1065 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1102 | dev_warn(sciport_to_dev(iport), |
1066 | __func__, port_state_name(state)); | 1103 | "%s: in wrong state: %d\n", __func__, state); |
1067 | return SCI_FAILURE_INVALID_STATE; | 1104 | return SCI_FAILURE_INVALID_STATE; |
1068 | } | 1105 | } |
1069 | 1106 | ||
@@ -1137,8 +1174,8 @@ enum sci_status sci_port_stop(struct isci_port *iport) | |||
1137 | SCI_PORT_STOPPING); | 1174 | SCI_PORT_STOPPING); |
1138 | return SCI_SUCCESS; | 1175 | return SCI_SUCCESS; |
1139 | default: | 1176 | default: |
1140 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1177 | dev_warn(sciport_to_dev(iport), |
1141 | __func__, port_state_name(state)); | 1178 | "%s: in wrong state: %d\n", __func__, state); |
1142 | return SCI_FAILURE_INVALID_STATE; | 1179 | return SCI_FAILURE_INVALID_STATE; |
1143 | } | 1180 | } |
1144 | } | 1181 | } |
@@ -1152,8 +1189,8 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout) | |||
1152 | 1189 | ||
1153 | state = iport->sm.current_state_id; | 1190 | state = iport->sm.current_state_id; |
1154 | if (state != SCI_PORT_SUB_OPERATIONAL) { | 1191 | if (state != SCI_PORT_SUB_OPERATIONAL) { |
1155 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1192 | dev_warn(sciport_to_dev(iport), |
1156 | __func__, port_state_name(state)); | 1193 | "%s: in wrong state: %d\n", __func__, state); |
1157 | return SCI_FAILURE_INVALID_STATE; | 1194 | return SCI_FAILURE_INVALID_STATE; |
1158 | } | 1195 | } |
1159 | 1196 | ||
@@ -1199,8 +1236,6 @@ enum sci_status sci_port_add_phy(struct isci_port *iport, | |||
1199 | enum sci_status status; | 1236 | enum sci_status status; |
1200 | enum sci_port_states state; | 1237 | enum sci_port_states state; |
1201 | 1238 | ||
1202 | sci_port_bcn_enable(iport); | ||
1203 | |||
1204 | state = iport->sm.current_state_id; | 1239 | state = iport->sm.current_state_id; |
1205 | switch (state) { | 1240 | switch (state) { |
1206 | case SCI_PORT_STOPPED: { | 1241 | case SCI_PORT_STOPPED: { |
@@ -1230,7 +1265,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport, | |||
1230 | if (status != SCI_SUCCESS) | 1265 | if (status != SCI_SUCCESS) |
1231 | return status; | 1266 | return status; |
1232 | 1267 | ||
1233 | sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); | 1268 | sci_port_general_link_up_handler(iport, iphy, true); |
1234 | iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; | 1269 | iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; |
1235 | port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); | 1270 | port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); |
1236 | 1271 | ||
@@ -1240,7 +1275,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport, | |||
1240 | 1275 | ||
1241 | if (status != SCI_SUCCESS) | 1276 | if (status != SCI_SUCCESS) |
1242 | return status; | 1277 | return status; |
1243 | sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY); | 1278 | sci_port_general_link_up_handler(iport, iphy, true); |
1244 | 1279 | ||
1245 | /* Re-enter the configuring state since this may be the last phy in | 1280 | /* Re-enter the configuring state since this may be the last phy in |
1246 | * the port. | 1281 | * the port. |
@@ -1249,8 +1284,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport, | |||
1249 | SCI_PORT_SUB_CONFIGURING); | 1284 | SCI_PORT_SUB_CONFIGURING); |
1250 | return SCI_SUCCESS; | 1285 | return SCI_SUCCESS; |
1251 | default: | 1286 | default: |
1252 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1287 | dev_warn(sciport_to_dev(iport), |
1253 | __func__, port_state_name(state)); | 1288 | "%s: in wrong state: %d\n", __func__, state); |
1254 | return SCI_FAILURE_INVALID_STATE; | 1289 | return SCI_FAILURE_INVALID_STATE; |
1255 | } | 1290 | } |
1256 | } | 1291 | } |
@@ -1299,8 +1334,8 @@ enum sci_status sci_port_remove_phy(struct isci_port *iport, | |||
1299 | SCI_PORT_SUB_CONFIGURING); | 1334 | SCI_PORT_SUB_CONFIGURING); |
1300 | return SCI_SUCCESS; | 1335 | return SCI_SUCCESS; |
1301 | default: | 1336 | default: |
1302 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1337 | dev_warn(sciport_to_dev(iport), |
1303 | __func__, port_state_name(state)); | 1338 | "%s: in wrong state: %d\n", __func__, state); |
1304 | return SCI_FAILURE_INVALID_STATE; | 1339 | return SCI_FAILURE_INVALID_STATE; |
1305 | } | 1340 | } |
1306 | } | 1341 | } |
@@ -1316,13 +1351,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport, | |||
1316 | /* Since this is the first phy going link up for the port we | 1351 | /* Since this is the first phy going link up for the port we |
1317 | * can just enable it and continue | 1352 | * can just enable it and continue |
1318 | */ | 1353 | */ |
1319 | sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME); | 1354 | sci_port_activate_phy(iport, iphy, true); |
1320 | 1355 | ||
1321 | port_state_machine_change(iport, | 1356 | port_state_machine_change(iport, |
1322 | SCI_PORT_SUB_OPERATIONAL); | 1357 | SCI_PORT_SUB_OPERATIONAL); |
1323 | return SCI_SUCCESS; | 1358 | return SCI_SUCCESS; |
1324 | case SCI_PORT_SUB_OPERATIONAL: | 1359 | case SCI_PORT_SUB_OPERATIONAL: |
1325 | sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); | 1360 | sci_port_general_link_up_handler(iport, iphy, true); |
1326 | return SCI_SUCCESS; | 1361 | return SCI_SUCCESS; |
1327 | case SCI_PORT_RESETTING: | 1362 | case SCI_PORT_RESETTING: |
1328 | /* TODO We should make sure that the phy that has gone | 1363 | /* TODO We should make sure that the phy that has gone |
@@ -1339,11 +1374,11 @@ enum sci_status sci_port_link_up(struct isci_port *iport, | |||
1339 | /* In the resetting state we don't notify the user regarding | 1374 | /* In the resetting state we don't notify the user regarding |
1340 | * link up and link down notifications. | 1375 | * link up and link down notifications. |
1341 | */ | 1376 | */ |
1342 | sci_port_general_link_up_handler(iport, iphy, PF_RESUME); | 1377 | sci_port_general_link_up_handler(iport, iphy, false); |
1343 | return SCI_SUCCESS; | 1378 | return SCI_SUCCESS; |
1344 | default: | 1379 | default: |
1345 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1380 | dev_warn(sciport_to_dev(iport), |
1346 | __func__, port_state_name(state)); | 1381 | "%s: in wrong state: %d\n", __func__, state); |
1347 | return SCI_FAILURE_INVALID_STATE; | 1382 | return SCI_FAILURE_INVALID_STATE; |
1348 | } | 1383 | } |
1349 | } | 1384 | } |
@@ -1372,8 +1407,8 @@ enum sci_status sci_port_link_down(struct isci_port *iport, | |||
1372 | sci_port_deactivate_phy(iport, iphy, false); | 1407 | sci_port_deactivate_phy(iport, iphy, false); |
1373 | return SCI_SUCCESS; | 1408 | return SCI_SUCCESS; |
1374 | default: | 1409 | default: |
1375 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1410 | dev_warn(sciport_to_dev(iport), |
1376 | __func__, port_state_name(state)); | 1411 | "%s: in wrong state: %d\n", __func__, state); |
1377 | return SCI_FAILURE_INVALID_STATE; | 1412 | return SCI_FAILURE_INVALID_STATE; |
1378 | } | 1413 | } |
1379 | } | 1414 | } |
@@ -1392,8 +1427,8 @@ enum sci_status sci_port_start_io(struct isci_port *iport, | |||
1392 | iport->started_request_count++; | 1427 | iport->started_request_count++; |
1393 | return SCI_SUCCESS; | 1428 | return SCI_SUCCESS; |
1394 | default: | 1429 | default: |
1395 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1430 | dev_warn(sciport_to_dev(iport), |
1396 | __func__, port_state_name(state)); | 1431 | "%s: in wrong state: %d\n", __func__, state); |
1397 | return SCI_FAILURE_INVALID_STATE; | 1432 | return SCI_FAILURE_INVALID_STATE; |
1398 | } | 1433 | } |
1399 | } | 1434 | } |
@@ -1407,8 +1442,8 @@ enum sci_status sci_port_complete_io(struct isci_port *iport, | |||
1407 | state = iport->sm.current_state_id; | 1442 | state = iport->sm.current_state_id; |
1408 | switch (state) { | 1443 | switch (state) { |
1409 | case SCI_PORT_STOPPED: | 1444 | case SCI_PORT_STOPPED: |
1410 | dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", | 1445 | dev_warn(sciport_to_dev(iport), |
1411 | __func__, port_state_name(state)); | 1446 | "%s: in wrong state: %d\n", __func__, state); |
1412 | return SCI_FAILURE_INVALID_STATE; | 1447 | return SCI_FAILURE_INVALID_STATE; |
1413 | case SCI_PORT_STOPPING: | 1448 | case SCI_PORT_STOPPING: |
1414 | sci_port_decrement_request_count(iport); | 1449 | sci_port_decrement_request_count(iport); |
@@ -1514,8 +1549,7 @@ static void sci_port_ready_state_enter(struct sci_base_state_machine *sm) | |||
1514 | if (prev_state == SCI_PORT_RESETTING) | 1549 | if (prev_state == SCI_PORT_RESETTING) |
1515 | isci_port_hard_reset_complete(iport, SCI_SUCCESS); | 1550 | isci_port_hard_reset_complete(iport, SCI_SUCCESS); |
1516 | else | 1551 | else |
1517 | dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", | 1552 | isci_port_not_ready(ihost, iport); |
1518 | __func__, iport->physical_port_index); | ||
1519 | 1553 | ||
1520 | /* Post and suspend the dummy remote node context for this port. */ | 1554 | /* Post and suspend the dummy remote node context for this port. */ |
1521 | sci_port_post_dummy_remote_node(iport); | 1555 | sci_port_post_dummy_remote_node(iport); |
@@ -1548,29 +1582,6 @@ static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) | |||
1548 | isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); | 1582 | isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); |
1549 | } | 1583 | } |
1550 | 1584 | ||
1551 | void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout) | ||
1552 | { | ||
1553 | int phy_index; | ||
1554 | u32 phy_mask = iport->active_phy_mask; | ||
1555 | |||
1556 | if (timeout) | ||
1557 | ++iport->hang_detect_users; | ||
1558 | else if (iport->hang_detect_users > 1) | ||
1559 | --iport->hang_detect_users; | ||
1560 | else | ||
1561 | iport->hang_detect_users = 0; | ||
1562 | |||
1563 | if (timeout || (iport->hang_detect_users == 0)) { | ||
1564 | for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { | ||
1565 | if ((phy_mask >> phy_index) & 1) { | ||
1566 | writel(timeout, | ||
1567 | &iport->phy_table[phy_index] | ||
1568 | ->link_layer_registers | ||
1569 | ->link_layer_hang_detection_timeout); | ||
1570 | } | ||
1571 | } | ||
1572 | } | ||
1573 | } | ||
1574 | /* --------------------------------------------------------------------------- */ | 1585 | /* --------------------------------------------------------------------------- */ |
1575 | 1586 | ||
1576 | static const struct sci_base_state sci_port_state_table[] = { | 1587 | static const struct sci_base_state sci_port_state_table[] = { |
@@ -1586,14 +1597,14 @@ static const struct sci_base_state sci_port_state_table[] = { | |||
1586 | }, | 1597 | }, |
1587 | [SCI_PORT_SUB_WAITING] = { | 1598 | [SCI_PORT_SUB_WAITING] = { |
1588 | .enter_state = sci_port_ready_substate_waiting_enter, | 1599 | .enter_state = sci_port_ready_substate_waiting_enter, |
1589 | .exit_state = scic_sds_port_ready_substate_waiting_exit, | ||
1590 | }, | 1600 | }, |
1591 | [SCI_PORT_SUB_OPERATIONAL] = { | 1601 | [SCI_PORT_SUB_OPERATIONAL] = { |
1592 | .enter_state = sci_port_ready_substate_operational_enter, | 1602 | .enter_state = sci_port_ready_substate_operational_enter, |
1593 | .exit_state = sci_port_ready_substate_operational_exit | 1603 | .exit_state = sci_port_ready_substate_operational_exit |
1594 | }, | 1604 | }, |
1595 | [SCI_PORT_SUB_CONFIGURING] = { | 1605 | [SCI_PORT_SUB_CONFIGURING] = { |
1596 | .enter_state = sci_port_ready_substate_configuring_enter | 1606 | .enter_state = sci_port_ready_substate_configuring_enter, |
1607 | .exit_state = sci_port_ready_substate_configuring_exit | ||
1597 | }, | 1608 | }, |
1598 | [SCI_PORT_RESETTING] = { | 1609 | [SCI_PORT_RESETTING] = { |
1599 | .exit_state = sci_port_resetting_state_exit | 1610 | .exit_state = sci_port_resetting_state_exit |
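
sci_port_state_table[] is a table-driven state machine: each sci_port_states entry optionally supplies enter/exit hooks, and sci_change_state() (not shown in this hunk) runs the old state's exit handler before the new state's enter handler. A minimal standalone sketch of that dispatch, with hypothetical ST_* states:

    #include <stdio.h>

    enum state { ST_STOPPED, ST_READY, ST_MAX };

    /* Mirrors struct sci_base_state: optional per-state hooks. */
    struct state_ops {
            void (*enter)(void);
            void (*exit)(void);
    };

    static void ready_enter(void) { puts("ready: resume scheduler"); }
    static void ready_exit(void)  { puts("ready: suspend scheduler"); }

    static const struct state_ops state_table[ST_MAX] = {
            [ST_STOPPED] = { 0 },
            [ST_READY]   = { .enter = ready_enter, .exit = ready_exit },
    };

    static enum state cur = ST_STOPPED;

    /* Sketch of sci_change_state(): old exit, then new enter. */
    static void change_state(enum state next)
    {
            if (state_table[cur].exit)
                    state_table[cur].exit();
            cur = next;
            if (state_table[cur].enter)
                    state_table[cur].enter();
    }

    int main(void)
    {
            change_state(ST_READY);
            change_state(ST_STOPPED);
            return 0;
    }
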
@@ -1611,15 +1622,12 @@ void sci_port_construct(struct isci_port *iport, u8 index, | |||
1611 | iport->logical_port_index = SCIC_SDS_DUMMY_PORT; | 1622 | iport->logical_port_index = SCIC_SDS_DUMMY_PORT; |
1612 | iport->physical_port_index = index; | 1623 | iport->physical_port_index = index; |
1613 | iport->active_phy_mask = 0; | 1624 | iport->active_phy_mask = 0; |
1614 | iport->enabled_phy_mask = 0; | 1625 | iport->ready_exit = false; |
1615 | iport->last_active_phy = 0; | ||
1616 | iport->ready_exit = false; | ||
1617 | 1626 | ||
1618 | iport->owning_controller = ihost; | 1627 | iport->owning_controller = ihost; |
1619 | 1628 | ||
1620 | iport->started_request_count = 0; | 1629 | iport->started_request_count = 0; |
1621 | iport->assigned_device_count = 0; | 1630 | iport->assigned_device_count = 0; |
1622 | iport->hang_detect_users = 0; | ||
1623 | 1631 | ||
1624 | iport->reserved_rni = SCU_DUMMY_INDEX; | 1632 | iport->reserved_rni = SCU_DUMMY_INDEX; |
1625 | iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; | 1633 | iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; |
@@ -1632,6 +1640,29 @@ void sci_port_construct(struct isci_port *iport, u8 index, | |||
1632 | iport->phy_table[index] = NULL; | 1640 | iport->phy_table[index] = NULL; |
1633 | } | 1641 | } |
1634 | 1642 | ||
1643 | void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index) | ||
1644 | { | ||
1645 | INIT_LIST_HEAD(&iport->remote_dev_list); | ||
1646 | INIT_LIST_HEAD(&iport->domain_dev_list); | ||
1647 | spin_lock_init(&iport->state_lock); | ||
1648 | init_completion(&iport->start_complete); | ||
1649 | iport->isci_host = ihost; | ||
1650 | isci_port_change_state(iport, isci_freed); | ||
1651 | atomic_set(&iport->event, 0); | ||
1652 | } | ||
1653 | |||
1654 | /** | ||
1655 | * isci_port_get_state() - This function gets the status of the port object. | ||
1656 | * @isci_port: This parameter points to the isci_port object | ||
1657 | * | ||
1658 | * Returns the status of the object as an isci_status enum. | ||
1659 | */ | ||
1660 | enum isci_status isci_port_get_state( | ||
1661 | struct isci_port *isci_port) | ||
1662 | { | ||
1663 | return isci_port->status; | ||
1664 | } | ||
1665 | |||
1635 | void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) | 1666 | void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) |
1636 | { | 1667 | { |
1637 | struct isci_host *ihost = iport->owning_controller; | 1668 | struct isci_host *ihost = iport->owning_controller; |
@@ -1640,23 +1671,19 @@ void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy | |||
1640 | isci_port_bc_change_received(ihost, iport, iphy); | 1671 | isci_port_bc_change_received(ihost, iport, iphy); |
1641 | } | 1672 | } |
1642 | 1673 | ||
1643 | static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport) | ||
1644 | { | ||
1645 | wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state)); | ||
1646 | } | ||
1647 | |||
1648 | int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, | 1674 | int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, |
1649 | struct isci_phy *iphy) | 1675 | struct isci_phy *iphy) |
1650 | { | 1676 | { |
1651 | unsigned long flags; | 1677 | unsigned long flags; |
1652 | enum sci_status status; | 1678 | enum sci_status status; |
1653 | int ret = TMF_RESP_FUNC_COMPLETE; | 1679 | int idx, ret = TMF_RESP_FUNC_COMPLETE; |
1654 | 1680 | ||
1655 | dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n", | 1681 | dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n", |
1656 | __func__, iport); | 1682 | __func__, iport); |
1657 | 1683 | ||
1684 | init_completion(&iport->hard_reset_complete); | ||
1685 | |||
1658 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1686 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1659 | set_bit(IPORT_RESET_PENDING, &iport->state); | ||
1660 | 1687 | ||
1661 | #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT | 1688 | #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT |
1662 | status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); | 1689 | status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); |
@@ -1664,22 +1691,15 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor | |||
1664 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1691 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1665 | 1692 | ||
1666 | if (status == SCI_SUCCESS) { | 1693 | if (status == SCI_SUCCESS) { |
1667 | wait_port_reset(ihost, iport); | 1694 | wait_for_completion(&iport->hard_reset_complete); |
1668 | 1695 | ||
1669 | dev_dbg(&ihost->pdev->dev, | 1696 | dev_dbg(&ihost->pdev->dev, |
1670 | "%s: iport = %p; hard reset completion\n", | 1697 | "%s: iport = %p; hard reset completion\n", |
1671 | __func__, iport); | 1698 | __func__, iport); |
1672 | 1699 | ||
1673 | if (iport->hard_reset_status != SCI_SUCCESS) { | 1700 | if (iport->hard_reset_status != SCI_SUCCESS) |
1674 | ret = TMF_RESP_FUNC_FAILED; | 1701 | ret = TMF_RESP_FUNC_FAILED; |
1675 | |||
1676 | dev_err(&ihost->pdev->dev, | ||
1677 | "%s: iport = %p; hard reset failed (0x%x)\n", | ||
1678 | __func__, iport, iport->hard_reset_status); | ||
1679 | } | ||
1680 | } else { | 1702 | } else { |
1681 | clear_bit(IPORT_RESET_PENDING, &iport->state); | ||
1682 | wake_up(&ihost->eventq); | ||
1683 | ret = TMF_RESP_FUNC_FAILED; | 1703 | ret = TMF_RESP_FUNC_FAILED; |
1684 | 1704 | ||
1685 | dev_err(&ihost->pdev->dev, | 1705 | dev_err(&ihost->pdev->dev, |
@@ -1688,83 +1708,50 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor | |||
1688 | __func__, iport, status); | 1708 | __func__, iport, status); |
1689 | 1709 | ||
1690 | } | 1710 | } |
1691 | return ret; | ||
1692 | } | ||
1693 | |||
1694 | int isci_ata_check_ready(struct domain_device *dev) | ||
1695 | { | ||
1696 | struct isci_port *iport = dev->port->lldd_port; | ||
1697 | struct isci_host *ihost = dev_to_ihost(dev); | ||
1698 | struct isci_remote_device *idev; | ||
1699 | unsigned long flags; | ||
1700 | int rc = 0; | ||
1701 | |||
1702 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1703 | idev = isci_lookup_device(dev); | ||
1704 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1705 | |||
1706 | if (!idev) | ||
1707 | goto out; | ||
1708 | |||
1709 | if (test_bit(IPORT_RESET_PENDING, &iport->state)) | ||
1710 | goto out; | ||
1711 | 1711 | ||
1712 | rc = !!iport->active_phy_mask; | 1712 | /* If the hard reset for the port has failed, consider this |
1713 | out: | 1713 | * the same as link failures on all phys in the port. |
1714 | isci_put_device(idev); | 1714 | */ |
1715 | if (ret != TMF_RESP_FUNC_COMPLETE) { | ||
1715 | 1716 | ||
1716 | return rc; | 1717 | dev_err(&ihost->pdev->dev, |
1718 | "%s: iport = %p; hard reset failed " | ||
1719 | "(0x%x) - driving explicit link fail for all phys\n", | ||
1720 | __func__, iport, iport->hard_reset_status); | ||
1721 | |||
1722 | /* Down all phys in the port. */ | ||
1723 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1724 | for (idx = 0; idx < SCI_MAX_PHYS; ++idx) { | ||
1725 | struct isci_phy *iphy = iport->phy_table[idx]; | ||
1726 | |||
1727 | if (!iphy) | ||
1728 | continue; | ||
1729 | sci_phy_stop(iphy); | ||
1730 | sci_phy_start(iphy); | ||
1731 | } | ||
1732 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1733 | } | ||
1734 | return ret; | ||
1717 | } | 1735 | } |
1718 | 1736 | ||
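
The two sides also differ in how the hard-reset waiter sleeps: the right column uses a struct completion, while the left polls a pending bit via wait_event()/wake_up(), which tolerates spurious wakeups and lets several ports share one waitqueue. A userspace analogue of the flag-plus-waitqueue idiom using pthreads:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t eventq = PTHREAD_COND_INITIALIZER;
    static bool reset_pending;

    static void *reset_worker(void *arg)
    {
            usleep(1000);                    /* hardware performs the reset */
            pthread_mutex_lock(&lock);
            reset_pending = false;           /* clear_bit(IPORT_RESET_PENDING) */
            pthread_cond_broadcast(&eventq); /* wake_up(&ihost->eventq) */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            reset_pending = true;            /* set_bit before kicking the reset */
            pthread_create(&t, NULL, reset_worker, NULL);

            pthread_mutex_lock(&lock);
            while (reset_pending)            /* wait_event(..., !test_bit(...)) */
                    pthread_cond_wait(&eventq, &lock);
            pthread_mutex_unlock(&lock);

            puts("hard reset completion observed");
            pthread_join(t, NULL);
            return 0;
    }
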
1737 | /** | ||
1738 | * isci_port_deformed() - This function is called by libsas when a port becomes | ||
1739 | * inactive. | ||
1740 | * @phy: This parameter specifies the libsas phy with the inactive port. | ||
1741 | * | ||
1742 | */ | ||
1719 | void isci_port_deformed(struct asd_sas_phy *phy) | 1743 | void isci_port_deformed(struct asd_sas_phy *phy) |
1720 | { | 1744 | { |
1721 | struct isci_host *ihost = phy->ha->lldd_ha; | 1745 | pr_debug("%s: sas_phy = %p\n", __func__, phy); |
1722 | struct isci_port *iport = phy->port->lldd_port; | ||
1723 | unsigned long flags; | ||
1724 | int i; | ||
1725 | |||
1726 | /* we got a port notification on a port that was subsequently | ||
1727 | * torn down and libsas is just now catching up | ||
1728 | */ | ||
1729 | if (!iport) | ||
1730 | return; | ||
1731 | |||
1732 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1733 | for (i = 0; i < SCI_MAX_PHYS; i++) { | ||
1734 | if (iport->active_phy_mask & 1 << i) | ||
1735 | break; | ||
1736 | } | ||
1737 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1738 | |||
1739 | if (i >= SCI_MAX_PHYS) | ||
1740 | dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n", | ||
1741 | __func__, (long) (iport - &ihost->ports[0])); | ||
1742 | } | 1746 | } |
1743 | 1747 | ||
1748 | /** | ||
1749 | * isci_port_formed() - This function is called by libsas when a port becomes | ||
1750 | * active. | ||
1751 | * @phy: This parameter specifies the libsas phy with the active port. | ||
1752 | * | ||
1753 | */ | ||
1744 | void isci_port_formed(struct asd_sas_phy *phy) | 1754 | void isci_port_formed(struct asd_sas_phy *phy) |
1745 | { | 1755 | { |
1746 | struct isci_host *ihost = phy->ha->lldd_ha; | 1756 | pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port); |
1747 | struct isci_phy *iphy = to_iphy(phy); | ||
1748 | struct asd_sas_port *port = phy->port; | ||
1749 | struct isci_port *iport = NULL; | ||
1750 | unsigned long flags; | ||
1751 | int i; | ||
1752 | |||
1753 | /* initial ports are formed as the driver is still initializing, | ||
1754 | * wait for that process to complete | ||
1755 | */ | ||
1756 | wait_for_start(ihost); | ||
1757 | |||
1758 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1759 | for (i = 0; i < SCI_MAX_PORTS; i++) { | ||
1760 | iport = &ihost->ports[i]; | ||
1761 | if (iport->active_phy_mask & 1 << iphy->phy_index) | ||
1762 | break; | ||
1763 | } | ||
1764 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1765 | |||
1766 | if (i >= SCI_MAX_PORTS) | ||
1767 | iport = NULL; | ||
1768 | |||
1769 | port->lldd_port = iport; | ||
1770 | } | 1757 | } |
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 861e8f72811..b50ecd4e8f9 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h | |||
@@ -63,9 +63,6 @@ | |||
63 | 63 | ||
64 | #define SCIC_SDS_DUMMY_PORT 0xFF | 64 | #define SCIC_SDS_DUMMY_PORT 0xFF |
65 | 65 | ||
66 | #define PF_NOTIFY (1 << 0) | ||
67 | #define PF_RESUME (1 << 1) | ||
68 | |||
69 | struct isci_phy; | 66 | struct isci_phy; |
70 | struct isci_host; | 67 | struct isci_host; |
71 | 68 | ||
@@ -80,14 +77,13 @@ enum isci_status { | |||
80 | 77 | ||
81 | /** | 78 | /** |
82 | * struct isci_port - isci direct attached sas port object | 79 | * struct isci_port - isci direct attached sas port object |
80 | * @event: counts bcns and port stop events (for bcn filtering) | ||
83 | * @ready_exit: several states constitute 'ready'. When exiting ready we | 81 | * @ready_exit: several states constitute 'ready'. When exiting ready we |
84 | * need to take extra port-teardown actions that are | 82 | * need to take extra port-teardown actions that are |
85 | * skipped when exiting to another 'ready' state. | 83 | * skipped when exiting to another 'ready' state. |
86 | * @logical_port_index: software port index | 84 | * @logical_port_index: software port index |
87 | * @physical_port_index: hardware port index | 85 | * @physical_port_index: hardware port index |
88 | * @active_phy_mask: identifies phy members | 86 | * @active_phy_mask: identifies phy members |
89 | * @enabled_phy_mask: phy mask for the port | ||
90 | * that are already part of the port | ||
91 | * @reserved_tag: | 87 | * @reserved_tag: |
92 | * @reserved_rni: reserved for the port task scheduler workaround | 88 | * @reserved_rni: reserved for the port task scheduler workaround |
93 | * @started_request_count: reference count for outstanding commands | 89 | * @started_request_count: reference count for outstanding commands |
@@ -95,23 +91,28 @@ enum isci_status { | |||
95 | * @timer: timeout start/stop operations | 91 | * @timer: timeout start/stop operations |
96 | */ | 92 | */ |
97 | struct isci_port { | 93 | struct isci_port { |
94 | enum isci_status status; | ||
95 | #define IPORT_BCN_BLOCKED 0 | ||
96 | #define IPORT_BCN_PENDING 1 | ||
97 | unsigned long flags; | ||
98 | atomic_t event; | ||
98 | struct isci_host *isci_host; | 99 | struct isci_host *isci_host; |
100 | struct asd_sas_port sas_port; | ||
99 | struct list_head remote_dev_list; | 101 | struct list_head remote_dev_list; |
100 | #define IPORT_RESET_PENDING 0 | 102 | spinlock_t state_lock; |
101 | unsigned long state; | 103 | struct list_head domain_dev_list; |
104 | struct completion start_complete; | ||
105 | struct completion hard_reset_complete; | ||
102 | enum sci_status hard_reset_status; | 106 | enum sci_status hard_reset_status; |
103 | struct sci_base_state_machine sm; | 107 | struct sci_base_state_machine sm; |
104 | bool ready_exit; | 108 | bool ready_exit; |
105 | u8 logical_port_index; | 109 | u8 logical_port_index; |
106 | u8 physical_port_index; | 110 | u8 physical_port_index; |
107 | u8 active_phy_mask; | 111 | u8 active_phy_mask; |
108 | u8 enabled_phy_mask; | ||
109 | u8 last_active_phy; | ||
110 | u16 reserved_rni; | 112 | u16 reserved_rni; |
111 | u16 reserved_tag; | 113 | u16 reserved_tag; |
112 | u32 started_request_count; | 114 | u32 started_request_count; |
113 | u32 assigned_device_count; | 115 | u32 assigned_device_count; |
114 | u32 hang_detect_users; | ||
115 | u32 not_ready_reason; | 116 | u32 not_ready_reason; |
116 | struct isci_phy *phy_table[SCI_MAX_PHYS]; | 117 | struct isci_phy *phy_table[SCI_MAX_PHYS]; |
117 | struct isci_host *owning_controller; | 118 | struct isci_host *owning_controller; |
@@ -144,47 +145,70 @@ struct sci_port_properties { | |||
144 | }; | 145 | }; |
145 | 146 | ||
146 | /** | 147 | /** |
147 | * enum sci_port_states - port state machine states | 148 | * enum sci_port_states - This enumeration depicts all the states for the |
148 | * @SCI_PORT_STOPPED: port has successfully been stopped. In this state | 149 | * common port state machine. |
149 | * no new IO operations are permitted. This state is | 150 | * |
150 | * entered from the STOPPING state. | 151 | * |
151 | * @SCI_PORT_STOPPING: port is in the process of stopping. In this | ||
152 | * state no new IO operations are permitted, but | ||
153 | * existing IO operations are allowed to complete. | ||
154 | * This state is entered from the READY state. | ||
155 | * @SCI_PORT_READY: port is now ready. Thus, the user is able to | ||
156 | * perform IO operations on this port. This state is | ||
157 | * entered from the STARTING state. | ||
158 | * @SCI_PORT_SUB_WAITING: port is started and ready but has no active | ||
159 | * phys. | ||
160 | * @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at | ||
161 | * least one phy operational. | ||
162 | * @SCI_PORT_SUB_CONFIGURING: port is started and there was an | ||
163 | * add/remove phy event. This state is only | ||
164 | * used in Automatic Port Configuration Mode | ||
165 | * (APC) | ||
166 | * @SCI_PORT_RESETTING: port is in the process of performing a hard | ||
167 | * reset. Thus, the user is unable to perform IO | ||
168 | * operations on this port. This state is entered | ||
169 | * from the READY state. | ||
170 | * @SCI_PORT_FAILED: port has failed a reset request. This state is | ||
171 | * entered when a port reset request times out. This | ||
172 | * state is entered from the RESETTING state. | ||
173 | */ | 152 | */ |
174 | #define PORT_STATES {\ | 153 | enum sci_port_states { |
175 | C(PORT_STOPPED),\ | 154 | /** |
176 | C(PORT_STOPPING),\ | 155 | * This state indicates that the port has successfully been stopped. |
177 | C(PORT_READY),\ | 156 | * In this state no new IO operations are permitted. |
178 | C(PORT_SUB_WAITING),\ | 157 | * This state is entered from the STOPPING state. |
179 | C(PORT_SUB_OPERATIONAL),\ | 158 | */ |
180 | C(PORT_SUB_CONFIGURING),\ | 159 | SCI_PORT_STOPPED, |
181 | C(PORT_RESETTING),\ | 160 | |
182 | C(PORT_FAILED),\ | 161 | /** |
183 | } | 162 | * This state indicates that the port is in the process of stopping. |
184 | #undef C | 163 | * In this state no new IO operations are permitted, but existing IO |
185 | #define C(a) SCI_##a | 164 | * operations are allowed to complete. |
186 | enum sci_port_states PORT_STATES; | 165 | * This state is entered from the READY state. |
187 | #undef C | 166 | */ |
167 | SCI_PORT_STOPPING, | ||
168 | |||
169 | /** | ||
170 | * This state indicates the port is now ready. Thus, the user is | ||
171 | * able to perform IO operations on this port. | ||
172 | * This state is entered from the STARTING state. | ||
173 | */ | ||
174 | SCI_PORT_READY, | ||
175 | |||
176 | /** | ||
177 | * The substate where the port is started and ready but has no | ||
178 | * active phys. | ||
179 | */ | ||
180 | SCI_PORT_SUB_WAITING, | ||
181 | |||
182 | /** | ||
183 | * The substate where the port is started and ready and there is | ||
184 | * at least one phy operational. | ||
185 | */ | ||
186 | SCI_PORT_SUB_OPERATIONAL, | ||
187 | |||
188 | /** | ||
189 | * The substate where the port is started and there was an | ||
190 | * add/remove phy event. This state is only used in Automatic | ||
191 | * Port Configuration Mode (APC) | ||
192 | */ | ||
193 | SCI_PORT_SUB_CONFIGURING, | ||
194 | |||
195 | /** | ||
196 | * This state indicates the port is in the process of performing a hard | ||
197 | * reset. Thus, the user is unable to perform IO operations on this | ||
198 | * port. | ||
199 | * This state is entered from the READY state. | ||
200 | */ | ||
201 | SCI_PORT_RESETTING, | ||
202 | |||
203 | /** | ||
204 | * This state indicates the port has failed a reset request. This state | ||
205 | * is entered when a port reset request times out. | ||
206 | * This state is entered from the RESETTING state. | ||
207 | */ | ||
208 | SCI_PORT_FAILED, | ||
209 | |||
210 | |||
211 | }; | ||
188 | 212 | ||
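The removed kernel-doc block and the left-hand side's PORT_STATES macro carry the same state list; the X-macro defines it once and expands it twice, first (with C(a) pasting a SCI_ prefix) into enum values, and again with a stringizing C(a) wherever a name table is wanted, as remote_device.c does below for dev_state_name(). A minimal stand-alone sketch of the pattern, with a trimmed state list for illustration:

#include <stdio.h>

#define PORT_STATES {\
	C(PORT_STOPPED),\
	C(PORT_READY),\
	C(PORT_FAILED),\
}

/* First expansion: paste the SCI_ prefix to get the enum values. */
#undef C
#define C(a) SCI_##a
enum sci_port_states PORT_STATES;
#undef C

/* Second expansion: stringize to get a parallel name table. */
#define C(a) #a
static const char * const port_state_names[] = PORT_STATES;
#undef C

int main(void)
{
	/* Indexes agree because both expansions share the one list. */
	printf("%s\n", port_state_names[SCI_PORT_READY]); /* PORT_READY */
	return 0;
}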
189 | static inline void sci_port_decrement_request_count(struct isci_port *iport) | 213 | static inline void sci_port_decrement_request_count(struct isci_port *iport) |
190 | { | 214 | { |
@@ -230,10 +254,6 @@ bool sci_port_link_detected( | |||
230 | struct isci_port *iport, | 254 | struct isci_port *iport, |
231 | struct isci_phy *iphy); | 255 | struct isci_phy *iphy); |
232 | 256 | ||
233 | enum sci_status sci_port_get_properties( | ||
234 | struct isci_port *iport, | ||
235 | struct sci_port_properties *prop); | ||
236 | |||
237 | enum sci_status sci_port_link_up(struct isci_port *iport, | 257 | enum sci_status sci_port_link_up(struct isci_port *iport, |
238 | struct isci_phy *iphy); | 258 | struct isci_phy *iphy); |
239 | enum sci_status sci_port_link_down(struct isci_port *iport, | 259 | enum sci_status sci_port_link_down(struct isci_port *iport, |
@@ -270,14 +290,17 @@ void sci_port_get_attached_sas_address( | |||
270 | struct isci_port *iport, | 290 | struct isci_port *iport, |
271 | struct sci_sas_address *sas_address); | 291 | struct sci_sas_address *sas_address); |
272 | 292 | ||
273 | void sci_port_set_hang_detection_timeout( | 293 | enum isci_status isci_port_get_state( |
274 | struct isci_port *isci_port, | 294 | struct isci_port *isci_port); |
275 | u32 timeout); | ||
276 | 295 | ||
277 | void isci_port_formed(struct asd_sas_phy *); | 296 | void isci_port_formed(struct asd_sas_phy *); |
278 | void isci_port_deformed(struct asd_sas_phy *); | 297 | void isci_port_deformed(struct asd_sas_phy *); |
279 | 298 | ||
299 | void isci_port_init( | ||
300 | struct isci_port *port, | ||
301 | struct isci_host *host, | ||
302 | int index); | ||
303 | |||
280 | int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, | 304 | int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, |
281 | struct isci_phy *iphy); | 305 | struct isci_phy *iphy); |
282 | int isci_ata_check_ready(struct domain_device *dev); | ||
283 | #endif /* !defined(_ISCI_PORT_H_) */ | 306 | #endif /* !defined(_ISCI_PORT_H_) */ |
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index cd962da4a57..38a99d28114 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c | |||
@@ -57,7 +57,7 @@ | |||
57 | 57 | ||
58 | #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) | 58 | #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) |
59 | #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) | 59 | #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) |
60 | #define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000) | 60 | #define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100) |
61 | 61 | ||
62 | enum SCIC_SDS_APC_ACTIVITY { | 62 | enum SCIC_SDS_APC_ACTIVITY { |
63 | SCIC_SDS_APC_SKIP_PHY, | 63 | SCIC_SDS_APC_SKIP_PHY, |
@@ -466,19 +466,6 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
466 | return sci_port_configuration_agent_validate_ports(ihost, port_agent); | 466 | return sci_port_configuration_agent_validate_ports(ihost, port_agent); |
467 | } | 467 | } |
468 | 468 | ||
469 | /* | ||
470 | * This routine will restart the automatic port configuration timeout | ||
471 | * timer for the next time period. This could be caused by either a link | ||
472 | * down event or a link up event where we cannot yet tell to which port ||
473 | * a phy belongs. ||
474 | */ | ||
475 | static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent, | ||
476 | u32 timeout) | ||
477 | { | ||
478 | port_agent->timer_pending = true; | ||
479 | sci_mod_timer(&port_agent->timer, timeout); | ||
480 | } | ||
481 | |||
482 | static void sci_apc_agent_configure_ports(struct isci_host *ihost, | 469 | static void sci_apc_agent_configure_ports(struct isci_host *ihost, |
483 | struct sci_port_configuration_agent *port_agent, | 470 | struct sci_port_configuration_agent *port_agent, |
484 | struct isci_phy *iphy, | 471 | struct isci_phy *iphy, |
@@ -578,8 +565,17 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost, | |||
578 | break; | 565 | break; |
579 | 566 | ||
580 | case SCIC_SDS_APC_START_TIMER: | 567 | case SCIC_SDS_APC_START_TIMER: |
581 | sci_apc_agent_start_timer(port_agent, | 568 | /* |
582 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); | 569 | * This can occur for either a link down event, or a link |
570 | * up event where we cannot yet tell the port to which a | ||
571 | * phy belongs. | ||
572 | */ | ||
573 | if (port_agent->timer_pending) | ||
574 | sci_del_timer(&port_agent->timer); | ||
575 | |||
576 | port_agent->timer_pending = true; | ||
577 | sci_mod_timer(&port_agent->timer, | ||
578 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); | ||
583 | break; | 579 | break; |
584 | 580 | ||
585 | case SCIC_SDS_APC_SKIP_PHY: | 581 | case SCIC_SDS_APC_SKIP_PHY: |
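The START_TIMER case above re-arms the reconfiguration timer when a link event arrives before the owning port can be determined, cancelling any pending expiry first so only one timeout is ever outstanding (the left-hand side's sci_apc_agent_start_timer() helper factored out the arming half of this). A hedged sketch of the restart pattern as one helper; this is a fragment reusing the sci_* timer calls from the hunk, not a stand-alone build:

/* Re-arm the APC timeout so exactly one expiry is pending. */
static void apc_restart_timer(struct sci_port_configuration_agent *port_agent,
			      u32 timeout)
{
	if (port_agent->timer_pending)
		sci_del_timer(&port_agent->timer); /* drop the stale expiry */

	port_agent->timer_pending = true;
	sci_mod_timer(&port_agent->timer, timeout); /* start a fresh period */
}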
@@ -611,8 +607,7 @@ static void sci_apc_agent_link_up(struct isci_host *ihost, | |||
611 | if (!iport) { | 607 | if (!iport) { |
612 | /* the phy is not part of this port */ | 608 | /* the phy is not part of this port */
613 | port_agent->phy_ready_mask |= 1 << phy_index; | 609 | port_agent->phy_ready_mask |= 1 << phy_index; |
614 | sci_apc_agent_start_timer(port_agent, | 610 | sci_apc_agent_configure_ports(ihost, port_agent, iphy, true); |
615 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); | ||
616 | } else { | 611 | } else { |
617 | /* the phy is already part of the port */ | 612 | /* the phy is already part of the port */
618 | u32 port_state = iport->sm.current_state_id; | 613 | u32 port_state = iport->sm.current_state_id; |
@@ -693,9 +688,6 @@ static void apc_agent_timeout(unsigned long data) | |||
693 | &ihost->phys[index], false); | 688 | &ihost->phys[index], false); |
694 | } | 689 | } |
695 | 690 | ||
696 | if (is_controller_start_complete(ihost)) | ||
697 | sci_controller_transition_to_ready(ihost, SCI_SUCCESS); | ||
698 | |||
699 | done: | 691 | done: |
700 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 692 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
701 | } | 693 | } |
@@ -731,11 +723,6 @@ void sci_port_configuration_agent_construct( | |||
731 | } | 723 | } |
732 | } | 724 | } |
733 | 725 | ||
734 | bool is_port_config_apc(struct isci_host *ihost) | ||
735 | { | ||
736 | return ihost->port_agent.link_up_handler == sci_apc_agent_link_up; | ||
737 | } | ||
738 | |||
739 | enum sci_status sci_port_configuration_agent_initialize( | 726 | enum sci_status sci_port_configuration_agent_initialize( |
740 | struct isci_host *ihost, | 727 | struct isci_host *ihost, |
741 | struct sci_port_configuration_agent *port_agent) | 728 | struct sci_port_configuration_agent *port_agent) |
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 8ac646e5edd..b5f4341de24 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c | |||
@@ -104,6 +104,7 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) | |||
104 | 104 | ||
105 | if (i >= len) { | 105 | if (i >= len) { |
106 | dev_err(&pdev->dev, "oprom parse error\n"); | 106 | dev_err(&pdev->dev, "oprom parse error\n"); |
107 | devm_kfree(&pdev->dev, rom); | ||
107 | rom = NULL; | 108 | rom = NULL; |
108 | } | 109 | } |
109 | pci_unmap_biosrom(oprom); | 110 | pci_unmap_biosrom(oprom); |
@@ -111,6 +112,18 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) | |||
111 | return rom; | 112 | return rom; |
112 | } | 113 | } |
113 | 114 | ||
115 | enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, | ||
116 | struct isci_orom *orom, int scu_index) | ||
117 | { | ||
118 | /* check for valid inputs */ | ||
119 | if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS || | ||
120 | scu_index > orom->hdr.num_elements || !oem) | ||
121 | return -EINVAL; | ||
122 | |||
123 | *oem = orom->ctrl[scu_index]; | ||
124 | return 0; | ||
125 | } | ||
126 | |||
114 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) | 127 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) |
115 | { | 128 | { |
116 | struct isci_orom *orom = NULL, *data; | 129 | struct isci_orom *orom = NULL, *data; |
@@ -134,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw | |||
134 | 147 | ||
135 | memcpy(orom, fw->data, fw->size); | 148 | memcpy(orom, fw->data, fw->size); |
136 | 149 | ||
137 | if (is_c0(pdev) || is_c1(pdev)) | 150 | if (is_c0(pdev)) |
138 | goto out; | 151 | goto out; |
139 | 152 | ||
140 | /* | 153 | /* |
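The isci_parse_oem_parameters() addition above bounds-checks scu_index against both the compile-time controller limit and the OROM header's element count before copying one controller's OEM block out by value. A hedged usage sketch (hypothetical probe-path fragment, error handling trimmed; isci_request_oprom() and the types come from the hunks above):

/* Hypothetical caller: fetch the OEM block for controller 0. */
struct isci_orom *orom = isci_request_oprom(pdev);
struct sci_oem_params oem;

if (orom && isci_parse_oem_parameters(&oem, orom, 0) == 0)
	dev_info(&pdev->dev, "OEM port config mode: %u\n",
		 oem.controller.mode_type);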
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index e08b578241f..dc007e692f4 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h | |||
@@ -112,7 +112,7 @@ struct sci_user_parameters { | |||
112 | * This field specifies the maximum number of direct attached devices | 112 | * This field specifies the maximum number of direct attached devices |
113 | * that can have power supplied to them simultaneously. | 113 | * that can have power supplied to them simultaneously. |
114 | */ | 114 | */ |
115 | u8 max_concurr_spinup; | 115 | u8 max_number_concurrent_device_spin_up; |
116 | 116 | ||
117 | /** | 117 | /** |
118 | * This field specifies the number of seconds to allow a phy to consume | 118 | * This field specifies the number of seconds to allow a phy to consume |
@@ -152,10 +152,12 @@ struct sci_user_parameters { | |||
152 | #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 | 152 | #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 |
153 | 153 | ||
154 | struct sci_oem_params; | 154 | struct sci_oem_params; |
155 | int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version); | 155 | int sci_oem_parameters_validate(struct sci_oem_params *oem); |
156 | 156 | ||
157 | struct isci_orom; | 157 | struct isci_orom; |
158 | struct isci_orom *isci_request_oprom(struct pci_dev *pdev); | 158 | struct isci_orom *isci_request_oprom(struct pci_dev *pdev); |
159 | enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, | ||
160 | struct isci_orom *orom, int scu_index); | ||
159 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); | 161 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); |
160 | struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); | 162 | struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); |
161 | 163 | ||
@@ -189,11 +191,6 @@ struct isci_oem_hdr { | |||
189 | 0x1a, 0x04, 0xc6) | 191 | 0x1a, 0x04, 0xc6) |
190 | #define ISCI_EFI_VAR_NAME "RstScuO" | 192 | #define ISCI_EFI_VAR_NAME "RstScuO" |
191 | 193 | ||
192 | #define ISCI_ROM_VER_1_0 0x10 | ||
193 | #define ISCI_ROM_VER_1_1 0x11 | ||
194 | #define ISCI_ROM_VER_1_3 0x13 | ||
195 | #define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3 | ||
196 | |||
197 | /* Allowed PORT configuration modes APC Automatic PORT configuration mode is | 194 | /* Allowed PORT configuration modes APC Automatic PORT configuration mode is |
198 | * defined by the OEM configuration parameters providing no PHY_MASK parameters | 195 | * defined by the OEM configuration parameters providing no PHY_MASK parameters |
199 | * for any PORT. i.e. There are no phys assigned to any of the ports at start. | 196 | * for any PORT. i.e. There are no phys assigned to any of the ports at start. |
@@ -222,87 +219,9 @@ struct sci_bios_oem_param_block_hdr { | |||
222 | struct sci_oem_params { | 219 | struct sci_oem_params { |
223 | struct { | 220 | struct { |
224 | uint8_t mode_type; | 221 | uint8_t mode_type; |
225 | uint8_t max_concurr_spin_up; | 222 | uint8_t max_concurrent_dev_spin_up; |
226 | /* | 223 | uint8_t do_enable_ssc; |
227 | * This bitfield indicates the OEM's desired default Tx | 224 | uint8_t reserved; |
228 | * Spread Spectrum Clocking (SSC) settings for SATA and SAS. | ||
229 | * NOTE: Default SSC Modulation Frequency is 31.5KHz. | ||
230 | */ | ||
231 | union { | ||
232 | struct { | ||
233 | /* | ||
234 | * NOTE: Max spread for SATA is +0 / -5000 PPM. | ||
235 | * Down-spreading SSC (only method allowed for SATA): | ||
236 | * SATA SSC Tx Disabled = 0x0 | ||
237 | * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2 | ||
238 | * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3 | ||
239 | * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6 | ||
240 | * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7 | ||
241 | */ | ||
242 | uint8_t ssc_sata_tx_spread_level:4; | ||
243 | /* | ||
244 | * SAS SSC Tx Disabled = 0x0 | ||
245 | * | ||
246 | * NOTE: Max spread for SAS down-spreading +0 / | ||
247 | * -2300 PPM | ||
248 | * Down-spreading SSC: | ||
249 | * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2 | ||
250 | * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3 | ||
251 | * | ||
252 | * NOTE: Max spread for SAS center-spreading +2300 / | ||
253 | * -2300 PPM | ||
254 | * Center-spreading SSC: | ||
255 | * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3 | ||
256 | * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6 | ||
257 | */ | ||
258 | uint8_t ssc_sas_tx_spread_level:3; | ||
259 | /* | ||
260 | * NOTE: Refer to the SSC section of the SAS 2.x | ||
261 | * Specification for proper setting of this field. | ||
262 | * For standard SAS Initiator SAS PHY operation it | ||
263 | * should be 0 for Down-spreading. | ||
264 | * SAS SSC Tx spread type: | ||
265 | * Down-spreading SSC = 0 | ||
266 | * Center-spreading SSC = 1 | ||
267 | */ | ||
268 | uint8_t ssc_sas_tx_type:1; | ||
269 | }; | ||
270 | uint8_t do_enable_ssc; | ||
271 | }; | ||
272 | /* | ||
273 | * This field indicates the length of the SAS/SATA cable between ||
274 | * host and device. ||
275 | * This field is used to relate the analog ||
276 | * parameters of the phy in the silicon to the length of the cable. ||
277 | * Supported cable attenuation levels: | ||
278 | * "short"- up to 3m, "medium"-3m to 6m, and "long"- more than | ||
279 | * 6m. | ||
280 | * | ||
281 | * This is bit mask field: | ||
282 | * | ||
283 | * BIT: (MSB) 7 6 5 4 | ||
284 | * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Medium cable | ||
285 | * length assignment | ||
286 | * BIT: 3 2 1 0 (LSB) | ||
287 | * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Long cable length | ||
288 | * assignment | ||
289 | * | ||
290 | * BITS 7-4 are set when the cable length is assigned to medium | ||
291 | * BITS 3-0 are set when the cable length is assigned to long | ||
292 | * | ||
293 | * The BIT positions are clear when the cable length is | ||
294 | * assigned to short. | ||
295 | * | ||
296 | * Setting the bits for both long and medium cable length is | ||
297 | * undefined. | ||
298 | * | ||
299 | * A value of 0x84 would assign | ||
300 | * phy3 - medium | ||
301 | * phy2 - long | ||
302 | * phy1 - short | ||
303 | * phy0 - short | ||
304 | */ | ||
305 | uint8_t cable_selection_mask; | ||
306 | } controller; | 225 | } controller; |
307 | 226 | ||
308 | struct { | 227 | struct { |
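The removed cable_selection_mask comment above fully specifies its encoding: for phy N, a set bit N+4 selects a medium cable, a set bit N selects long, and neither bit selects short. A stand-alone decoder that reproduces the comment's own 0x84 worked example:

#include <stdio.h>

/* Decode one phy's cable length from the bit layout described above. */
static const char *cable_len(unsigned char mask, int phy)
{
	if (mask & (1u << (phy + 4)))
		return "medium"; /* bits 7-4: medium assignment */
	if (mask & (1u << phy))
		return "long"; /* bits 3-0: long assignment */
	return "short"; /* both clear: short */
}

int main(void)
{
	int phy;

	/* 0x84 => phy3 medium, phy2 long, phy1 short, phy0 short */
	for (phy = 3; phy >= 0; phy--)
		printf("phy%d - %s\n", phy, cable_len(0x84, phy));
	return 0;
}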
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h index 97f3ceb8d72..00afc738bbe 100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h | |||
@@ -370,27 +370,6 @@ struct scu_iit_entry { | |||
370 | >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \ | 370 | >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \ |
371 | ) | 371 | ) |
372 | 372 | ||
373 | /* ***************************************************************************** */ | ||
374 | #define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT (0) | ||
375 | #define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK (0x00000001) | ||
376 | #define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT (1) | ||
377 | #define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK (0x00000002) | ||
378 | #define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT (2) | ||
379 | #define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK (0x00000004) | ||
380 | #define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT (3) | ||
381 | #define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK (0x00000008) | ||
382 | #define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT (16) | ||
383 | #define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK (0x000F0000) | ||
384 | #define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT (31) | ||
385 | #define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK (0x80000000) | ||
386 | #define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK (0x7FF0FFF0) | ||
387 | |||
388 | #define SMU_CGUCR_GEN_VAL(name, value) \ | ||
389 | SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value) | ||
390 | |||
391 | #define SMU_CGUCR_GEN_BIT(name) \ | ||
392 | SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name) | ||
393 | |||
394 | /* -------------------------------------------------------------------------- */ | 373 | /* -------------------------------------------------------------------------- */ |
395 | 374 | ||
396 | #define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0) | 375 | #define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0) |
@@ -896,6 +875,122 @@ struct scu_iit_entry { | |||
896 | #define SCU_PTSxSR_GEN_BIT(name) \ | 875 | #define SCU_PTSxSR_GEN_BIT(name) \ |
897 | SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name) | 876 | SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name) |
898 | 877 | ||
878 | |||
879 | /* | ||
880 | * ***************************************************************************** | ||
881 | * * SGPIO Register shift and mask values | ||
882 | * ***************************************************************************** */ | ||
883 | #define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0) | ||
884 | #define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001) | ||
885 | #define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1) | ||
886 | #define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002) | ||
887 | #define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2) | ||
888 | #define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004) | ||
889 | #define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15) | ||
890 | #define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000) | ||
891 | #define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8) | ||
892 | |||
893 | #define SCU_SGICRx_GEN_BIT(name) \ | ||
894 | SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name) | ||
895 | |||
896 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0) | ||
897 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F) | ||
898 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4) | ||
899 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0) | ||
900 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8) | ||
901 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00) | ||
902 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12) | ||
903 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000) | ||
904 | #define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000) | ||
905 | |||
906 | #define SCU_SGPBRx_GEN_VAL(name, value) \ | ||
907 | SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value) | ||
908 | |||
909 | #define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0) | ||
910 | #define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003) | ||
911 | #define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4) | ||
912 | #define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030) | ||
913 | #define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8) | ||
914 | #define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300) | ||
915 | #define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12) | ||
916 | #define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000) | ||
917 | #define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888) | ||
918 | |||
919 | #define SCU_SGSDLRx_GEN_VAL(name, value) \ | ||
920 | SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value) | ||
921 | |||
922 | #define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0) | ||
923 | #define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003) | ||
924 | #define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4) | ||
925 | #define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030) | ||
926 | #define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8) | ||
927 | #define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300) | ||
928 | #define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12) | ||
929 | #define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000) | ||
930 | #define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888) | ||
931 | |||
932 | #define SCU_SGSDURx_GEN_VAL(name, value) \ | ||
933 | SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value) | ||
934 | |||
935 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0) | ||
936 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003) | ||
937 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4) | ||
938 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030) | ||
939 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8) | ||
940 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300) | ||
941 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12) | ||
942 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000) | ||
943 | #define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888) | ||
944 | |||
945 | #define SCU_SGSIDLRx_GEN_VAL(name, value) \ | ||
946 | SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value) | ||
947 | |||
948 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0) | ||
949 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003) | ||
950 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4) | ||
951 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030) | ||
952 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8) | ||
953 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300) | ||
954 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12) | ||
955 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000) | ||
956 | #define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888) | ||
957 | |||
958 | #define SCU_SGSIDURx_GEN_VAL(name, value) \ | ||
959 | SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value) | ||
960 | |||
961 | #define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0) | ||
962 | #define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F) | ||
963 | #define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0) | ||
964 | |||
965 | #define SCU_SGVSCR_GEN_VAL(value) \ | ||
966 | SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE ## name, value) | ||
967 | |||
968 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0) | ||
969 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003) | ||
970 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2) | ||
971 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004) | ||
972 | #define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3) | ||
973 | #define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008) | ||
974 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4) | ||
975 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030) | ||
976 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6) | ||
977 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040) | ||
978 | #define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7) | ||
979 | #define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080) | ||
980 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8) | ||
981 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300) | ||
982 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10) | ||
983 | #define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400) | ||
984 | #define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11) | ||
985 | #define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800) | ||
986 | #define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000) | ||
987 | |||
988 | #define SCU_SGODSR_GEN_VAL(name, value) \ | ||
989 | SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value) | ||
990 | |||
991 | #define SCU_SGODSR_GEN_BIT(name) \ | ||
992 | SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name) | ||
993 | |||
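The SGPIO definitions added above follow the file's shift/mask convention: every field gets a _SHIFT/_MASK pair, and the SCU_*_GEN_VAL()/SCU_*_GEN_BIT() wrappers token-paste the field name onto them. A hedged sketch of how a register value composes; the two shared helpers are written out here under the assumption that they match that convention (they are defined elsewhere in registers.h, not in this hunk):

/* Assumed shapes of the shared helpers: */
#define SCU_GEN_VALUE(name, value) \
	(((value) << name ## _SHIFT) & name ## _MASK)
#define SCU_GEN_BIT(name) \
	(1u << name ## _SHIFT)

/* Hypothetical SGODSR value: select input data 0, inverted. */
u32 sgodsr = SCU_SGODSR_GEN_VAL(INPUT_DATA0, 0x1) |
	     SCU_SGODSR_GEN_BIT(INVERT_INPUT_DATA0);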
899 | /* | 994 | /* |
900 | * ***************************************************************************** | 995 | * ***************************************************************************** |
901 | * * SMU Registers | 996 | * * SMU Registers |
@@ -1013,10 +1108,8 @@ struct smu_registers { | |||
1013 | u32 mmr_address_window; | 1108 | u32 mmr_address_window; |
1014 | /* 0x00A4 SMDW */ | 1109 | /* 0x00A4 SMDW */ |
1015 | u32 mmr_data_window; | 1110 | u32 mmr_data_window; |
1016 | /* 0x00A8 CGUCR */ | 1111 | u32 reserved_A8; |
1017 | u32 clock_gating_control; | 1112 | u32 reserved_AC; |
1018 | /* 0x00AC CGUPC */ | ||
1019 | u32 clock_gating_performance; | ||
1020 | /* A whole bunch of reserved space */ | 1113 | /* A whole bunch of reserved space */ |
1021 | u32 reserved_Bx[4]; | 1114 | u32 reserved_Bx[4]; |
1022 | u32 reserved_Cx[4]; | 1115 | u32 reserved_Cx[4]; |
@@ -1239,14 +1332,6 @@ struct scu_transport_layer_registers { | |||
1239 | #define SCU_SAS_LLCTL_GEN_BIT(name) \ | 1332 | #define SCU_SAS_LLCTL_GEN_BIT(name) \ |
1240 | SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) | 1333 | SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) |
1241 | 1334 | ||
1242 | #define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0) | ||
1243 | #define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF) | ||
1244 | #define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0) | ||
1245 | #define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF) | ||
1246 | |||
1247 | #define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \ | ||
1248 | SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value) | ||
1249 | |||
1250 | 1335 | ||
1251 | /* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ | 1336 | /* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ |
1252 | #define SCU_PSZGCR_OFFSET 0x00E4 | 1337 | #define SCU_PSZGCR_OFFSET 0x00E4 |
@@ -1444,12 +1529,10 @@ struct scu_sgpio_registers { | |||
1444 | u32 serial_input_upper; | 1529 | u32 serial_input_upper; |
1445 | /* 0x0018 SGPIO_SGVSCR */ | 1530 | /* 0x0018 SGPIO_SGVSCR */ |
1446 | u32 vendor_specific_code; | 1531 | u32 vendor_specific_code; |
1447 | /* 0x001C Reserved */ | ||
1448 | u32 reserved_001c; | ||
1449 | /* 0x0020 SGPIO_SGODSR */ | 1532 | /* 0x0020 SGPIO_SGODSR */ |
1450 | u32 output_data_select[8]; | 1533 | u32 ouput_data_select[8]; |
1451 | /* Remainder of memory space 256 bytes */ | 1534 | /* Remainder of memory space 256 bytes */ |
1452 | u32 reserved_1444_14ff[0x30]; | 1535 | u32 reserved_1444_14ff[0x31]; |
1453 | 1536 | ||
1454 | }; | 1537 | }; |
1455 | 1538 | ||
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index c3aa6c5457b..b6e6368c266 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c | |||
@@ -53,7 +53,6 @@ | |||
53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
54 | */ | 54 | */ |
55 | #include <scsi/sas.h> | 55 | #include <scsi/sas.h> |
56 | #include <linux/bitops.h> | ||
57 | #include "isci.h" | 56 | #include "isci.h" |
58 | #include "port.h" | 57 | #include "port.h" |
59 | #include "remote_device.h" | 58 | #include "remote_device.h" |
@@ -62,21 +61,46 @@ | |||
62 | #include "scu_event_codes.h" | 61 | #include "scu_event_codes.h" |
63 | #include "task.h" | 62 | #include "task.h" |
64 | 63 | ||
65 | #undef C | 64 | /** |
66 | #define C(a) (#a) | 65 | * isci_remote_device_not_ready() - This function is called by the ihost when |
67 | const char *dev_state_name(enum sci_remote_device_states state) | 66 | * the remote device is not ready. We mark the isci device as ready (not |
67 | * "ready_for_io") and signal the waiting proccess. | ||
68 | * @isci_host: This parameter specifies the isci host object. | ||
69 | * @isci_device: This parameter specifies the remote device | ||
70 | * | ||
71 | * sci_lock is held on entrance to this function. | ||
72 | */ | ||
73 | static void isci_remote_device_not_ready(struct isci_host *ihost, | ||
74 | struct isci_remote_device *idev, u32 reason) | ||
68 | { | 75 | { |
69 | static const char * const strings[] = REMOTE_DEV_STATES; | 76 | struct isci_request *ireq; |
70 | 77 | ||
71 | return strings[state]; | 78 | dev_dbg(&ihost->pdev->dev, |
72 | } | 79 | "%s: isci_device = %p\n", __func__, idev); |
73 | #undef C | ||
74 | 80 | ||
75 | enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, | 81 | switch (reason) { |
76 | enum sci_remote_node_suspension_reasons reason) | 82 | case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED: |
77 | { | 83 | set_bit(IDEV_GONE, &idev->flags); |
78 | return sci_remote_node_context_suspend(&idev->rnc, reason, | 84 | break; |
79 | SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); | 85 | case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: |
86 | set_bit(IDEV_IO_NCQERROR, &idev->flags); | ||
87 | |||
88 | /* Kill all outstanding requests for the device. */ | ||
89 | list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) { | ||
90 | |||
91 | dev_dbg(&ihost->pdev->dev, | ||
92 | "%s: isci_device = %p request = %p\n", | ||
93 | __func__, idev, ireq); | ||
94 | |||
95 | sci_controller_terminate_request(ihost, | ||
96 | idev, | ||
97 | ireq); | ||
98 | } | ||
99 | /* Fall through into the default case... */ | ||
100 | default: | ||
101 | clear_bit(IDEV_IO_READY, &idev->flags); | ||
102 | break; | ||
103 | } | ||
80 | } | 104 | } |
81 | 105 | ||
82 | /** | 106 | /** |
@@ -98,29 +122,18 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote | |||
98 | wake_up(&ihost->eventq); | 122 | wake_up(&ihost->eventq); |
99 | } | 123 | } |
100 | 124 | ||
101 | static enum sci_status sci_remote_device_terminate_req( | 125 | /* called once the remote node context is ready to be freed. |
102 | struct isci_host *ihost, | 126 | * The remote device can now report that its stop operation is complete.
103 | struct isci_remote_device *idev, | 127 | */ |
104 | int check_abort, | 128 | static void rnc_destruct_done(void *_dev) |
105 | struct isci_request *ireq) | ||
106 | { | 129 | { |
107 | if (!test_bit(IREQ_ACTIVE, &ireq->flags) || | 130 | struct isci_remote_device *idev = _dev; |
108 | (ireq->target_device != idev) || | ||
109 | (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags))) | ||
110 | return SCI_SUCCESS; | ||
111 | |||
112 | dev_dbg(&ihost->pdev->dev, | ||
113 | "%s: idev=%p; flags=%lx; req=%p; req target=%p\n", | ||
114 | __func__, idev, idev->flags, ireq, ireq->target_device); | ||
115 | |||
116 | set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); | ||
117 | 131 | ||
118 | return sci_controller_terminate_request(ihost, idev, ireq); | 132 | BUG_ON(idev->started_request_count != 0); |
133 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); | ||
119 | } | 134 | } |
120 | 135 | ||
121 | static enum sci_status sci_remote_device_terminate_reqs_checkabort( | 136 | static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) |
122 | struct isci_remote_device *idev, | ||
123 | int chk) | ||
124 | { | 137 | { |
125 | struct isci_host *ihost = idev->owning_port->owning_controller; | 138 | struct isci_host *ihost = idev->owning_port->owning_controller; |
126 | enum sci_status status = SCI_SUCCESS; | 139 | enum sci_status status = SCI_SUCCESS; |
@@ -130,210 +143,18 @@ static enum sci_status sci_remote_device_terminate_reqs_checkabort( | |||
130 | struct isci_request *ireq = ihost->reqs[i]; | 143 | struct isci_request *ireq = ihost->reqs[i]; |
131 | enum sci_status s; | 144 | enum sci_status s; |
132 | 145 | ||
133 | s = sci_remote_device_terminate_req(ihost, idev, chk, ireq); | 146 | if (!test_bit(IREQ_ACTIVE, &ireq->flags) || |
147 | ireq->target_device != idev) | ||
148 | continue; | ||
149 | |||
150 | s = sci_controller_terminate_request(ihost, idev, ireq); | ||
134 | if (s != SCI_SUCCESS) | 151 | if (s != SCI_SUCCESS) |
135 | status = s; | 152 | status = s; |
136 | } | 153 | } |
137 | return status; | ||
138 | } | ||
139 | |||
140 | static bool isci_compare_suspendcount( | ||
141 | struct isci_remote_device *idev, | ||
142 | u32 localcount) | ||
143 | { | ||
144 | smp_rmb(); | ||
145 | |||
146 | /* Check for a change in the suspend count, or the RNC | ||
147 | * being destroyed. | ||
148 | */ | ||
149 | return (localcount != idev->rnc.suspend_count) | ||
150 | || sci_remote_node_context_is_being_destroyed(&idev->rnc); | ||
151 | } | ||
152 | |||
153 | static bool isci_check_reqterm( | ||
154 | struct isci_host *ihost, | ||
155 | struct isci_remote_device *idev, | ||
156 | struct isci_request *ireq, | ||
157 | u32 localcount) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | bool res; | ||
161 | |||
162 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
163 | res = isci_compare_suspendcount(idev, localcount) | ||
164 | && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); | ||
165 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
166 | 154 | ||
167 | return res; | ||
168 | } | ||
169 | |||
170 | static bool isci_check_devempty( | ||
171 | struct isci_host *ihost, | ||
172 | struct isci_remote_device *idev, | ||
173 | u32 localcount) | ||
174 | { | ||
175 | unsigned long flags; | ||
176 | bool res; | ||
177 | |||
178 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
179 | res = isci_compare_suspendcount(idev, localcount) | ||
180 | && idev->started_request_count == 0; | ||
181 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
182 | |||
183 | return res; | ||
184 | } | ||
185 | |||
186 | enum sci_status isci_remote_device_terminate_requests( | ||
187 | struct isci_host *ihost, | ||
188 | struct isci_remote_device *idev, | ||
189 | struct isci_request *ireq) | ||
190 | { | ||
191 | enum sci_status status = SCI_SUCCESS; | ||
192 | unsigned long flags; | ||
193 | u32 rnc_suspend_count; | ||
194 | |||
195 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
196 | |||
197 | if (isci_get_device(idev) == NULL) { | ||
198 | dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n", | ||
199 | __func__, idev); | ||
200 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
201 | status = SCI_FAILURE; | ||
202 | } else { | ||
203 | /* If already suspended, don't wait for another suspension. */ | ||
204 | smp_rmb(); | ||
205 | rnc_suspend_count | ||
206 | = sci_remote_node_context_is_suspended(&idev->rnc) | ||
207 | ? 0 : idev->rnc.suspend_count; | ||
208 | |||
209 | dev_dbg(&ihost->pdev->dev, | ||
210 | "%s: idev=%p, ireq=%p; started_request_count=%d, " | ||
211 | "rnc_suspend_count=%d, rnc.suspend_count=%d" | ||
212 | "about to wait\n", | ||
213 | __func__, idev, ireq, idev->started_request_count, | ||
214 | rnc_suspend_count, idev->rnc.suspend_count); | ||
215 | |||
216 | #define MAX_SUSPEND_MSECS 10000 | ||
217 | if (ireq) { | ||
218 | /* Terminate a specific TC. */ | ||
219 | set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); | ||
220 | sci_remote_device_terminate_req(ihost, idev, 0, ireq); | ||
221 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
222 | if (!wait_event_timeout(ihost->eventq, | ||
223 | isci_check_reqterm(ihost, idev, ireq, | ||
224 | rnc_suspend_count), | ||
225 | msecs_to_jiffies(MAX_SUSPEND_MSECS))) { | ||
226 | |||
227 | dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n", | ||
228 | __func__, ihost->id); | ||
229 | dev_dbg(&ihost->pdev->dev, | ||
230 | "%s: ******* Timeout waiting for " | ||
231 | "suspend; idev=%p, current state %s; " | ||
232 | "started_request_count=%d, flags=%lx\n\t" | ||
233 | "rnc_suspend_count=%d, rnc.suspend_count=%d " | ||
234 | "RNC: current state %s, current " | ||
235 | "suspend_type %x dest state %d;\n" | ||
236 | "ireq=%p, ireq->flags = %lx\n", | ||
237 | __func__, idev, | ||
238 | dev_state_name(idev->sm.current_state_id), | ||
239 | idev->started_request_count, idev->flags, | ||
240 | rnc_suspend_count, idev->rnc.suspend_count, | ||
241 | rnc_state_name(idev->rnc.sm.current_state_id), | ||
242 | idev->rnc.suspend_type, | ||
243 | idev->rnc.destination_state, | ||
244 | ireq, ireq->flags); | ||
245 | } | ||
246 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
247 | clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); | ||
248 | if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) | ||
249 | isci_free_tag(ihost, ireq->io_tag); | ||
250 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
251 | } else { | ||
252 | /* Terminate all TCs. */ | ||
253 | sci_remote_device_terminate_requests(idev); | ||
254 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
255 | if (!wait_event_timeout(ihost->eventq, | ||
256 | isci_check_devempty(ihost, idev, | ||
257 | rnc_suspend_count), | ||
258 | msecs_to_jiffies(MAX_SUSPEND_MSECS))) { | ||
259 | |||
260 | dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n", | ||
261 | __func__, ihost->id); | ||
262 | dev_dbg(&ihost->pdev->dev, | ||
263 | "%s: ******* Timeout waiting for " | ||
264 | "suspend; idev=%p, current state %s; " | ||
265 | "started_request_count=%d, flags=%lx\n\t" | ||
266 | "rnc_suspend_count=%d, " | ||
267 | "RNC: current state %s, " | ||
268 | "rnc.suspend_count=%d, current " | ||
269 | "suspend_type %x dest state %d\n", | ||
270 | __func__, idev, | ||
271 | dev_state_name(idev->sm.current_state_id), | ||
272 | idev->started_request_count, idev->flags, | ||
273 | rnc_suspend_count, | ||
274 | rnc_state_name(idev->rnc.sm.current_state_id), | ||
275 | idev->rnc.suspend_count, | ||
276 | idev->rnc.suspend_type, | ||
277 | idev->rnc.destination_state); | ||
278 | } | ||
279 | } | ||
280 | dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n", | ||
281 | __func__, idev); | ||
282 | isci_put_device(idev); | ||
283 | } | ||
284 | return status; | 155 | return status; |
285 | } | 156 | } |
286 | 157 | ||
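The removed isci_remote_device_terminate_requests() above (with its isci_check_* helpers) is a snapshot-and-wait idiom: record the RNC suspend count under scic_lock, kick off termination, drop the lock, then sleep on ihost->eventq until the completion path bumps the counter or drains the device, bounded by MAX_SUSPEND_MSECS. A hedged distillation of the all-requests path, reusing the names defined in the removed code:

/* Snapshot the generation counter under the lock... */
spin_lock_irqsave(&ihost->scic_lock, flags);
rnc_suspend_count = sci_remote_node_context_is_suspended(&idev->rnc)
		    ? 0 : idev->rnc.suspend_count;
sci_remote_device_terminate_requests(idev); /* terminate all TCs */
spin_unlock_irqrestore(&ihost->scic_lock, flags);

/* ...then wait for the IRQ path to signal progress, or time out. */
if (!wait_event_timeout(ihost->eventq,
			isci_check_devempty(ihost, idev, rnc_suspend_count),
			msecs_to_jiffies(MAX_SUSPEND_MSECS)))
	dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
		 __func__, ihost->id);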
287 | /** | ||
288 | * isci_remote_device_not_ready() - This function is called by the ihost when | ||
289 | * the remote device is not ready. We mark the isci device as ready (not | ||
290 | * "ready_for_io") and signal the waiting proccess. | ||
291 | * @isci_host: This parameter specifies the isci host object. | ||
292 | * @isci_device: This parameter specifies the remote device | ||
293 | * | ||
294 | * sci_lock is held on entrance to this function. | ||
295 | */ | ||
296 | static void isci_remote_device_not_ready(struct isci_host *ihost, | ||
297 | struct isci_remote_device *idev, | ||
298 | u32 reason) | ||
299 | { | ||
300 | dev_dbg(&ihost->pdev->dev, | ||
301 | "%s: isci_device = %p; reason = %d\n", __func__, idev, reason); | ||
302 | |||
303 | switch (reason) { | ||
304 | case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: | ||
305 | set_bit(IDEV_IO_NCQERROR, &idev->flags); | ||
306 | |||
307 | /* Suspend the remote device so the I/O can be terminated. */ | ||
308 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); | ||
309 | |||
310 | /* Kill all outstanding requests for the device. */ | ||
311 | sci_remote_device_terminate_requests(idev); | ||
312 | |||
313 | /* Fall through into the default case... */ | ||
314 | default: | ||
315 | clear_bit(IDEV_IO_READY, &idev->flags); | ||
316 | break; | ||
317 | } | ||
318 | } | ||
319 | |||
320 | /* called once the remote node context is ready to be freed. | ||
321 | * The remote device can now report that its stop operation is complete. ||
322 | */ | ||
323 | static void rnc_destruct_done(void *_dev) | ||
324 | { | ||
325 | struct isci_remote_device *idev = _dev; | ||
326 | |||
327 | BUG_ON(idev->started_request_count != 0); | ||
328 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); | ||
329 | } | ||
330 | |||
331 | enum sci_status sci_remote_device_terminate_requests( | ||
332 | struct isci_remote_device *idev) | ||
333 | { | ||
334 | return sci_remote_device_terminate_reqs_checkabort(idev, 0); | ||
335 | } | ||
336 | |||
337 | enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, | 158 | enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, |
338 | u32 timeout) | 159 | u32 timeout) |
339 | { | 160 | { |
@@ -345,8 +166,8 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, | |||
345 | case SCI_DEV_FAILED: | 166 | case SCI_DEV_FAILED: |
346 | case SCI_DEV_FINAL: | 167 | case SCI_DEV_FINAL: |
347 | default: | 168 | default: |
348 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 169 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
349 | __func__, dev_state_name(state)); | 170 | __func__, state); |
350 | return SCI_FAILURE_INVALID_STATE; | 171 | return SCI_FAILURE_INVALID_STATE; |
351 | case SCI_DEV_STOPPED: | 172 | case SCI_DEV_STOPPED: |
352 | return SCI_SUCCESS; | 173 | return SCI_SUCCESS; |
@@ -369,16 +190,13 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, | |||
369 | case SCI_SMP_DEV_IDLE: | 190 | case SCI_SMP_DEV_IDLE: |
370 | case SCI_SMP_DEV_CMD: | 191 | case SCI_SMP_DEV_CMD: |
371 | sci_change_state(sm, SCI_DEV_STOPPING); | 192 | sci_change_state(sm, SCI_DEV_STOPPING); |
372 | if (idev->started_request_count == 0) | 193 | if (idev->started_request_count == 0) { |
373 | sci_remote_node_context_destruct(&idev->rnc, | 194 | sci_remote_node_context_destruct(&idev->rnc, |
374 | rnc_destruct_done, | 195 | rnc_destruct_done, idev); |
375 | idev); | 196 | return SCI_SUCCESS; |
376 | else { | 197 | } else |
377 | sci_remote_device_suspend( | 198 | return sci_remote_device_terminate_requests(idev); |
378 | idev, SCI_SW_SUSPEND_LINKHANG_DETECT); | 199 | break; |
379 | sci_remote_device_terminate_requests(idev); | ||
380 | } | ||
381 | return SCI_SUCCESS; | ||
382 | case SCI_DEV_STOPPING: | 200 | case SCI_DEV_STOPPING: |
383 | /* All requests should have been terminated, but if there is an | 201 | /* All requests should have been terminated, but if there is an |
384 | * attempt to stop a device already in the stopping state, then | 202 | * attempt to stop a device already in the stopping state, then |
@@ -407,8 +225,8 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) | |||
407 | case SCI_DEV_RESETTING: | 225 | case SCI_DEV_RESETTING: |
408 | case SCI_DEV_FINAL: | 226 | case SCI_DEV_FINAL: |
409 | default: | 227 | default: |
410 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 228 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
411 | __func__, dev_state_name(state)); | 229 | __func__, state); |
412 | return SCI_FAILURE_INVALID_STATE; | 230 | return SCI_FAILURE_INVALID_STATE; |
413 | case SCI_DEV_READY: | 231 | case SCI_DEV_READY: |
414 | case SCI_STP_DEV_IDLE: | 232 | case SCI_STP_DEV_IDLE: |
@@ -427,8 +245,8 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev | |||
427 | enum sci_remote_device_states state = sm->current_state_id; | 245 | enum sci_remote_device_states state = sm->current_state_id; |
428 | 246 | ||
429 | if (state != SCI_DEV_RESETTING) { | 247 | if (state != SCI_DEV_RESETTING) { |
430 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 248 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
431 | __func__, dev_state_name(state)); | 249 | __func__, state); |
432 | return SCI_FAILURE_INVALID_STATE; | 250 | return SCI_FAILURE_INVALID_STATE; |
433 | } | 251 | } |
434 | 252 | ||
@@ -436,6 +254,22 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev | |||
436 | return SCI_SUCCESS; | 254 | return SCI_SUCCESS; |
437 | } | 255 | } |
438 | 256 | ||
257 | enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, | ||
258 | u32 suspend_type) | ||
259 | { | ||
260 | struct sci_base_state_machine *sm = &idev->sm; | ||
261 | enum sci_remote_device_states state = sm->current_state_id; | ||
262 | |||
263 | if (state != SCI_STP_DEV_CMD) { | ||
264 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", | ||
265 | __func__, state); | ||
266 | return SCI_FAILURE_INVALID_STATE; | ||
267 | } | ||
268 | |||
269 | return sci_remote_node_context_suspend(&idev->rnc, | ||
270 | suspend_type, NULL, NULL); | ||
271 | } | ||
272 | |||
439 | enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, | 273 | enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, |
440 | u32 frame_index) | 274 | u32 frame_index) |
441 | { | 275 | { |
@@ -452,8 +286,8 @@ enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, | |||
452 | case SCI_SMP_DEV_IDLE: | 286 | case SCI_SMP_DEV_IDLE: |
453 | case SCI_DEV_FINAL: | 287 | case SCI_DEV_FINAL: |
454 | default: | 288 | default: |
455 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 289 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
456 | __func__, dev_state_name(state)); | 290 | __func__, state); |
457 | /* Return the frame back to the controller */ | 291 | /* Return the frame back to the controller */ |
458 | sci_controller_release_frame(ihost, frame_index); | 292 | sci_controller_release_frame(ihost, frame_index); |
459 | return SCI_FAILURE_INVALID_STATE; | 293 | return SCI_FAILURE_INVALID_STATE; |
@@ -552,24 +386,12 @@ static bool is_remote_device_ready(struct isci_remote_device *idev) | |||
552 | } | 386 | } |
553 | } | 387 | } |
554 | 388 | ||
555 | /* | ||
556 | * called once the remote node context has transitioned to a ready ||
557 | * state (after suspending RX and/or TX due to early D2H fis) | ||
558 | */ | ||
559 | static void atapi_remote_device_resume_done(void *_dev) | ||
560 | { | ||
561 | struct isci_remote_device *idev = _dev; | ||
562 | struct isci_request *ireq = idev->working_request; | ||
563 | |||
564 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | ||
565 | } | ||
566 | |||
567 | enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, | 389 | enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, |
568 | u32 event_code) | 390 | u32 event_code) |
569 | { | 391 | { |
570 | enum sci_status status; | ||
571 | struct sci_base_state_machine *sm = &idev->sm; | 392 | struct sci_base_state_machine *sm = &idev->sm; |
572 | enum sci_remote_device_states state = sm->current_state_id; | 393 | enum sci_remote_device_states state = sm->current_state_id; |
394 | enum sci_status status; | ||
573 | 395 | ||
574 | switch (scu_get_event_type(event_code)) { | 396 | switch (scu_get_event_type(event_code)) { |
575 | case SCU_EVENT_TYPE_RNC_OPS_MISC: | 397 | case SCU_EVENT_TYPE_RNC_OPS_MISC: |
@@ -582,7 +404,9 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, | |||
582 | status = SCI_SUCCESS; | 404 | status = SCI_SUCCESS; |
583 | 405 | ||
584 | /* Suspend the associated RNC */ | 406 | /* Suspend the associated RNC */ |
585 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); | 407 | sci_remote_node_context_suspend(&idev->rnc, |
408 | SCI_SOFTWARE_SUSPENSION, | ||
409 | NULL, NULL); | ||
586 | 410 | ||
587 | dev_dbg(scirdev_to_dev(idev), | 411 | dev_dbg(scirdev_to_dev(idev), |
588 | "%s: device: %p event code: %x: %s\n", | 412 | "%s: device: %p event code: %x: %s\n", |
@@ -608,20 +432,6 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, | |||
608 | if (status != SCI_SUCCESS) | 432 | if (status != SCI_SUCCESS) |
609 | return status; | 433 | return status; |
610 | 434 | ||
611 | /* Decode device-specific states that may require an RNC resume during | ||
612 | * normal operation. When the abort path is active, these resumes are | ||
613 | * managed when the abort path exits. | ||
614 | */ | ||
615 | if (state == SCI_STP_DEV_ATAPI_ERROR) { | ||
616 | /* For ATAPI error state resume the RNC right away. */ | ||
617 | if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || | ||
618 | scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) { | ||
619 | return sci_remote_node_context_resume(&idev->rnc, | ||
620 | atapi_remote_device_resume_done, | ||
621 | idev); | ||
622 | } | ||
623 | } | ||
624 | |||
625 | if (state == SCI_STP_DEV_IDLE) { | 435 | if (state == SCI_STP_DEV_IDLE) { |
626 | 436 | ||
627 | /* We pick up suspension events to handle specifically to this | 437 | /* We pick up suspension events to handle specifically to this |
@@ -669,8 +479,8 @@ enum sci_status sci_remote_device_start_io(struct isci_host *ihost, | |||
669 | case SCI_DEV_RESETTING: | 479 | case SCI_DEV_RESETTING: |
670 | case SCI_DEV_FINAL: | 480 | case SCI_DEV_FINAL: |
671 | default: | 481 | default: |
672 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 482 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
673 | __func__, dev_state_name(state)); | 483 | __func__, state); |
674 | return SCI_FAILURE_INVALID_STATE; | 484 | return SCI_FAILURE_INVALID_STATE; |
675 | case SCI_DEV_READY: | 485 | case SCI_DEV_READY: |
676 | /* attempt to start an io request for this device object. The remote | 486 | /* attempt to start an io request for this device object. The remote |
@@ -804,8 +614,8 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, | |||
804 | case SCI_DEV_FAILED: | 614 | case SCI_DEV_FAILED: |
805 | case SCI_DEV_FINAL: | 615 | case SCI_DEV_FINAL: |
806 | default: | 616 | default: |
807 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 617 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
808 | __func__, dev_state_name(state)); | 618 | __func__, state); |
809 | return SCI_FAILURE_INVALID_STATE; | 619 | return SCI_FAILURE_INVALID_STATE; |
810 | case SCI_DEV_READY: | 620 | case SCI_DEV_READY: |
811 | case SCI_STP_DEV_AWAIT_RESET: | 621 | case SCI_STP_DEV_AWAIT_RESET: |
@@ -815,7 +625,6 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, | |||
815 | case SCI_STP_DEV_CMD: | 625 | case SCI_STP_DEV_CMD: |
816 | case SCI_STP_DEV_NCQ: | 626 | case SCI_STP_DEV_NCQ: |
817 | case SCI_STP_DEV_NCQ_ERROR: | 627 | case SCI_STP_DEV_NCQ_ERROR: |
818 | case SCI_STP_DEV_ATAPI_ERROR: | ||
819 | status = common_complete_io(iport, idev, ireq); | 628 | status = common_complete_io(iport, idev, ireq); |
820 | if (status != SCI_SUCCESS) | 629 | if (status != SCI_SUCCESS) |
821 | break; | 630 | break; |
@@ -888,8 +697,8 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
888 | case SCI_DEV_RESETTING: | 697 | case SCI_DEV_RESETTING: |
889 | case SCI_DEV_FINAL: | 698 | case SCI_DEV_FINAL: |
890 | default: | 699 | default: |
891 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 700 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
892 | __func__, dev_state_name(state)); | 701 | __func__, state); |
893 | return SCI_FAILURE_INVALID_STATE; | 702 | return SCI_FAILURE_INVALID_STATE; |
894 | case SCI_STP_DEV_IDLE: | 703 | case SCI_STP_DEV_IDLE: |
895 | case SCI_STP_DEV_CMD: | 704 | case SCI_STP_DEV_CMD: |
@@ -900,6 +709,10 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
900 | if (status != SCI_SUCCESS) | 709 | if (status != SCI_SUCCESS) |
901 | return status; | 710 | return status; |
902 | 711 | ||
712 | status = sci_remote_node_context_start_task(&idev->rnc, ireq); | ||
713 | if (status != SCI_SUCCESS) | ||
714 | goto out; | ||
715 | |||
903 | status = sci_request_start(ireq); | 716 | status = sci_request_start(ireq); |
904 | if (status != SCI_SUCCESS) | 717 | if (status != SCI_SUCCESS) |
905 | goto out; | 718 | goto out; |
@@ -918,11 +731,11 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
918 | * the correct action when the remote node context is suspended | 731 | * the correct action when the remote node context is suspended |
919 | * and later resumed. | 732 | * and later resumed. |
920 | */ | 733 | */ |
921 | sci_remote_device_suspend(idev, | 734 | sci_remote_node_context_suspend(&idev->rnc, |
922 | SCI_SW_SUSPEND_LINKHANG_DETECT); | 735 | SCI_SOFTWARE_SUSPENSION, NULL, NULL); |
923 | 736 | sci_remote_node_context_resume(&idev->rnc, | |
924 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, | 737 | sci_remote_device_continue_request, |
925 | sci_remote_device_continue_request, idev); | 738 | idev); |
926 | 739 | ||
927 | out: | 740 | out: |
928 | sci_remote_device_start_request(idev, ireq, status); | 741 | sci_remote_device_start_request(idev, ireq, status); |
@@ -936,9 +749,7 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
936 | if (status != SCI_SUCCESS) | 749 | if (status != SCI_SUCCESS) |
937 | return status; | 750 | return status; |
938 | 751 | ||
939 | /* Resume the RNC as needed: */ | 752 | status = sci_remote_node_context_start_task(&idev->rnc, ireq); |
940 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, | ||
941 | NULL, NULL); | ||
942 | if (status != SCI_SUCCESS) | 753 | if (status != SCI_SUCCESS) |
943 | break; | 754 | break; |
944 | 755 | ||
@@ -1018,8 +829,8 @@ static enum sci_status sci_remote_device_destruct(struct isci_remote_device *ide | |||
1018 | struct isci_host *ihost; | 829 | struct isci_host *ihost; |
1019 | 830 | ||
1020 | if (state != SCI_DEV_STOPPED) { | 831 | if (state != SCI_DEV_STOPPED) { |
1021 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 832 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
1022 | __func__, dev_state_name(state)); | 833 | __func__, state); |
1023 | return SCI_FAILURE_INVALID_STATE; | 834 | return SCI_FAILURE_INVALID_STATE; |
1024 | } | 835 | } |
1025 | 836 | ||
@@ -1047,7 +858,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ | |||
1047 | * here should go through isci_remote_device_nuke_requests. | 858 | * here should go through isci_remote_device_nuke_requests. |
1048 | * If we hit this condition, we will need a way to complete | 859 | * If we hit this condition, we will need a way to complete |
1049 | * io requests in process */ | 860 | * io requests in process */ |
1050 | BUG_ON(idev->started_request_count > 0); | 861 | BUG_ON(!list_empty(&idev->reqs_in_process)); |
1051 | 862 | ||
1052 | sci_remote_device_destruct(idev); | 863 | sci_remote_device_destruct(idev); |
1053 | list_del_init(&idev->node); | 864 | list_del_init(&idev->node); |
@@ -1109,21 +920,14 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm | |||
1109 | static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) | 920 | static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) |
1110 | { | 921 | { |
1111 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 922 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1112 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
1113 | |||
1114 | dev_dbg(&ihost->pdev->dev, | ||
1115 | "%s: isci_device = %p\n", __func__, idev); | ||
1116 | 923 | ||
1117 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); | 924 | sci_remote_node_context_suspend( |
925 | &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); | ||
1118 | } | 926 | } |
1119 | 927 | ||
1120 | static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) | 928 | static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) |
1121 | { | 929 | { |
1122 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 930 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1123 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
1124 | |||
1125 | dev_dbg(&ihost->pdev->dev, | ||
1126 | "%s: isci_device = %p\n", __func__, idev); | ||
1127 | 931 | ||
1128 | sci_remote_node_context_resume(&idev->rnc, NULL, NULL); | 932 | sci_remote_node_context_resume(&idev->rnc, NULL, NULL); |
1129 | } | 933 | } |
@@ -1216,7 +1020,6 @@ static const struct sci_base_state sci_remote_device_state_table[] = { | |||
1216 | [SCI_STP_DEV_NCQ_ERROR] = { | 1020 | [SCI_STP_DEV_NCQ_ERROR] = { |
1217 | .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, | 1021 | .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, |
1218 | }, | 1022 | }, |
1219 | [SCI_STP_DEV_ATAPI_ERROR] = { }, | ||
1220 | [SCI_STP_DEV_AWAIT_RESET] = { }, | 1023 | [SCI_STP_DEV_AWAIT_RESET] = { }, |
1221 | [SCI_SMP_DEV_IDLE] = { | 1024 | [SCI_SMP_DEV_IDLE] = { |
1222 | .enter_state = sci_smp_remote_device_ready_idle_substate_enter, | 1025 | .enter_state = sci_smp_remote_device_ready_idle_substate_enter, |
@@ -1274,23 +1077,33 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, | |||
1274 | struct isci_remote_device *idev) | 1077 | struct isci_remote_device *idev) |
1275 | { | 1078 | { |
1276 | enum sci_status status; | 1079 | enum sci_status status; |
1277 | struct sci_port_properties properties; | 1080 | struct domain_device *dev = idev->domain_dev; |
1278 | 1081 | ||
1279 | sci_remote_device_construct(iport, idev); | 1082 | sci_remote_device_construct(iport, idev); |
1280 | 1083 | ||
1281 | sci_port_get_properties(iport, &properties); | 1084 | /* |
1282 | /* Get accurate port width from port's phy mask for a DA device. */ | 1085 | * This information is requested to determine how many remote node context |
1283 | idev->device_port_width = hweight32(properties.phy_mask); | 1086 | * entries will be needed to store the remote node. |
1284 | 1087 | */ | |
1088 | idev->is_direct_attached = true; | ||
1285 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, | 1089 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, |
1286 | idev, | 1090 | idev, |
1287 | &idev->rnc.remote_node_index); | 1091 | &idev->rnc.remote_node_index); |
1288 | 1092 | ||
1289 | if (status != SCI_SUCCESS) | 1093 | if (status != SCI_SUCCESS) |
1290 | return status; | 1094 | return status; |
1291 | 1095 | ||
1096 | if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || | ||
1097 | (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev)) | ||
1098 | /* pass */; | ||
1099 | else | ||
1100 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; | ||
1101 | |||
1292 | idev->connection_rate = sci_port_get_max_allowed_speed(iport); | 1102 | idev->connection_rate = sci_port_get_max_allowed_speed(iport); |
1293 | 1103 | ||
1104 | /* @todo Should I assign the port width by reading all of the phys on the port? */ | ||
1105 | idev->device_port_width = 1; | ||
1106 | |||
1294 | return SCI_SUCCESS; | 1107 | return SCI_SUCCESS; |
1295 | } | 1108 | } |
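The left-hand column of this hunk derives the direct-attached port width from the port's phy mask rather than hard-coding it to 1. A minimal standalone sketch of that population count, with __builtin_popcount() standing in for the kernel's hweight32():

/* Sketch: derive a direct-attached port width from a phy mask.
 * __builtin_popcount() stands in for the kernel's hweight32().
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int port_width_from_phy_mask(uint32_t phy_mask)
{
	/* Each set bit in the mask is one active phy in the wide port. */
	return (unsigned int)__builtin_popcount(phy_mask);
}

int main(void)
{
	printf("width = %u\n", port_width_from_phy_mask(0xFu)); /* phys 0-3 -> 4 */
	return 0;
}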
1296 | 1109 | ||
@@ -1320,13 +1133,19 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, | |||
1320 | if (status != SCI_SUCCESS) | 1133 | if (status != SCI_SUCCESS) |
1321 | return status; | 1134 | return status; |
1322 | 1135 | ||
1323 | /* For SAS-2 the physical link rate is actually a logical link | 1136 | if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || |
1137 | (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev)) | ||
1138 | /* pass */; | ||
1139 | else | ||
1140 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; | ||
1141 | |||
1142 | /* | ||
1143 | * For SAS-2 the physical link rate is actually a logical link | ||
1324 | * rate that incorporates multiplexing. The SCU doesn't | 1144 | * rate that incorporates multiplexing. The SCU doesn't |
1325 | * incorporate multiplexing and for the purposes of the | 1145 | * incorporate multiplexing and for the purposes of the |
1326 | * connection the logical link rate is the same as the | 1146 | * connection the logical link rate is the same as the |
1327 | * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay | 1147 | * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay |
1328 | * one another, so this code works for both situations. | 1148 | * one another, so this code works for both situations. */ |
1329 | */ | ||
1330 | idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), | 1149 | idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), |
1331 | dev->linkrate); | 1150 | dev->linkrate); |
1332 | 1151 | ||
@@ -1336,105 +1155,6 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, | |||
1336 | return SCI_SUCCESS; | 1155 | return SCI_SUCCESS; |
1337 | } | 1156 | } |
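Both columns clamp the negotiated connection rate so an expander-attached device is never driven faster than the local port allows; min_t(u16, ...) is just a type-checked minimum. A standalone equivalent, with clamp_connection_rate() as an illustrative name:

/* Sketch of the connection-rate clamp performed with min_t() above:
 * take the smaller of the port's maximum speed and the device's
 * negotiated link rate.
 */
#include <stdint.h>

static uint16_t clamp_connection_rate(uint16_t port_max, uint16_t dev_linkrate)
{
	return port_max < dev_linkrate ? port_max : dev_linkrate;
}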
1338 | 1157 | ||
1339 | enum sci_status sci_remote_device_resume( | ||
1340 | struct isci_remote_device *idev, | ||
1341 | scics_sds_remote_node_context_callback cb_fn, | ||
1342 | void *cb_p) | ||
1343 | { | ||
1344 | enum sci_status status; | ||
1345 | |||
1346 | status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p); | ||
1347 | if (status != SCI_SUCCESS) | ||
1348 | dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n", | ||
1349 | __func__, status); | ||
1350 | return status; | ||
1351 | } | ||
1352 | |||
1353 | static void isci_remote_device_resume_from_abort_complete(void *cbparam) | ||
1354 | { | ||
1355 | struct isci_remote_device *idev = cbparam; | ||
1356 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
1357 | scics_sds_remote_node_context_callback abort_resume_cb = | ||
1358 | idev->abort_resume_cb; | ||
1359 | |||
1360 | dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n", | ||
1361 | __func__, abort_resume_cb); | ||
1362 | |||
1363 | if (abort_resume_cb != NULL) { | ||
1364 | idev->abort_resume_cb = NULL; | ||
1365 | abort_resume_cb(idev->abort_resume_cbparam); | ||
1366 | } | ||
1367 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | ||
1368 | wake_up(&ihost->eventq); | ||
1369 | } | ||
1370 | |||
1371 | static bool isci_remote_device_test_resume_done( | ||
1372 | struct isci_host *ihost, | ||
1373 | struct isci_remote_device *idev) | ||
1374 | { | ||
1375 | unsigned long flags; | ||
1376 | bool done; | ||
1377 | |||
1378 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1379 | done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags) | ||
1380 | || test_bit(IDEV_STOP_PENDING, &idev->flags) | ||
1381 | || sci_remote_node_context_is_being_destroyed(&idev->rnc); | ||
1382 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1383 | |||
1384 | return done; | ||
1385 | } | ||
1386 | |||
1387 | void isci_remote_device_wait_for_resume_from_abort( | ||
1388 | struct isci_host *ihost, | ||
1389 | struct isci_remote_device *idev) | ||
1390 | { | ||
1391 | dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n", | ||
1392 | __func__, idev); | ||
1393 | |||
1394 | #define MAX_RESUME_MSECS 10000 | ||
1395 | if (!wait_event_timeout(ihost->eventq, | ||
1396 | isci_remote_device_test_resume_done(ihost, idev), | ||
1397 | msecs_to_jiffies(MAX_RESUME_MSECS))) { | ||
1398 | |||
1399 | dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for " | ||
1400 | "resume: %p\n", __func__, idev); | ||
1401 | } | ||
1402 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | ||
1403 | |||
1404 | dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n", | ||
1405 | __func__, idev); | ||
1406 | } | ||
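The removed wait helper above is the stock wait_event_timeout() idiom: sleep until a predicate becomes true or a deadline expires, then warn on timeout. A kernel-style sketch of the same idiom; wait_for_flag_clear() is a hypothetical helper, while the wait-queue and bit-flag calls are the kernel APIs used in the hunk:

/* Kernel-style sketch (not standalone): bounded wait on a predicate.
 * wait_event_timeout() returns 0 on timeout, nonzero otherwise.
 */
#define MAX_WAIT_MSECS 10000

static void wait_for_flag_clear(struct isci_host *ihost,
				unsigned long *flags, int bit)
{
	if (!wait_event_timeout(ihost->eventq,
				!test_bit(bit, flags),
				msecs_to_jiffies(MAX_WAIT_MSECS)))
		dev_warn(&ihost->pdev->dev,
			 "timeout waiting for bit %d to clear\n", bit);
}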
1407 | |||
1408 | enum sci_status isci_remote_device_resume_from_abort( | ||
1409 | struct isci_host *ihost, | ||
1410 | struct isci_remote_device *idev) | ||
1411 | { | ||
1412 | unsigned long flags; | ||
1413 | enum sci_status status = SCI_SUCCESS; | ||
1414 | int destroyed; | ||
1415 | |||
1416 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1417 | /* Preserve any current resume callbacks, for instance from other | ||
1418 | * resumptions. | ||
1419 | */ | ||
1420 | idev->abort_resume_cb = idev->rnc.user_callback; | ||
1421 | idev->abort_resume_cbparam = idev->rnc.user_cookie; | ||
1422 | set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | ||
1423 | clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); | ||
1424 | destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc); | ||
1425 | if (!destroyed) | ||
1426 | status = sci_remote_device_resume( | ||
1427 | idev, isci_remote_device_resume_from_abort_complete, | ||
1428 | idev); | ||
1429 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1430 | if (!destroyed && (status == SCI_SUCCESS)) | ||
1431 | isci_remote_device_wait_for_resume_from_abort(ihost, idev); | ||
1432 | else | ||
1433 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | ||
1434 | |||
1435 | return status; | ||
1436 | } | ||
1437 | |||
1438 | /** | 1158 | /** |
1439 | * sci_remote_device_start() - This method will start the supplied remote | 1159 | * sci_remote_device_start() - This method will start the supplied remote |
1440 | * device. This method enables normal IO requests to flow through to the | 1160 | * device. This method enables normal IO requests to flow through to the |
@@ -1449,20 +1169,21 @@ enum sci_status isci_remote_device_resume_from_abort( | |||
1449 | * the device when there have been no phys added to it. | 1169 | * the device when there have been no phys added to it. |
1450 | */ | 1170 | */ |
1451 | static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, | 1171 | static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, |
1452 | u32 timeout) | 1172 | u32 timeout) |
1453 | { | 1173 | { |
1454 | struct sci_base_state_machine *sm = &idev->sm; | 1174 | struct sci_base_state_machine *sm = &idev->sm; |
1455 | enum sci_remote_device_states state = sm->current_state_id; | 1175 | enum sci_remote_device_states state = sm->current_state_id; |
1456 | enum sci_status status; | 1176 | enum sci_status status; |
1457 | 1177 | ||
1458 | if (state != SCI_DEV_STOPPED) { | 1178 | if (state != SCI_DEV_STOPPED) { |
1459 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 1179 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
1460 | __func__, dev_state_name(state)); | 1180 | __func__, state); |
1461 | return SCI_FAILURE_INVALID_STATE; | 1181 | return SCI_FAILURE_INVALID_STATE; |
1462 | } | 1182 | } |
1463 | 1183 | ||
1464 | status = sci_remote_device_resume(idev, remote_device_resume_done, | 1184 | status = sci_remote_node_context_resume(&idev->rnc, |
1465 | idev); | 1185 | remote_device_resume_done, |
1186 | idev); | ||
1466 | if (status != SCI_SUCCESS) | 1187 | if (status != SCI_SUCCESS) |
1467 | return status; | 1188 | return status; |
1468 | 1189 | ||
@@ -1500,6 +1221,20 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, | |||
1500 | return status; | 1221 | return status; |
1501 | } | 1222 | } |
1502 | 1223 | ||
1224 | void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev) | ||
1225 | { | ||
1226 | DECLARE_COMPLETION_ONSTACK(aborted_task_completion); | ||
1227 | |||
1228 | dev_dbg(&ihost->pdev->dev, | ||
1229 | "%s: idev = %p\n", __func__, idev); | ||
1230 | |||
1231 | /* Cleanup all requests pending for this device. */ | ||
1232 | isci_terminate_pending_requests(ihost, idev); | ||
1233 | |||
1234 | dev_dbg(&ihost->pdev->dev, | ||
1235 | "%s: idev = %p, done\n", __func__, idev); | ||
1236 | } | ||
1237 | |||
1503 | /** | 1238 | /** |
1504 | * This function builds the isci_remote_device when a libsas dev_found message | 1239 | * This function builds the isci_remote_device when a libsas dev_found message |
1505 | * is received. | 1240 | * is received. |
@@ -1524,6 +1259,10 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) | |||
1524 | dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); | 1259 | dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); |
1525 | return NULL; | 1260 | return NULL; |
1526 | } | 1261 | } |
1262 | |||
1263 | if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n")) | ||
1264 | return NULL; | ||
1265 | |||
1527 | if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) | 1266 | if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) |
1528 | return NULL; | 1267 | return NULL; |
1529 | 1268 | ||
@@ -1541,6 +1280,7 @@ void isci_remote_device_release(struct kref *kref) | |||
1541 | clear_bit(IDEV_STOP_PENDING, &idev->flags); | 1280 | clear_bit(IDEV_STOP_PENDING, &idev->flags); |
1542 | clear_bit(IDEV_IO_READY, &idev->flags); | 1281 | clear_bit(IDEV_IO_READY, &idev->flags); |
1543 | clear_bit(IDEV_GONE, &idev->flags); | 1282 | clear_bit(IDEV_GONE, &idev->flags); |
1283 | clear_bit(IDEV_EH, &idev->flags); | ||
1544 | smp_mb__before_clear_bit(); | 1284 | smp_mb__before_clear_bit(); |
1545 | clear_bit(IDEV_ALLOCATED, &idev->flags); | 1285 | clear_bit(IDEV_ALLOCATED, &idev->flags); |
1546 | wake_up(&ihost->eventq); | 1286 | wake_up(&ihost->eventq); |
@@ -1565,8 +1305,14 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem | |||
1565 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1305 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1566 | idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ | 1306 | idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ |
1567 | set_bit(IDEV_GONE, &idev->flags); | 1307 | set_bit(IDEV_GONE, &idev->flags); |
1308 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1309 | |||
1310 | /* Kill all outstanding requests. */ | ||
1311 | isci_remote_device_nuke_requests(ihost, idev); | ||
1568 | 1312 | ||
1569 | set_bit(IDEV_STOP_PENDING, &idev->flags); | 1313 | set_bit(IDEV_STOP_PENDING, &idev->flags); |
1314 | |||
1315 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1570 | status = sci_remote_device_stop(idev, 50); | 1316 | status = sci_remote_device_stop(idev, 50); |
1571 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1317 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1572 | 1318 | ||
@@ -1576,9 +1322,6 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem | |||
1576 | else | 1322 | else |
1577 | wait_for_device_stop(ihost, idev); | 1323 | wait_for_device_stop(ihost, idev); |
1578 | 1324 | ||
1579 | dev_dbg(&ihost->pdev->dev, | ||
1580 | "%s: isci_device = %p, waiting done.\n", __func__, idev); | ||
1581 | |||
1582 | return status; | 1325 | return status; |
1583 | } | 1326 | } |
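Note the lock choreography in the right-hand stop path: the device is marked gone under scic_lock, the lock is dropped so the request flush can sleep, then the lock is retaken for the state-machine stop. A condensed kernel-style sketch; device_stop_sequence() is a hypothetical wrapper around the calls shown in the hunk:

/* Kernel-style sketch: mark-gone / flush / stop ordering. The lock is
 * released across the flush because terminating requests can sleep.
 */
static void device_stop_sequence(struct isci_host *ihost,
				 struct isci_remote_device *idev)
{
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IDEV_GONE, &idev->flags);	/* disable new lookups */
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	isci_remote_device_nuke_requests(ihost, idev);	/* may sleep */

	set_bit(IDEV_STOP_PENDING, &idev->flags);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	sci_remote_device_stop(idev, 50);	/* 50 ms hardware timeout */
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}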
1584 | 1327 | ||
@@ -1610,17 +1353,34 @@ void isci_remote_device_gone(struct domain_device *dev) | |||
1610 | * | 1353 | * |
1611 | * status, zero indicates success. | 1354 | * status, zero indicates success. |
1612 | */ | 1355 | */ |
1613 | int isci_remote_device_found(struct domain_device *dev) | 1356 | int isci_remote_device_found(struct domain_device *domain_dev) |
1614 | { | 1357 | { |
1615 | struct isci_host *isci_host = dev_to_ihost(dev); | 1358 | struct isci_host *isci_host = dev_to_ihost(domain_dev); |
1616 | struct isci_port *isci_port = dev->port->lldd_port; | 1359 | struct isci_port *isci_port; |
1360 | struct isci_phy *isci_phy; | ||
1361 | struct asd_sas_port *sas_port; | ||
1362 | struct asd_sas_phy *sas_phy; | ||
1617 | struct isci_remote_device *isci_device; | 1363 | struct isci_remote_device *isci_device; |
1618 | enum sci_status status; | 1364 | enum sci_status status; |
1619 | 1365 | ||
1620 | dev_dbg(&isci_host->pdev->dev, | 1366 | dev_dbg(&isci_host->pdev->dev, |
1621 | "%s: domain_device = %p\n", __func__, dev); | 1367 | "%s: domain_device = %p\n", __func__, domain_dev); |
1368 | |||
1369 | wait_for_start(isci_host); | ||
1370 | |||
1371 | sas_port = domain_dev->port; | ||
1372 | sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy, | ||
1373 | port_phy_el); | ||
1374 | isci_phy = to_iphy(sas_phy); | ||
1375 | isci_port = isci_phy->isci_port; | ||
1622 | 1376 | ||
1623 | if (!isci_port) | 1377 | /* we are being called for a device on this port, |
1378 | * so it has to come up eventually | ||
1379 | */ | ||
1380 | wait_for_completion(&isci_port->start_complete); | ||
1381 | |||
1382 | if ((isci_stopping == isci_port_get_state(isci_port)) || | ||
1383 | (isci_stopped == isci_port_get_state(isci_port))) | ||
1624 | return -ENODEV; | 1384 | return -ENODEV; |
1625 | 1385 | ||
1626 | isci_device = isci_remote_device_alloc(isci_host, isci_port); | 1386 | isci_device = isci_remote_device_alloc(isci_host, isci_port); |
@@ -1631,7 +1391,7 @@ int isci_remote_device_found(struct domain_device *dev) | |||
1631 | INIT_LIST_HEAD(&isci_device->node); | 1391 | INIT_LIST_HEAD(&isci_device->node); |
1632 | 1392 | ||
1633 | spin_lock_irq(&isci_host->scic_lock); | 1393 | spin_lock_irq(&isci_host->scic_lock); |
1634 | isci_device->domain_dev = dev; | 1394 | isci_device->domain_dev = domain_dev; |
1635 | isci_device->isci_port = isci_port; | 1395 | isci_device->isci_port = isci_port; |
1636 | list_add_tail(&isci_device->node, &isci_port->remote_dev_list); | 1396 | list_add_tail(&isci_device->node, &isci_port->remote_dev_list); |
1637 | 1397 | ||
@@ -1644,7 +1404,7 @@ int isci_remote_device_found(struct domain_device *dev) | |||
1644 | 1404 | ||
1645 | if (status == SCI_SUCCESS) { | 1405 | if (status == SCI_SUCCESS) { |
1646 | /* device came up, advertise it to the world */ | 1406 | /* device came up, advertise it to the world */ |
1647 | dev->lldd_dev = isci_device; | 1407 | domain_dev->lldd_dev = isci_device; |
1648 | } else | 1408 | } else |
1649 | isci_put_device(isci_device); | 1409 | isci_put_device(isci_device); |
1650 | spin_unlock_irq(&isci_host->scic_lock); | 1410 | spin_unlock_irq(&isci_host->scic_lock); |
@@ -1654,73 +1414,88 @@ int isci_remote_device_found(struct domain_device *dev) | |||
1654 | 1414 | ||
1655 | return status == SCI_SUCCESS ? 0 : -ENODEV; | 1415 | return status == SCI_SUCCESS ? 0 : -ENODEV; |
1656 | } | 1416 | } |
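The right-hand isci_remote_device_found() resolves its isci_port by hopping from the domain device to the first phy on its SAS port. A kernel-style sketch of that lookup chain, assuming (as the hunk does) that phy_list is non-empty; port_from_domain_dev() is an illustrative name:

/* Kernel-style sketch: resolve the LLDD port for a libsas domain
 * device via the first phy of its asd_sas_port.
 */
static struct isci_port *port_from_domain_dev(struct domain_device *dev)
{
	struct asd_sas_port *sas_port = dev->port;
	struct asd_sas_phy *sas_phy =
		list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
				 port_phy_el);

	return to_iphy(sas_phy)->isci_port;
}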
1657 | 1417 | /** | |
1658 | enum sci_status isci_remote_device_suspend_terminate( | 1418 | * isci_device_is_reset_pending() - This function will check if there is any |
1659 | struct isci_host *ihost, | 1419 | * pending reset condition on the device. |
1660 | struct isci_remote_device *idev, | 1420 | * @isci_device: This parameter is the isci_device object. |
1661 | struct isci_request *ireq) | 1421 | * |
1422 | * true if there is a reset pending for the device. | ||
1423 | */ | ||
1424 | bool isci_device_is_reset_pending( | ||
1425 | struct isci_host *isci_host, | ||
1426 | struct isci_remote_device *isci_device) | ||
1662 | { | 1427 | { |
1428 | struct isci_request *isci_request; | ||
1429 | struct isci_request *tmp_req; | ||
1430 | bool reset_is_pending = false; | ||
1663 | unsigned long flags; | 1431 | unsigned long flags; |
1664 | enum sci_status status; | ||
1665 | 1432 | ||
1666 | /* Put the device into suspension. */ | 1433 | dev_dbg(&isci_host->pdev->dev, |
1667 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1434 | "%s: isci_device = %p\n", __func__, isci_device); |
1668 | set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); | ||
1669 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); | ||
1670 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1671 | 1435 | ||
1672 | /* Terminate and wait for the completions. */ | 1436 | spin_lock_irqsave(&isci_host->scic_lock, flags); |
1673 | status = isci_remote_device_terminate_requests(ihost, idev, ireq); | ||
1674 | if (status != SCI_SUCCESS) | ||
1675 | dev_dbg(&ihost->pdev->dev, | ||
1676 | "%s: isci_remote_device_terminate_requests(%p) " | ||
1677 | "returned %d!\n", | ||
1678 | __func__, idev, status); | ||
1679 | 1437 | ||
1680 | /* NOTE: RNC resumption is left to the caller! */ | 1438 | /* Check for reset on all pending requests. */ |
1681 | return status; | 1439 | list_for_each_entry_safe(isci_request, tmp_req, |
1682 | } | 1440 | &isci_device->reqs_in_process, dev_node) { |
1441 | dev_dbg(&isci_host->pdev->dev, | ||
1442 | "%s: isci_device = %p request = %p\n", | ||
1443 | __func__, isci_device, isci_request); | ||
1683 | 1444 | ||
1684 | int isci_remote_device_is_safe_to_abort( | 1445 | if (isci_request->ttype == io_task) { |
1685 | struct isci_remote_device *idev) | 1446 | struct sas_task *task = isci_request_access_task( |
1686 | { | 1447 | isci_request); |
1687 | return sci_remote_node_context_is_safe_to_abort(&idev->rnc); | ||
1688 | } | ||
1689 | 1448 | ||
1690 | enum sci_status sci_remote_device_abort_requests_pending_abort( | 1449 | spin_lock(&task->task_state_lock); |
1691 | struct isci_remote_device *idev) | 1450 | if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) |
1692 | { | 1451 | reset_is_pending = true; |
1693 | return sci_remote_device_terminate_reqs_checkabort(idev, 1); | 1452 | spin_unlock(&task->task_state_lock); |
1453 | } | ||
1454 | } | ||
1455 | |||
1456 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | ||
1457 | |||
1458 | dev_dbg(&isci_host->pdev->dev, | ||
1459 | "%s: isci_device = %p reset_is_pending = %d\n", | ||
1460 | __func__, isci_device, reset_is_pending); | ||
1461 | |||
1462 | return reset_is_pending; | ||
1694 | } | 1463 | } |
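isci_device_is_reset_pending() is a textbook nested-lock scan: hold the host lock to pin the list, walk it with the _safe iterator, and take each task's state lock only long enough to test one flag. A reduced kernel-style sketch that omits the hunk's io_task type check; any_reset_pending() is an illustrative name:

/* Kernel-style sketch: scan reqs_in_process for SAS_TASK_NEED_DEV_RESET.
 * The host lock serializes the list; each task flag has its own lock.
 */
static bool any_reset_pending(struct isci_host *ihost,
			      struct isci_remote_device *idev)
{
	struct isci_request *ireq, *tmp;
	unsigned long flags;
	bool pending = false;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	list_for_each_entry_safe(ireq, tmp, &idev->reqs_in_process, dev_node) {
		struct sas_task *task = isci_request_access_task(ireq);

		spin_lock(&task->task_state_lock);
		if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
			pending = true;
		spin_unlock(&task->task_state_lock);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return pending;
}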
1695 | 1464 | ||
1696 | enum sci_status isci_remote_device_reset_complete( | 1465 | /** |
1697 | struct isci_host *ihost, | 1466 | * isci_device_clear_reset_pending() - This function will clear any pending |
1698 | struct isci_remote_device *idev) | 1467 | * reset condition flags on the device. |
1468 | * @idev: This parameter is the isci_device object. | ||
1469 | * | ||
1470 | * This function does not return a value. | ||
1471 | */ | ||
1472 | void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev) | ||
1699 | { | 1473 | { |
1700 | unsigned long flags; | 1474 | struct isci_request *isci_request; |
1701 | enum sci_status status; | 1475 | struct isci_request *tmp_req; |
1476 | unsigned long flags = 0; | ||
1477 | |||
1478 | dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n", | ||
1479 | __func__, idev, ihost); | ||
1702 | 1480 | ||
1703 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1481 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1704 | status = sci_remote_device_reset_complete(idev); | ||
1705 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1706 | 1482 | ||
1707 | return status; | 1483 | /* Clear reset pending on all pending requests. */ |
1708 | } | 1484 | list_for_each_entry_safe(isci_request, tmp_req, |
1485 | &idev->reqs_in_process, dev_node) { | ||
1486 | dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n", | ||
1487 | __func__, idev, isci_request); | ||
1709 | 1488 | ||
1710 | void isci_dev_set_hang_detection_timeout( | 1489 | if (isci_request->ttype == io_task) { |
1711 | struct isci_remote_device *idev, | 1490 | |
1712 | u32 timeout) | 1491 | unsigned long flags2; |
1713 | { | 1492 | struct sas_task *task = isci_request_access_task( |
1714 | if (dev_is_sata(idev->domain_dev)) { | 1493 | isci_request); |
1715 | if (timeout) { | 1494 | |
1716 | if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED, | 1495 | spin_lock_irqsave(&task->task_state_lock, flags2); |
1717 | &idev->flags)) | 1496 | task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET; |
1718 | return; /* Already enabled. */ | 1497 | spin_unlock_irqrestore(&task->task_state_lock, flags2); |
1719 | } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED, | 1498 | } |
1720 | &idev->flags)) | ||
1721 | return; /* Not enabled. */ | ||
1722 | |||
1723 | sci_port_set_hang_detection_timeout(idev->owning_port, | ||
1724 | timeout); | ||
1725 | } | 1499 | } |
1500 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1726 | } | 1501 | } |
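The removed hang-detection helper uses test_and_set_bit()/test_and_clear_bit() so enable and disable are idempotent: the port timeout is written only on a real off-to-on or on-to-off transition. A kernel-style sketch of that edge-triggered pattern; set_feature_timeout() is an illustrative name:

/* Kernel-style sketch: edge-triggered enable/disable with atomic bitops,
 * mirroring the IDEV_RNC_LLHANG_ENABLED logic removed above.
 */
static void set_feature_timeout(struct isci_remote_device *idev, u32 timeout)
{
	if (timeout) {
		if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED, &idev->flags))
			return;		/* already enabled */
	} else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED, &idev->flags)) {
		return;			/* already disabled */
	}

	sci_port_set_hang_detection_timeout(idev->owning_port, timeout);
}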
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 7674caae1d8..57ccfc3d6ad 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h | |||
@@ -82,41 +82,31 @@ struct isci_remote_device { | |||
82 | #define IDEV_START_PENDING 0 | 82 | #define IDEV_START_PENDING 0 |
83 | #define IDEV_STOP_PENDING 1 | 83 | #define IDEV_STOP_PENDING 1 |
84 | #define IDEV_ALLOCATED 2 | 84 | #define IDEV_ALLOCATED 2 |
85 | #define IDEV_GONE 3 | 85 | #define IDEV_EH 3 |
86 | #define IDEV_IO_READY 4 | 86 | #define IDEV_GONE 4 |
87 | #define IDEV_IO_NCQERROR 5 | 87 | #define IDEV_IO_READY 5 |
88 | #define IDEV_RNC_LLHANG_ENABLED 6 | 88 | #define IDEV_IO_NCQERROR 6 |
89 | #define IDEV_ABORT_PATH_ACTIVE 7 | ||
90 | #define IDEV_ABORT_PATH_RESUME_PENDING 8 | ||
91 | unsigned long flags; | 89 | unsigned long flags; |
92 | struct kref kref; | 90 | struct kref kref; |
93 | struct isci_port *isci_port; | 91 | struct isci_port *isci_port; |
94 | struct domain_device *domain_dev; | 92 | struct domain_device *domain_dev; |
95 | struct list_head node; | 93 | struct list_head node; |
94 | struct list_head reqs_in_process; | ||
96 | struct sci_base_state_machine sm; | 95 | struct sci_base_state_machine sm; |
97 | u32 device_port_width; | 96 | u32 device_port_width; |
98 | enum sas_linkrate connection_rate; | 97 | enum sas_linkrate connection_rate; |
98 | bool is_direct_attached; | ||
99 | struct isci_port *owning_port; | 99 | struct isci_port *owning_port; |
100 | struct sci_remote_node_context rnc; | 100 | struct sci_remote_node_context rnc; |
101 | /* XXX unify with device reference counting and delete */ | 101 | /* XXX unify with device reference counting and delete */ |
102 | u32 started_request_count; | 102 | u32 started_request_count; |
103 | struct isci_request *working_request; | 103 | struct isci_request *working_request; |
104 | u32 not_ready_reason; | 104 | u32 not_ready_reason; |
105 | scics_sds_remote_node_context_callback abort_resume_cb; | ||
106 | void *abort_resume_cbparam; | ||
107 | }; | 105 | }; |
108 | 106 | ||
109 | #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 | 107 | #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 |
110 | 108 | ||
111 | /* device reference routines must be called under sci_lock */ | 109 | /* device reference routines must be called under sci_lock */ |
112 | static inline struct isci_remote_device *isci_get_device( | ||
113 | struct isci_remote_device *idev) | ||
114 | { | ||
115 | if (idev) | ||
116 | kref_get(&idev->kref); | ||
117 | return idev; | ||
118 | } | ||
119 | |||
120 | static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) | 110 | static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) |
121 | { | 111 | { |
122 | struct isci_remote_device *idev = dev->lldd_dev; | 112 | struct isci_remote_device *idev = dev->lldd_dev; |
@@ -142,7 +132,10 @@ void isci_remote_device_nuke_requests(struct isci_host *ihost, | |||
142 | struct isci_remote_device *idev); | 132 | struct isci_remote_device *idev); |
143 | void isci_remote_device_gone(struct domain_device *domain_dev); | 133 | void isci_remote_device_gone(struct domain_device *domain_dev); |
144 | int isci_remote_device_found(struct domain_device *domain_dev); | 134 | int isci_remote_device_found(struct domain_device *domain_dev); |
145 | 135 | bool isci_device_is_reset_pending(struct isci_host *ihost, | |
136 | struct isci_remote_device *idev); | ||
137 | void isci_device_clear_reset_pending(struct isci_host *ihost, | ||
138 | struct isci_remote_device *idev); | ||
146 | /** | 139 | /** |
147 | * sci_remote_device_stop() - This method will stop both transmission and | 140 | * sci_remote_device_stop() - This method will stop both transmission and |
148 | * reception of link activity for the supplied remote device. This method | 141 | * reception of link activity for the supplied remote device. This method |
@@ -190,101 +183,113 @@ enum sci_status sci_remote_device_reset_complete( | |||
190 | /** | 183 | /** |
191 | * enum sci_remote_device_states - This enumeration depicts all the states | 184 | * enum sci_remote_device_states - This enumeration depicts all the states |
192 | * for the common remote device state machine. | 185 | * for the common remote device state machine. |
193 | * @SCI_DEV_INITIAL: Simply the initial state for the base remote device | ||
194 | * state machine. | ||
195 | * | ||
196 | * @SCI_DEV_STOPPED: This state indicates that the remote device has | ||
197 | * successfully been stopped. In this state no new IO operations are | ||
198 | * permitted. This state is entered from the INITIAL state. This state | ||
199 | * is entered from the STOPPING state. | ||
200 | * | ||
201 | * @SCI_DEV_STARTING: This state indicates that the remote device is in | ||
202 | * the process of becoming ready (i.e. starting). In this state no new | ||
203 | * IO operations are permitted. This state is entered from the STOPPED | ||
204 | * state. | ||
205 | * | ||
206 | * @SCI_DEV_READY: This state indicates the remote device is now ready. | ||
207 | * Thus, the user is able to perform IO operations on the remote device. | ||
208 | * This state is entered from the STARTING state. | ||
209 | * | ||
210 | * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote | ||
211 | * device. When there are no active IO for the device it is in this | ||
212 | * state. | ||
213 | * | ||
214 | * @SCI_STP_DEV_CMD: This is the command state for the STP remote | ||
215 | * device. This state is entered when the device is processing a | ||
216 | * non-NCQ command. The device object will fail any new start IO | ||
217 | * requests until this command is complete. | ||
218 | * | ||
219 | * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device. | ||
220 | * This state is entered when the device is processing an NCQ request. | ||
221 | * It will remain in this state so long as there is one or more NCQ | ||
222 | * requests being processed. | ||
223 | * | ||
224 | * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP | ||
225 | * remote device. This state is entered when an SDB error FIS is | ||
226 | * received by the device object while in the NCQ state. The device | ||
227 | * object will only accept a READ LOG command while in this state. | ||
228 | * | ||
229 | * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP | ||
230 | * ATAPI remote device. This state is entered when ATAPI device sends | ||
231 | * error status FIS without data while the device object is in CMD | ||
232 | * state. A suspension event is expected in this state. The device | ||
233 | * object will resume right away. | ||
234 | * | ||
235 | * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the | ||
236 | * device is waiting for the RESET task in order to recover from a | ||
237 | * hardware-specific error. | ||
238 | * | ||
239 | * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the | ||
240 | * remote device. This is the normal operational state for a remote | ||
241 | * device. | ||
242 | * | ||
243 | * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device. | ||
244 | * This is the state that the device is placed in when an RNC suspend is | ||
245 | * received by the SCU hardware. | ||
246 | * | ||
247 | * @SCI_DEV_STOPPING: This state indicates that the remote device is in | ||
248 | * the process of stopping. In this state no new IO operations are | ||
249 | * permitted, but existing IO operations are allowed to complete. This | ||
250 | * state is entered from the READY state. This state is entered from | ||
251 | * the FAILED state. | ||
252 | * | ||
253 | * @SCI_DEV_FAILED: This state indicates that the remote device has | ||
254 | * failed. In this state no new IO operations are permitted. This | ||
255 | * state is entered from the INITIALIZING state. This state is entered | ||
256 | * from the READY state. | ||
257 | * | 186 | * |
258 | * @SCI_DEV_RESETTING: This state indicates the device is being reset. | ||
259 | * In this state no new IO operations are permitted. This state is | ||
260 | * entered from the READY state. | ||
261 | * | 187 | * |
262 | * @SCI_DEV_FINAL: Simply the final state for the base remote device | ||
263 | * state machine. | ||
264 | */ | 188 | */ |
265 | #define REMOTE_DEV_STATES {\ | 189 | enum sci_remote_device_states { |
266 | C(DEV_INITIAL),\ | 190 | /** |
267 | C(DEV_STOPPED),\ | 191 | * Simply the initial state for the base remote device state machine. |
268 | C(DEV_STARTING),\ | 192 | */ |
269 | C(DEV_READY),\ | 193 | SCI_DEV_INITIAL, |
270 | C(STP_DEV_IDLE),\ | 194 | |
271 | C(STP_DEV_CMD),\ | 195 | /** |
272 | C(STP_DEV_NCQ),\ | 196 | * This state indicates that the remote device has successfully been |
273 | C(STP_DEV_NCQ_ERROR),\ | 197 | * stopped. In this state no new IO operations are permitted. |
274 | C(STP_DEV_ATAPI_ERROR),\ | 198 | * This state is entered from the INITIAL state. |
275 | C(STP_DEV_AWAIT_RESET),\ | 199 | * This state is entered from the STOPPING state. |
276 | C(SMP_DEV_IDLE),\ | 200 | */ |
277 | C(SMP_DEV_CMD),\ | 201 | SCI_DEV_STOPPED, |
278 | C(DEV_STOPPING),\ | 202 | |
279 | C(DEV_FAILED),\ | 203 | /** |
280 | C(DEV_RESETTING),\ | 204 | * This state indicates that the remote device is in the process of |
281 | C(DEV_FINAL),\ | 205 | * becoming ready (i.e. starting). In this state no new IO operations |
282 | } | 206 | * are permitted. |
283 | #undef C | 207 | * This state is entered from the STOPPED state. |
284 | #define C(a) SCI_##a | 208 | */ |
285 | enum sci_remote_device_states REMOTE_DEV_STATES; | 209 | SCI_DEV_STARTING, |
286 | #undef C | 210 | |
287 | const char *dev_state_name(enum sci_remote_device_states state); | 211 | /** |
212 | * This state indicates the remote device is now ready. Thus, the user | ||
213 | * is able to perform IO operations on the remote device. | ||
214 | * This state is entered from the STARTING state. | ||
215 | */ | ||
216 | SCI_DEV_READY, | ||
217 | |||
218 | /** | ||
219 | * This is the idle substate for the stp remote device. When there are no | ||
220 | * active IO for the device it is is in this state. | ||
221 | */ | ||
222 | SCI_STP_DEV_IDLE, | ||
223 | |||
224 | /** | ||
225 | * This is the command state for the STP remote device. This state is | ||
226 | * entered when the device is processing a non-NCQ command. The device object | ||
227 | * will fail any new start IO requests until this command is complete. | ||
228 | */ | ||
229 | SCI_STP_DEV_CMD, | ||
230 | |||
231 | /** | ||
232 | * This is the NCQ state for the STP remote device. This state is entered | ||
233 | * when the device is processing an NCQ request. It will remain in this state | ||
234 | * so long as there is one or more NCQ requests being processed. | ||
235 | */ | ||
236 | SCI_STP_DEV_NCQ, | ||
237 | |||
238 | /** | ||
239 | * This is the NCQ error state for the STP remote device. This state is | ||
240 | * entered when an SDB error FIS is received by the device object while in the | ||
241 | * NCQ state. The device object will only accept a READ LOG command while in | ||
242 | * this state. | ||
243 | */ | ||
244 | SCI_STP_DEV_NCQ_ERROR, | ||
245 | |||
246 | /** | ||
247 | * This READY substate indicates that the device is waiting for the RESET task | ||
248 | * in order to recover from a hardware-specific error. | ||
249 | */ | ||
250 | SCI_STP_DEV_AWAIT_RESET, | ||
251 | |||
252 | /** | ||
253 | * This is the ready operational substate for the remote device. This is the | ||
254 | * normal operational state for a remote device. | ||
255 | */ | ||
256 | SCI_SMP_DEV_IDLE, | ||
257 | |||
258 | /** | ||
259 | * This is the suspended state for the remote device. This is the state that | ||
260 | * the device is placed in when a RNC suspend is received by the SCU hardware. | ||
261 | */ | ||
262 | SCI_SMP_DEV_CMD, | ||
263 | |||
264 | /** | ||
265 | * This state indicates that the remote device is in the process of | ||
266 | * stopping. In this state no new IO operations are permitted, but | ||
267 | * existing IO operations are allowed to complete. | ||
268 | * This state is entered from the READY state. | ||
269 | * This state is entered from the FAILED state. | ||
270 | */ | ||
271 | SCI_DEV_STOPPING, | ||
272 | |||
273 | /** | ||
274 | * This state indicates that the remote device has failed. | ||
275 | * In this state no new IO operations are permitted. | ||
276 | * This state is entered from the INITIALIZING state. | ||
277 | * This state is entered from the READY state. | ||
278 | */ | ||
279 | SCI_DEV_FAILED, | ||
280 | |||
281 | /** | ||
282 | * This state indicates the device is being reset. | ||
283 | * In this state no new IO operations are permitted. | ||
284 | * This state is entered from the READY state. | ||
285 | */ | ||
286 | SCI_DEV_RESETTING, | ||
287 | |||
288 | /** | ||
289 | * Simply the final state for the base remote device state machine. | ||
290 | */ | ||
291 | SCI_DEV_FINAL, | ||
292 | }; | ||
288 | 293 | ||
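The left-hand column collapses the hand-written enum into a single REMOTE_DEV_STATES X-macro expanded twice: once with C(a) defined as SCI_##a to build the enum, and once (behind dev_state_name()) with C(a) defined as #a to build the matching name table. A standalone sketch of the trick with a toy state list:

/* Standalone sketch of the C(...) X-macro: one list yields both the
 * enum values and their printable names, so they can never drift apart.
 */
#include <stdio.h>

#define DEV_STATES { C(DEV_INITIAL), C(DEV_STOPPED), C(DEV_FINAL) }

#undef C
#define C(a) SCI_##a
enum sci_states DEV_STATES;
#undef C

#define C(a) #a
static const char * const state_names[] = DEV_STATES;
#undef C

int main(void)
{
	printf("%s\n", state_names[SCI_DEV_STOPPED]); /* prints "DEV_STOPPED" */
	return 0;
}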
289 | static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) | 294 | static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) |
290 | { | 295 | { |
@@ -313,8 +318,6 @@ static inline void sci_remote_device_decrement_request_count(struct isci_remote_ | |||
313 | idev->started_request_count--; | 318 | idev->started_request_count--; |
314 | } | 319 | } |
315 | 320 | ||
316 | void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout); | ||
317 | |||
318 | enum sci_status sci_remote_device_frame_handler( | 321 | enum sci_status sci_remote_device_frame_handler( |
319 | struct isci_remote_device *idev, | 322 | struct isci_remote_device *idev, |
320 | u32 frame_index); | 323 | u32 frame_index); |
@@ -338,50 +341,12 @@ enum sci_status sci_remote_device_complete_io( | |||
338 | struct isci_remote_device *idev, | 341 | struct isci_remote_device *idev, |
339 | struct isci_request *ireq); | 342 | struct isci_request *ireq); |
340 | 343 | ||
341 | void sci_remote_device_post_request( | 344 | enum sci_status sci_remote_device_suspend( |
342 | struct isci_remote_device *idev, | ||
343 | u32 request); | ||
344 | |||
345 | enum sci_status sci_remote_device_terminate_requests( | ||
346 | struct isci_remote_device *idev); | ||
347 | |||
348 | int isci_remote_device_is_safe_to_abort( | ||
349 | struct isci_remote_device *idev); | ||
350 | |||
351 | enum sci_status | ||
352 | sci_remote_device_abort_requests_pending_abort( | ||
353 | struct isci_remote_device *idev); | ||
354 | |||
355 | enum sci_status isci_remote_device_suspend( | ||
356 | struct isci_host *ihost, | ||
357 | struct isci_remote_device *idev); | ||
358 | |||
359 | enum sci_status sci_remote_device_resume( | ||
360 | struct isci_remote_device *idev, | 345 | struct isci_remote_device *idev, |
361 | scics_sds_remote_node_context_callback cb_fn, | 346 | u32 suspend_type); |
362 | void *cb_p); | ||
363 | |||
364 | enum sci_status isci_remote_device_resume_from_abort( | ||
365 | struct isci_host *ihost, | ||
366 | struct isci_remote_device *idev); | ||
367 | |||
368 | enum sci_status isci_remote_device_reset( | ||
369 | struct isci_host *ihost, | ||
370 | struct isci_remote_device *idev); | ||
371 | 347 | ||
372 | enum sci_status isci_remote_device_reset_complete( | 348 | void sci_remote_device_post_request( |
373 | struct isci_host *ihost, | ||
374 | struct isci_remote_device *idev); | ||
375 | |||
376 | enum sci_status isci_remote_device_suspend_terminate( | ||
377 | struct isci_host *ihost, | ||
378 | struct isci_remote_device *idev, | 349 | struct isci_remote_device *idev, |
379 | struct isci_request *ireq); | 350 | u32 request); |
380 | 351 | ||
381 | enum sci_status isci_remote_device_terminate_requests( | ||
382 | struct isci_host *ihost, | ||
383 | struct isci_remote_device *idev, | ||
384 | struct isci_request *ireq); | ||
385 | enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, | ||
386 | enum sci_remote_node_suspension_reasons reason); | ||
387 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ | 352 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ |
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 1910100638a..748e8339d1e 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c | |||
@@ -52,7 +52,7 @@ | |||
52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
54 | */ | 54 | */ |
55 | #include <scsi/sas_ata.h> | 55 | |
56 | #include "host.h" | 56 | #include "host.h" |
57 | #include "isci.h" | 57 | #include "isci.h" |
58 | #include "remote_device.h" | 58 | #include "remote_device.h" |
@@ -60,15 +60,18 @@ | |||
60 | #include "scu_event_codes.h" | 60 | #include "scu_event_codes.h" |
61 | #include "scu_task_context.h" | 61 | #include "scu_task_context.h" |
62 | 62 | ||
63 | #undef C | ||
64 | #define C(a) (#a) | ||
65 | const char *rnc_state_name(enum scis_sds_remote_node_context_states state) | ||
66 | { | ||
67 | static const char * const strings[] = RNC_STATES; | ||
68 | 63 | ||
69 | return strings[state]; | 64 | /** |
70 | } | 65 | * |
71 | #undef C | 66 | * @sci_rnc: The RNC for which the posted request is being made. |
67 | * | ||
68 | * This method will return true if the RNC is not in the initial state; in | ||
69 | * all other states the RNC is considered active. The destroy request of the | ||
70 | * state machine drives the RNC back to the initial state. If the state | ||
71 | * machine changes then this routine will also have to be changed. Returns | ||
72 | * bool: true if the state machine is not in the initial state, false if it | ||
73 | * is in the initial state. | ||
74 | */ | ||
72 | 75 | ||
73 | /** | 76 | /** |
74 | * | 77 | * |
@@ -90,15 +93,6 @@ bool sci_remote_node_context_is_ready( | |||
90 | return false; | 93 | return false; |
91 | } | 94 | } |
92 | 95 | ||
93 | bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc) | ||
94 | { | ||
95 | u32 current_state = sci_rnc->sm.current_state_id; | ||
96 | |||
97 | if (current_state == SCI_RNC_TX_RX_SUSPENDED) | ||
98 | return true; | ||
99 | return false; | ||
100 | } | ||
101 | |||
102 | static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) | 96 | static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) |
103 | { | 97 | { |
104 | if (id < ihost->remote_node_entries && | 98 | if (id < ihost->remote_node_entries && |
@@ -140,7 +134,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont | |||
140 | 134 | ||
141 | rnc->ssp.arbitration_wait_time = 0; | 135 | rnc->ssp.arbitration_wait_time = 0; |
142 | 136 | ||
143 | if (dev_is_sata(dev)) { | 137 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { |
144 | rnc->ssp.connection_occupancy_timeout = | 138 | rnc->ssp.connection_occupancy_timeout = |
145 | ihost->user_parameters.stp_max_occupancy_timeout; | 139 | ihost->user_parameters.stp_max_occupancy_timeout; |
146 | rnc->ssp.connection_inactivity_timeout = | 140 | rnc->ssp.connection_inactivity_timeout = |
@@ -160,6 +154,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont | |||
160 | rnc->ssp.oaf_source_zone_group = 0; | 154 | rnc->ssp.oaf_source_zone_group = 0; |
161 | rnc->ssp.oaf_more_compatibility_features = 0; | 155 | rnc->ssp.oaf_more_compatibility_features = 0; |
162 | } | 156 | } |
157 | |||
163 | /** | 158 | /** |
164 | * | 159 | * |
165 | * @sci_rnc: | 160 | * @sci_rnc: |
@@ -173,30 +168,23 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont | |||
173 | static void sci_remote_node_context_setup_to_resume( | 168 | static void sci_remote_node_context_setup_to_resume( |
174 | struct sci_remote_node_context *sci_rnc, | 169 | struct sci_remote_node_context *sci_rnc, |
175 | scics_sds_remote_node_context_callback callback, | 170 | scics_sds_remote_node_context_callback callback, |
176 | void *callback_parameter, | 171 | void *callback_parameter) |
177 | enum sci_remote_node_context_destination_state dest_param) | ||
178 | { | 172 | { |
179 | if (sci_rnc->destination_state != RNC_DEST_FINAL) { | 173 | if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) { |
180 | sci_rnc->destination_state = dest_param; | 174 | sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY; |
181 | if (callback != NULL) { | 175 | sci_rnc->user_callback = callback; |
182 | sci_rnc->user_callback = callback; | 176 | sci_rnc->user_cookie = callback_parameter; |
183 | sci_rnc->user_cookie = callback_parameter; | ||
184 | } | ||
185 | } | 177 | } |
186 | } | 178 | } |
187 | 179 | ||
188 | static void sci_remote_node_context_setup_to_destroy( | 180 | static void sci_remote_node_context_setup_to_destory( |
189 | struct sci_remote_node_context *sci_rnc, | 181 | struct sci_remote_node_context *sci_rnc, |
190 | scics_sds_remote_node_context_callback callback, | 182 | scics_sds_remote_node_context_callback callback, |
191 | void *callback_parameter) | 183 | void *callback_parameter) |
192 | { | 184 | { |
193 | struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc)); | 185 | sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL; |
194 | |||
195 | sci_rnc->destination_state = RNC_DEST_FINAL; | ||
196 | sci_rnc->user_callback = callback; | 186 | sci_rnc->user_callback = callback; |
197 | sci_rnc->user_cookie = callback_parameter; | 187 | sci_rnc->user_cookie = callback_parameter; |
198 | |||
199 | wake_up(&ihost->eventq); | ||
200 | } | 188 | } |
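Both setup helpers stash a callback plus an opaque cookie on the RNC so a later state transition can notify whoever requested the operation; this is plain continuation-passing. A standalone sketch with toy names (rnc_notify_user() echoes the driver's sci_remote_node_context_notify_user()):

/* Standalone sketch of the callback/cookie pattern: store a
 * continuation now, fire it when the transition completes.
 */
#include <stdio.h>

typedef void (*rnc_callback)(void *cookie);

struct toy_rnc {
	rnc_callback user_callback;
	void *user_cookie;
};

static void rnc_setup(struct toy_rnc *rnc, rnc_callback cb, void *cookie)
{
	rnc->user_callback = cb;
	rnc->user_cookie = cookie;
}

static void rnc_notify_user(struct toy_rnc *rnc)
{
	if (rnc->user_callback)
		rnc->user_callback(rnc->user_cookie);
}

static void on_done(void *cookie)
{
	printf("done: %s\n", (const char *)cookie);
}

int main(void)
{
	struct toy_rnc rnc = { 0 };

	rnc_setup(&rnc, on_done, "resume complete");
	rnc_notify_user(&rnc);	/* fired by the state machine in the driver */
	return 0;
}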
201 | 189 | ||
202 | /** | 190 | /** |
@@ -218,19 +206,9 @@ static void sci_remote_node_context_notify_user( | |||
218 | 206 | ||
219 | static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) | 207 | static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) |
220 | { | 208 | { |
221 | switch (rnc->destination_state) { | 209 | if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) |
222 | case RNC_DEST_READY: | ||
223 | case RNC_DEST_SUSPENDED_RESUME: | ||
224 | rnc->destination_state = RNC_DEST_READY; | ||
225 | /* Fall through... */ | ||
226 | case RNC_DEST_FINAL: | ||
227 | sci_remote_node_context_resume(rnc, rnc->user_callback, | 210 | sci_remote_node_context_resume(rnc, rnc->user_callback, |
228 | rnc->user_cookie); | 211 | rnc->user_cookie); |
229 | break; | ||
230 | default: | ||
231 | rnc->destination_state = RNC_DEST_UNSPECIFIED; | ||
232 | break; | ||
233 | } | ||
234 | } | 212 | } |
235 | 213 | ||
236 | static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) | 214 | static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) |
@@ -244,12 +222,13 @@ static void sci_remote_node_context_validate_context_buffer(struct sci_remote_no | |||
244 | 222 | ||
245 | rnc_buffer->ssp.is_valid = true; | 223 | rnc_buffer->ssp.is_valid = true; |
246 | 224 | ||
247 | if (dev_is_sata(dev) && dev->parent) { | 225 | if (!idev->is_direct_attached && |
226 | (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { | ||
248 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); | 227 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); |
249 | } else { | 228 | } else { |
250 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); | 229 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); |
251 | 230 | ||
252 | if (!dev->parent) | 231 | if (idev->is_direct_attached) |
253 | sci_port_setup_transports(idev->owning_port, | 232 | sci_port_setup_transports(idev->owning_port, |
254 | sci_rnc->remote_node_index); | 233 | sci_rnc->remote_node_index); |
255 | } | 234 | } |
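sci_remote_node_context_validate_context_buffer() selects the SCU post command by attachment: expander-attached SATA/STP devices get the 96-byte RNC post, everything else the 32-byte one, and only direct-attached devices get the port transport setup. The two columns differ only in how direct attachment is detected (dev->parent versus the is_direct_attached flag). A condensed kernel-style sketch; post_rnc_validate() and its boolean parameters are illustrative:

/* Kernel-style sketch of the post-command selection above. */
static void post_rnc_validate(struct isci_remote_device *idev,
			      bool is_sata_or_stp, bool direct_attached)
{
	if (is_sata_or_stp && !direct_attached) {
		sci_remote_device_post_request(idev,
					       SCU_CONTEXT_COMMAND_POST_RNC_96);
	} else {
		sci_remote_device_post_request(idev,
					       SCU_CONTEXT_COMMAND_POST_RNC_32);
		if (direct_attached)
			sci_port_setup_transports(idev->owning_port,
						  idev->rnc.remote_node_index);
	}
}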
@@ -272,18 +251,13 @@ static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_ | |||
272 | static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) | 251 | static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) |
273 | { | 252 | { |
274 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 253 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
275 | struct isci_remote_device *idev = rnc_to_dev(rnc); | ||
276 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
277 | 254 | ||
278 | /* Check to see if we have gotten back to the initial state because | 255 | /* Check to see if we have gotten back to the initial state because |
279 | * someone requested to destroy the remote node context object. | 256 | * someone requested to destroy the remote node context object. |
280 | */ | 257 | */ |
281 | if (sm->previous_state_id == SCI_RNC_INVALIDATING) { | 258 | if (sm->previous_state_id == SCI_RNC_INVALIDATING) { |
282 | rnc->destination_state = RNC_DEST_UNSPECIFIED; | 259 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; |
283 | sci_remote_node_context_notify_user(rnc); | 260 | sci_remote_node_context_notify_user(rnc); |
284 | |||
285 | smp_wmb(); | ||
286 | wake_up(&ihost->eventq); | ||
287 | } | 261 | } |
288 | } | 262 | } |
289 | 263 | ||
@@ -298,8 +272,6 @@ static void sci_remote_node_context_invalidating_state_enter(struct sci_base_sta | |||
298 | { | 272 | { |
299 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 273 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
300 | 274 | ||
301 | /* Terminate all outstanding requests. */ | ||
302 | sci_remote_device_terminate_requests(rnc_to_dev(rnc)); | ||
303 | sci_remote_node_context_invalidate_context_buffer(rnc); | 275 | sci_remote_node_context_invalidate_context_buffer(rnc); |
304 | } | 276 | } |
305 | 277 | ||
@@ -318,8 +290,10 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m | |||
318 | * resume because of a target reset we also need to update | 290 | * resume because of a target reset we also need to update |
319 | * the STPTLDARNI register with the RNi of the device | 291 | * the STPTLDARNI register with the RNi of the device |
320 | */ | 292 | */ |
321 | if (dev_is_sata(dev) && !dev->parent) | 293 | if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && |
322 | sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); | 294 | idev->is_direct_attached) |
295 | sci_port_setup_transports(idev->owning_port, | ||
296 | rnc->remote_node_index); | ||
323 | 297 | ||
324 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); | 298 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); |
325 | } | 299 | } |
@@ -327,22 +301,10 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m | |||
327 | static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) | 301 | static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) |
328 | { | 302 | { |
329 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 303 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
330 | enum sci_remote_node_context_destination_state dest_select; | ||
331 | int tell_user = 1; | ||
332 | |||
333 | dest_select = rnc->destination_state; | ||
334 | rnc->destination_state = RNC_DEST_UNSPECIFIED; | ||
335 | 304 | ||
336 | if ((dest_select == RNC_DEST_SUSPENDED) || | 305 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; |
337 | (dest_select == RNC_DEST_SUSPENDED_RESUME)) { | ||
338 | sci_remote_node_context_suspend( | ||
339 | rnc, rnc->suspend_reason, | ||
340 | SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); | ||
341 | 306 | ||
342 | if (dest_select == RNC_DEST_SUSPENDED_RESUME) | 307 | if (rnc->user_callback) |
343 | tell_user = 0; /* Wait until ready again. */ | ||
344 | } | ||
345 | if (tell_user) | ||
346 | sci_remote_node_context_notify_user(rnc); | 308 | sci_remote_node_context_notify_user(rnc); |
347 | } | 309 | } |
348 | 310 | ||
@@ -356,34 +318,10 @@ static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_sta | |||
356 | static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) | 318 | static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) |
357 | { | 319 | { |
358 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 320 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
359 | struct isci_remote_device *idev = rnc_to_dev(rnc); | ||
360 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
361 | u32 new_count = rnc->suspend_count + 1; | ||
362 | |||
363 | if (new_count == 0) | ||
364 | rnc->suspend_count = 1; | ||
365 | else | ||
366 | rnc->suspend_count = new_count; | ||
367 | smp_wmb(); | ||
368 | 321 | ||
369 | /* Terminate outstanding requests pending abort. */ | ||
370 | sci_remote_device_abort_requests_pending_abort(idev); | ||
371 | |||
372 | wake_up(&ihost->eventq); | ||
373 | sci_remote_node_context_continue_state_transitions(rnc); | 322 | sci_remote_node_context_continue_state_transitions(rnc); |
374 | } | 323 | } |
375 | 324 | ||
376 | static void sci_remote_node_context_await_suspend_state_exit( | ||
377 | struct sci_base_state_machine *sm) | ||
378 | { | ||
379 | struct sci_remote_node_context *rnc | ||
380 | = container_of(sm, typeof(*rnc), sm); | ||
381 | struct isci_remote_device *idev = rnc_to_dev(rnc); | ||
382 | |||
383 | if (dev_is_sata(idev->domain_dev)) | ||
384 | isci_dev_set_hang_detection_timeout(idev, 0); | ||
385 | } | ||
386 | |||
387 | static const struct sci_base_state sci_remote_node_context_state_table[] = { | 325 | static const struct sci_base_state sci_remote_node_context_state_table[] = { |
388 | [SCI_RNC_INITIAL] = { | 326 | [SCI_RNC_INITIAL] = { |
389 | .enter_state = sci_remote_node_context_initial_state_enter, | 327 | .enter_state = sci_remote_node_context_initial_state_enter, |
@@ -406,9 +344,7 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = { | |||
406 | [SCI_RNC_TX_RX_SUSPENDED] = { | 344 | [SCI_RNC_TX_RX_SUSPENDED] = { |
407 | .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, | 345 | .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, |
408 | }, | 346 | }, |
409 | [SCI_RNC_AWAIT_SUSPENSION] = { | 347 | [SCI_RNC_AWAIT_SUSPENSION] = { }, |
410 | .exit_state = sci_remote_node_context_await_suspend_state_exit, | ||
411 | }, | ||
412 | }; | 348 | }; |
413 | 349 | ||
414 | void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, | 350 | void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, |
@@ -417,7 +353,7 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, | |||
417 | memset(rnc, 0, sizeof(struct sci_remote_node_context)); | 353 | memset(rnc, 0, sizeof(struct sci_remote_node_context)); |
418 | 354 | ||
419 | rnc->remote_node_index = remote_node_index; | 355 | rnc->remote_node_index = remote_node_index; |
420 | rnc->destination_state = RNC_DEST_UNSPECIFIED; | 356 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; |
421 | 357 | ||
422 | sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); | 358 | sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); |
423 | } | 359 | } |
@@ -426,7 +362,6 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
426 | u32 event_code) | 362 | u32 event_code) |
427 | { | 363 | { |
428 | enum scis_sds_remote_node_context_states state; | 364 | enum scis_sds_remote_node_context_states state; |
429 | u32 next_state; | ||
430 | 365 | ||
431 | state = sci_rnc->sm.current_state_id; | 366 | state = sci_rnc->sm.current_state_id; |
432 | switch (state) { | 367 | switch (state) { |
@@ -441,18 +376,18 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
441 | break; | 376 | break; |
442 | case SCI_RNC_INVALIDATING: | 377 | case SCI_RNC_INVALIDATING: |
443 | if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { | 378 | if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { |
444 | if (sci_rnc->destination_state == RNC_DEST_FINAL) | 379 | if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) |
445 | next_state = SCI_RNC_INITIAL; | 380 | state = SCI_RNC_INITIAL; |
446 | else | 381 | else |
447 | next_state = SCI_RNC_POSTING; | 382 | state = SCI_RNC_POSTING; |
448 | sci_change_state(&sci_rnc->sm, next_state); | 383 | sci_change_state(&sci_rnc->sm, state); |
449 | } else { | 384 | } else { |
450 | switch (scu_get_event_type(event_code)) { | 385 | switch (scu_get_event_type(event_code)) { |
451 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: | 386 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: |
452 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: | 387 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: |
453 | /* We really don't care if the hardware is going to suspend | 388 | /* We really don't care if the hardware is going to suspend |
454 | * the device since it's being invalidated anyway */ | 389 | * the device since it's being invalidated anyway */ |
455 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 390 | dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
456 | "%s: SCIC Remote Node Context 0x%p was " | 391 | "%s: SCIC Remote Node Context 0x%p was " |
457 | "suspeneded by hardware while being " | 392 | "suspeneded by hardware while being " |
458 | "invalidated.\n", __func__, sci_rnc); | 393 | "invalidated.\n", __func__, sci_rnc); |
@@ -471,7 +406,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
471 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: | 406 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: |
472 | /* We really don't care if the hardware is going to suspend | 407 | /* We really don't care if the hardware is going to suspend |
473 | * the device since it's being resumed anyway */ | 408 | * the device since it's being resumed anyway */ |
474 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 409 | dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
475 | "%s: SCIC Remote Node Context 0x%p was " | 410 | "%s: SCIC Remote Node Context 0x%p was " |
476 | "suspeneded by hardware while being resumed.\n", | 411 | "suspeneded by hardware while being resumed.\n", |
477 | __func__, sci_rnc); | 412 | __func__, sci_rnc); |
@@ -485,11 +420,11 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
485 | switch (scu_get_event_type(event_code)) { | 420 | switch (scu_get_event_type(event_code)) { |
486 | case SCU_EVENT_TL_RNC_SUSPEND_TX: | 421 | case SCU_EVENT_TL_RNC_SUSPEND_TX: |
487 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); | 422 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); |
488 | sci_rnc->suspend_type = scu_get_event_type(event_code); | 423 | sci_rnc->suspension_code = scu_get_event_specifier(event_code); |
489 | break; | 424 | break; |
490 | case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: | 425 | case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: |
491 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); | 426 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); |
492 | sci_rnc->suspend_type = scu_get_event_type(event_code); | 427 | sci_rnc->suspension_code = scu_get_event_specifier(event_code); |
493 | break; | 428 | break; |
494 | default: | 429 | default: |
495 | goto out; | 430 | goto out; |
@@ -498,29 +433,27 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
498 | case SCI_RNC_AWAIT_SUSPENSION: | 433 | case SCI_RNC_AWAIT_SUSPENSION: |
499 | switch (scu_get_event_type(event_code)) { | 434 | switch (scu_get_event_type(event_code)) { |
500 | case SCU_EVENT_TL_RNC_SUSPEND_TX: | 435 | case SCU_EVENT_TL_RNC_SUSPEND_TX: |
501 | next_state = SCI_RNC_TX_SUSPENDED; | 436 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); |
437 | sci_rnc->suspension_code = scu_get_event_specifier(event_code); | ||
502 | break; | 438 | break; |
503 | case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: | 439 | case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: |
504 | next_state = SCI_RNC_TX_RX_SUSPENDED; | 440 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); |
441 | sci_rnc->suspension_code = scu_get_event_specifier(event_code); | ||
505 | break; | 442 | break; |
506 | default: | 443 | default: |
507 | goto out; | 444 | goto out; |
508 | } | 445 | } |
509 | if (sci_rnc->suspend_type == scu_get_event_type(event_code)) | ||
510 | sci_change_state(&sci_rnc->sm, next_state); | ||
511 | break; | 446 | break; |
512 | default: | 447 | default: |
513 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 448 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
514 | "%s: invalid state: %s\n", __func__, | 449 | "%s: invalid state %d\n", __func__, state); |
515 | rnc_state_name(state)); | ||
516 | return SCI_FAILURE_INVALID_STATE; | 450 | return SCI_FAILURE_INVALID_STATE; |
517 | } | 451 | } |
518 | return SCI_SUCCESS; | 452 | return SCI_SUCCESS; |
519 | 453 | ||
520 | out: | 454 | out: |
521 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 455 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
522 | "%s: code: %#x state: %s\n", __func__, event_code, | 456 | "%s: code: %#x state: %d\n", __func__, event_code, state); |
523 | rnc_state_name(state)); | ||
524 | return SCI_FAILURE; | 457 | return SCI_FAILURE; |
525 | 458 | ||
526 | } | 459 | } |
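The event handler above dispatches on fields unpacked from the 32-bit event_code by scu_get_event_type(), scu_get_event_code() and scu_get_event_specifier(). A standalone sketch of that shift-and-mask decode; the field positions and widths below are assumptions for illustration, not the actual layout in scu_event_codes.h:

#include <stdint.h>
#include <stdio.h>

#define EV_TYPE_SHIFT      24	/* illustrative layout only */
#define EV_TYPE_MASK       0xff
#define EV_CODE_SHIFT      16
#define EV_CODE_MASK       0xff
#define EV_SPECIFIER_MASK  0xffff

static uint32_t ev_type(uint32_t ev)      { return (ev >> EV_TYPE_SHIFT) & EV_TYPE_MASK; }
static uint32_t ev_code(uint32_t ev)      { return (ev >> EV_CODE_SHIFT) & EV_CODE_MASK; }
static uint32_t ev_specifier(uint32_t ev) { return ev & EV_SPECIFIER_MASK; }

int main(void)
{
	uint32_t ev = 0x05020001;	/* type 0x05, code 0x02, specifier 0x0001 */

	printf("type %#x code %#x specifier %#x\n",
	       ev_type(ev), ev_code(ev), ev_specifier(ev));
	return 0;
}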
@@ -534,23 +467,20 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context | |||
534 | state = sci_rnc->sm.current_state_id; | 467 | state = sci_rnc->sm.current_state_id; |
535 | switch (state) { | 468 | switch (state) { |
536 | case SCI_RNC_INVALIDATING: | 469 | case SCI_RNC_INVALIDATING: |
537 | sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); | 470 | sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); |
538 | return SCI_SUCCESS; | 471 | return SCI_SUCCESS; |
539 | case SCI_RNC_POSTING: | 472 | case SCI_RNC_POSTING: |
540 | case SCI_RNC_RESUMING: | 473 | case SCI_RNC_RESUMING: |
541 | case SCI_RNC_READY: | 474 | case SCI_RNC_READY: |
542 | case SCI_RNC_TX_SUSPENDED: | 475 | case SCI_RNC_TX_SUSPENDED: |
543 | case SCI_RNC_TX_RX_SUSPENDED: | 476 | case SCI_RNC_TX_RX_SUSPENDED: |
544 | sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); | ||
545 | sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); | ||
546 | return SCI_SUCCESS; | ||
547 | case SCI_RNC_AWAIT_SUSPENSION: | 477 | case SCI_RNC_AWAIT_SUSPENSION: |
548 | sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); | 478 | sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); |
479 | sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); | ||
549 | return SCI_SUCCESS; | 480 | return SCI_SUCCESS; |
550 | case SCI_RNC_INITIAL: | 481 | case SCI_RNC_INITIAL: |
551 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 482 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
552 | "%s: invalid state: %s\n", __func__, | 483 | "%s: invalid state %d\n", __func__, state); |
553 | rnc_state_name(state)); | ||
554 | /* We have decided that the destruct request on the remote node context | 484 | /* We have decided that the destruct request on the remote node context |
555 | * cannot fail since it is either in the initial/destroyed state or it | 485 | * cannot fail since it is either in the initial/destroyed state or it |
556 | * can be destroyed. | 486 | * can be destroyed. |
@@ -558,101 +488,35 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context | |||
558 | return SCI_SUCCESS; | 488 | return SCI_SUCCESS; |
559 | default: | 489 | default: |
560 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 490 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
561 | "%s: invalid state %s\n", __func__, | 491 | "%s: invalid state %d\n", __func__, state); |
562 | rnc_state_name(state)); | ||
563 | return SCI_FAILURE_INVALID_STATE; | 492 | return SCI_FAILURE_INVALID_STATE; |
564 | } | 493 | } |
565 | } | 494 | } |
566 | 495 | ||
567 | enum sci_status sci_remote_node_context_suspend( | 496 | enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, |
568 | struct sci_remote_node_context *sci_rnc, | 497 | u32 suspend_type, |
569 | enum sci_remote_node_suspension_reasons suspend_reason, | 498 | scics_sds_remote_node_context_callback cb_fn, |
570 | u32 suspend_type) | 499 | void *cb_p) |
571 | { | 500 | { |
572 | enum scis_sds_remote_node_context_states state | 501 | enum scis_sds_remote_node_context_states state; |
573 | = sci_rnc->sm.current_state_id; | ||
574 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | ||
575 | enum sci_status status = SCI_FAILURE_INVALID_STATE; | ||
576 | enum sci_remote_node_context_destination_state dest_param = | ||
577 | RNC_DEST_UNSPECIFIED; | ||
578 | |||
579 | dev_dbg(scirdev_to_dev(idev), | ||
580 | "%s: current state %s, current suspend_type %x dest state %d," | ||
581 | " arg suspend_reason %d, arg suspend_type %x", | ||
582 | __func__, rnc_state_name(state), sci_rnc->suspend_type, | ||
583 | sci_rnc->destination_state, suspend_reason, | ||
584 | suspend_type); | ||
585 | |||
586 | /* Disable automatic state continuations if explicitly suspending. */ | ||
587 | if ((suspend_reason == SCI_HW_SUSPEND) || | ||
588 | (sci_rnc->destination_state == RNC_DEST_FINAL)) | ||
589 | dest_param = sci_rnc->destination_state; | ||
590 | |||
591 | switch (state) { | ||
592 | case SCI_RNC_READY: | ||
593 | break; | ||
594 | case SCI_RNC_INVALIDATING: | ||
595 | if (sci_rnc->destination_state == RNC_DEST_FINAL) { | ||
596 | dev_warn(scirdev_to_dev(idev), | ||
597 | "%s: already destroying %p\n", | ||
598 | __func__, sci_rnc); | ||
599 | return SCI_FAILURE_INVALID_STATE; | ||
600 | } | ||
601 | /* Fall through and handle like SCI_RNC_POSTING */ | ||
602 | case SCI_RNC_RESUMING: | ||
603 | /* Fall through and handle like SCI_RNC_POSTING */ | ||
604 | case SCI_RNC_POSTING: | ||
605 | /* Set the destination state to AWAIT - this signals the | ||
606 | * entry into the SCI_RNC_READY state that a suspension | ||
607 | * needs to be done immediately. | ||
608 | */ | ||
609 | if (sci_rnc->destination_state != RNC_DEST_FINAL) | ||
610 | sci_rnc->destination_state = RNC_DEST_SUSPENDED; | ||
611 | sci_rnc->suspend_type = suspend_type; | ||
612 | sci_rnc->suspend_reason = suspend_reason; | ||
613 | return SCI_SUCCESS; | ||
614 | 502 | ||
615 | case SCI_RNC_TX_SUSPENDED: | 503 | state = sci_rnc->sm.current_state_id; |
616 | if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX) | 504 | if (state != SCI_RNC_READY) { |
617 | status = SCI_SUCCESS; | ||
618 | break; | ||
619 | case SCI_RNC_TX_RX_SUSPENDED: | ||
620 | if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) | ||
621 | status = SCI_SUCCESS; | ||
622 | break; | ||
623 | case SCI_RNC_AWAIT_SUSPENSION: | ||
624 | if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) | ||
625 | || (suspend_type == sci_rnc->suspend_type)) | ||
626 | return SCI_SUCCESS; | ||
627 | break; | ||
628 | default: | ||
629 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 505 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
630 | "%s: invalid state %s\n", __func__, | 506 | "%s: invalid state %d\n", __func__, state); |
631 | rnc_state_name(state)); | ||
632 | return SCI_FAILURE_INVALID_STATE; | 507 | return SCI_FAILURE_INVALID_STATE; |
633 | } | 508 | } |
634 | sci_rnc->destination_state = dest_param; | ||
635 | sci_rnc->suspend_type = suspend_type; | ||
636 | sci_rnc->suspend_reason = suspend_reason; | ||
637 | 509 | ||
638 | if (status == SCI_SUCCESS) { /* Already in the destination state? */ | 510 | sci_rnc->user_callback = cb_fn; |
639 | struct isci_host *ihost = idev->owning_port->owning_controller; | 511 | sci_rnc->user_cookie = cb_p; |
512 | sci_rnc->suspension_code = suspend_type; | ||
640 | 513 | ||
641 | wake_up_all(&ihost->eventq); /* Let observers look. */ | 514 | if (suspend_type == SCI_SOFTWARE_SUSPENSION) { |
642 | return SCI_SUCCESS; | 515 | sci_remote_device_post_request(rnc_to_dev(sci_rnc), |
516 | SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); | ||
643 | } | 517 | } |
644 | if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) || | ||
645 | (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) { | ||
646 | |||
647 | if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT) | ||
648 | isci_dev_set_hang_detection_timeout(idev, 0x00000001); | ||
649 | |||
650 | sci_remote_device_post_request( | ||
651 | idev, SCI_SOFTWARE_SUSPEND_CMD); | ||
652 | } | ||
653 | if (state != SCI_RNC_AWAIT_SUSPENSION) | ||
654 | sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); | ||
655 | 518 | ||
519 | sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); | ||
656 | return SCI_SUCCESS; | 520 | return SCI_SUCCESS; |
657 | } | 521 | } |
658 | 522 | ||
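The destination-state gating at the top of the suspend path ("Disable automatic state continuations if explicitly suspending") keeps the current destination only for an explicit hardware suspend or an RNC already marked for destruction; any other suspend leaves it unspecified. A standalone sketch of that rule, with illustrative enums standing in for sci_remote_node_suspension_reasons and sci_remote_node_context_destination_state:

#include <stdio.h>

enum reason { HW_SUSPEND, SW_SUSPEND_NORMAL, SW_SUSPEND_LINKHANG };
enum dest   { DEST_UNSPECIFIED, DEST_READY, DEST_FINAL, DEST_SUSPENDED };

static enum dest gate_destination(enum reason r, enum dest cur)
{
	/* hardware suspends and RNCs headed for destruction preserve their
	 * destination; everything else resets it */
	if (r == HW_SUSPEND || cur == DEST_FINAL)
		return cur;
	return DEST_UNSPECIFIED;
}

int main(void)
{
	printf("%d %d\n",
	       gate_destination(SW_SUSPEND_NORMAL, DEST_READY), /* UNSPECIFIED */
	       gate_destination(HW_SUSPEND, DEST_READY));       /* READY */
	return 0;
}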
@@ -661,86 +525,56 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s | |||
661 | void *cb_p) | 525 | void *cb_p) |
662 | { | 526 | { |
663 | enum scis_sds_remote_node_context_states state; | 527 | enum scis_sds_remote_node_context_states state; |
664 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | ||
665 | 528 | ||
666 | state = sci_rnc->sm.current_state_id; | 529 | state = sci_rnc->sm.current_state_id; |
667 | dev_dbg(scirdev_to_dev(idev), | ||
668 | "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; " | ||
669 | "dev resume path %s\n", | ||
670 | __func__, rnc_state_name(state), cb_fn, cb_p, | ||
671 | sci_rnc->destination_state, | ||
672 | test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags) | ||
673 | ? "<abort active>" : "<normal>"); | ||
674 | |||
675 | switch (state) { | 530 | switch (state) { |
676 | case SCI_RNC_INITIAL: | 531 | case SCI_RNC_INITIAL: |
677 | if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) | 532 | if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) |
678 | return SCI_FAILURE_INVALID_STATE; | 533 | return SCI_FAILURE_INVALID_STATE; |
679 | 534 | ||
680 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p, | 535 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); |
681 | RNC_DEST_READY); | 536 | sci_remote_node_context_construct_buffer(sci_rnc); |
682 | if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { | 537 | sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); |
683 | sci_remote_node_context_construct_buffer(sci_rnc); | ||
684 | sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); | ||
685 | } | ||
686 | return SCI_SUCCESS; | 538 | return SCI_SUCCESS; |
687 | |||
688 | case SCI_RNC_POSTING: | 539 | case SCI_RNC_POSTING: |
689 | case SCI_RNC_INVALIDATING: | 540 | case SCI_RNC_INVALIDATING: |
690 | case SCI_RNC_RESUMING: | 541 | case SCI_RNC_RESUMING: |
691 | /* We are still waiting to post when a resume was | 542 | if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) |
692 | * requested. | 543 | return SCI_FAILURE_INVALID_STATE; |
693 | */ | ||
694 | switch (sci_rnc->destination_state) { | ||
695 | case RNC_DEST_SUSPENDED: | ||
696 | case RNC_DEST_SUSPENDED_RESUME: | ||
697 | /* Previously waiting to suspend after posting. | ||
698 | * Now continue onto resumption. | ||
699 | */ | ||
700 | sci_remote_node_context_setup_to_resume( | ||
701 | sci_rnc, cb_fn, cb_p, | ||
702 | RNC_DEST_SUSPENDED_RESUME); | ||
703 | break; | ||
704 | default: | ||
705 | sci_remote_node_context_setup_to_resume( | ||
706 | sci_rnc, cb_fn, cb_p, | ||
707 | RNC_DEST_READY); | ||
708 | break; | ||
709 | } | ||
710 | return SCI_SUCCESS; | ||
711 | 544 | ||
712 | case SCI_RNC_TX_SUSPENDED: | 545 | sci_rnc->user_callback = cb_fn; |
713 | case SCI_RNC_TX_RX_SUSPENDED: | 546 | sci_rnc->user_cookie = cb_p; |
714 | { | 547 | return SCI_SUCCESS; |
715 | struct domain_device *dev = idev->domain_dev; | 548 | case SCI_RNC_TX_SUSPENDED: { |
716 | /* If this is an expander attached SATA device we must | 549 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); |
717 | * invalidate and repost the RNC since this is the only | 550 | struct domain_device *dev = idev->domain_dev; |
718 | * way to clear the TCi to NCQ tag mapping table for | 551 | |
719 | * the RNi. All other device types we can just resume. | 552 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); |
720 | */ | 553 | |
721 | sci_remote_node_context_setup_to_resume( | 554 | /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ |
722 | sci_rnc, cb_fn, cb_p, RNC_DEST_READY); | 555 | if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) |
723 | 556 | sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); | |
724 | if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { | 557 | else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { |
725 | if ((dev_is_sata(dev) && dev->parent) || | 558 | if (idev->is_direct_attached) { |
726 | (sci_rnc->destination_state == RNC_DEST_FINAL)) | 559 | /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */ |
727 | sci_change_state(&sci_rnc->sm, | 560 | sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); |
728 | SCI_RNC_INVALIDATING); | 561 | } else { |
729 | else | 562 | sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); |
730 | sci_change_state(&sci_rnc->sm, | ||
731 | SCI_RNC_RESUMING); | ||
732 | } | 563 | } |
733 | } | 564 | } else |
565 | return SCI_FAILURE; | ||
734 | return SCI_SUCCESS; | 566 | return SCI_SUCCESS; |
735 | 567 | } | |
568 | case SCI_RNC_TX_RX_SUSPENDED: | ||
569 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | ||
570 | sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); | ||
571 | return SCI_FAILURE_INVALID_STATE; | ||
736 | case SCI_RNC_AWAIT_SUSPENSION: | 572 | case SCI_RNC_AWAIT_SUSPENSION: |
737 | sci_remote_node_context_setup_to_resume( | 573 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); |
738 | sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME); | ||
739 | return SCI_SUCCESS; | 574 | return SCI_SUCCESS; |
740 | default: | 575 | default: |
741 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 576 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
742 | "%s: invalid state %s\n", __func__, | 577 | "%s: invalid state %d\n", __func__, state); |
743 | rnc_state_name(state)); | ||
744 | return SCI_FAILURE_INVALID_STATE; | 578 | return SCI_FAILURE_INVALID_STATE; |
745 | } | 579 | } |
746 | } | 580 | } |
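The TX/TX-RX suspended arms of the resume path encode a routing rule: an expander-attached SATA device (dev_is_sata(dev) && dev->parent) must be invalidated and reposted, since that is the only way to clear the TCi-to-NCQ-tag mapping for the RNi, while any other device can go straight to resuming. A standalone sketch of that decision, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct dev_info {
	bool is_sata;
	bool has_parent;	/* non-NULL dev->parent: expander attached */
};

enum rnc_next { NEXT_INVALIDATING, NEXT_RESUMING };

static enum rnc_next route_resume(const struct dev_info *d, bool dest_final)
{
	/* expander-attached SATA, or an RNC headed for destruction, must be
	 * invalidated and reposted; everything else resumes directly */
	if ((d->is_sata && d->has_parent) || dest_final)
		return NEXT_INVALIDATING;
	return NEXT_RESUMING;
}

int main(void)
{
	struct dev_info stp = { .is_sata = true,  .has_parent = true };
	struct dev_info ssp = { .is_sata = false, .has_parent = true };

	printf("stp: %d, ssp: %d\n",
	       route_resume(&stp, false),	/* NEXT_INVALIDATING */
	       route_resume(&ssp, false));	/* NEXT_RESUMING */
	return 0;
}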
@@ -759,51 +593,35 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context | |||
759 | case SCI_RNC_TX_RX_SUSPENDED: | 593 | case SCI_RNC_TX_RX_SUSPENDED: |
760 | case SCI_RNC_AWAIT_SUSPENSION: | 594 | case SCI_RNC_AWAIT_SUSPENSION: |
761 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 595 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
762 | "%s: invalid state %s\n", __func__, | 596 | "%s: invalid state %d\n", __func__, state); |
763 | rnc_state_name(state)); | ||
764 | return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; | 597 | return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; |
765 | default: | 598 | default: |
766 | dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 599 | break; |
767 | "%s: invalid state %s\n", __func__, | ||
768 | rnc_state_name(state)); | ||
769 | return SCI_FAILURE_INVALID_STATE; | ||
770 | } | 600 | } |
601 | dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), | ||
602 | "%s: requested to start IO while still resuming, %d\n", | ||
603 | __func__, state); | ||
604 | return SCI_FAILURE_INVALID_STATE; | ||
771 | } | 605 | } |
772 | 606 | ||
773 | enum sci_status sci_remote_node_context_start_task( | 607 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, |
774 | struct sci_remote_node_context *sci_rnc, | 608 | struct isci_request *ireq) |
775 | struct isci_request *ireq, | ||
776 | scics_sds_remote_node_context_callback cb_fn, | ||
777 | void *cb_p) | ||
778 | { | ||
779 | enum sci_status status = sci_remote_node_context_resume(sci_rnc, | ||
780 | cb_fn, cb_p); | ||
781 | if (status != SCI_SUCCESS) | ||
782 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | ||
783 | "%s: resume failed: %d\n", __func__, status); | ||
784 | return status; | ||
785 | } | ||
786 | |||
787 | int sci_remote_node_context_is_safe_to_abort( | ||
788 | struct sci_remote_node_context *sci_rnc) | ||
789 | { | 609 | { |
790 | enum scis_sds_remote_node_context_states state; | 610 | enum scis_sds_remote_node_context_states state; |
791 | 611 | ||
792 | state = sci_rnc->sm.current_state_id; | 612 | state = sci_rnc->sm.current_state_id; |
793 | switch (state) { | 613 | switch (state) { |
794 | case SCI_RNC_INVALIDATING: | ||
795 | case SCI_RNC_TX_RX_SUSPENDED: | ||
796 | return 1; | ||
797 | case SCI_RNC_POSTING: | ||
798 | case SCI_RNC_RESUMING: | 614 | case SCI_RNC_RESUMING: |
799 | case SCI_RNC_READY: | 615 | case SCI_RNC_READY: |
800 | case SCI_RNC_TX_SUSPENDED: | ||
801 | case SCI_RNC_AWAIT_SUSPENSION: | 616 | case SCI_RNC_AWAIT_SUSPENSION: |
802 | case SCI_RNC_INITIAL: | 617 | return SCI_SUCCESS; |
803 | return 0; | 618 | case SCI_RNC_TX_SUSPENDED: |
619 | case SCI_RNC_TX_RX_SUSPENDED: | ||
620 | sci_remote_node_context_resume(sci_rnc, NULL, NULL); | ||
621 | return SCI_SUCCESS; | ||
804 | default: | 622 | default: |
805 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 623 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
806 | "%s: invalid state %d\n", __func__, state); | 624 | "%s: invalid state %d\n", __func__, state); |
807 | return 0; | 625 | return SCI_FAILURE_INVALID_STATE; |
808 | } | 626 | } |
809 | } | 627 | } |
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index c7ee81d0112..41580ad1252 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h | |||
@@ -75,13 +75,8 @@ | |||
75 | */ | 75 | */ |
76 | #define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF | 76 | #define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF |
77 | 77 | ||
78 | enum sci_remote_node_suspension_reasons { | 78 | #define SCU_HARDWARE_SUSPENSION (0) |
79 | SCI_HW_SUSPEND, | 79 | #define SCI_SOFTWARE_SUSPENSION (1) |
80 | SCI_SW_SUSPEND_NORMAL, | ||
81 | SCI_SW_SUSPEND_LINKHANG_DETECT | ||
82 | }; | ||
83 | #define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | ||
84 | #define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX | ||
85 | 80 | ||
86 | struct isci_request; | 81 | struct isci_request; |
87 | struct isci_remote_device; | 82 | struct isci_remote_device; |
@@ -90,50 +85,61 @@ struct sci_remote_node_context; | |||
90 | typedef void (*scics_sds_remote_node_context_callback)(void *); | 85 | typedef void (*scics_sds_remote_node_context_callback)(void *); |
91 | 86 | ||
92 | /** | 87 | /** |
93 | * enum sci_remote_node_context_states | 88 | * This is the enumeration of the remote node context states. |
94 | * @SCI_RNC_INITIAL: initial state for a remote node context. On a resume | ||
95 | * request the remote node context will transition to the posting state. | ||
96 | * | ||
97 | * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once | ||
98 | * the RNC is posted the remote node context will be made ready. | ||
99 | * | ||
100 | * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to | ||
101 | * the hardware. Once the invalidate is complete the remote node context will | ||
102 | * transition to the posting state. | ||
103 | * | ||
104 | * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the | ||
105 | * hardware. Once the event notification of resume complete is received the | ||
106 | * remote node context will transition to the ready state. | ||
107 | * | ||
108 | * @SCI_RNC_READY: state that the remote node context must be in to accept io | ||
109 | * request operations. | ||
110 | * | ||
111 | * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when | ||
112 | * it gets a TX suspend notification from the hardware. | ||
113 | * | ||
114 | * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to | ||
115 | * when it gets a TX RX suspend notification from the hardware. | ||
116 | * | ||
117 | * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits | ||
118 | * for a suspend notification from the hardware. This state is entered when | ||
119 | * either there is a request to suspend the remote node context or when there is | ||
120 | * a TC completion where the remote node will be suspended by the hardware. | ||
121 | */ | 89 | */ |
122 | #define RNC_STATES {\ | 90 | enum scis_sds_remote_node_context_states { |
123 | C(RNC_INITIAL),\ | 91 | /** |
124 | C(RNC_POSTING),\ | 92 | * This state is the initial state for a remote node context. On a resume |
125 | C(RNC_INVALIDATING),\ | 93 | * request the remote node context will transition to the posting state. |
126 | C(RNC_RESUMING),\ | 94 | */ |
127 | C(RNC_READY),\ | 95 | SCI_RNC_INITIAL, |
128 | C(RNC_TX_SUSPENDED),\ | 96 | |
129 | C(RNC_TX_RX_SUSPENDED),\ | 97 | /** |
130 | C(RNC_AWAIT_SUSPENSION),\ | 98 | * This is a transition state that posts the RNi to the hardware. Once the RNC |
131 | } | 99 | * is posted the remote node context will be made ready. |
132 | #undef C | 100 | */ |
133 | #define C(a) SCI_##a | 101 | SCI_RNC_POSTING, |
134 | enum scis_sds_remote_node_context_states RNC_STATES; | 102 | |
135 | #undef C | 103 | /** |
136 | const char *rnc_state_name(enum scis_sds_remote_node_context_states state); | 104 | * This is a transition state that will post an RNC invalidate to the |
105 | * hardware. Once the invalidate is complete the remote node context will | ||
106 | * transition to the posting state. | ||
107 | */ | ||
108 | SCI_RNC_INVALIDATING, | ||
109 | |||
110 | /** | ||
111 | * This is a transition state that will post an RNC resume to the hardware. | ||
112 | * Once the event notification of resume complete is received the remote node | ||
113 | * context will transition to the ready state. | ||
114 | */ | ||
115 | SCI_RNC_RESUMING, | ||
116 | |||
117 | /** | ||
118 | * This is the state that the remote node context must be in to accept io | ||
119 | * request operations. | ||
120 | */ | ||
121 | SCI_RNC_READY, | ||
122 | |||
123 | /** | ||
124 | * This is the state that the remote node context transitions to when it gets | ||
125 | * a TX suspend notification from the hardware. | ||
126 | */ | ||
127 | SCI_RNC_TX_SUSPENDED, | ||
128 | |||
129 | /** | ||
130 | * This is the state that the remote node context transitions to when it gets | ||
131 | * a TX RX suspend notification from the hardware. | ||
132 | */ | ||
133 | SCI_RNC_TX_RX_SUSPENDED, | ||
134 | |||
135 | /** | ||
136 | * This state is a wait state for the remote node context that waits for a | ||
137 | * suspend notification from the hardware. This state is entered when either | ||
138 | * there is a request to suspend the remote node context or when there is a TC | ||
139 | * completion where the remote node will be suspended by the hardware. | ||
140 | */ | ||
141 | SCI_RNC_AWAIT_SUSPENSION | ||
142 | }; | ||
137 | 143 | ||
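The RNC_STATES/C() construct on the left is an X-macro: one list is expanded twice, once with C(a) defined as SCI_##a to produce the enum and once as #a to produce the string table behind rnc_state_name(), so the names can never drift out of sync with the values. A compilable standalone reduction of the trick:

#include <stdio.h>

#define STATES {\
	C(RNC_INITIAL),\
	C(RNC_READY),\
}
#undef C
#define C(a) SCI_##a
enum rnc_states STATES;
#undef C
#define C(a) #a
static const char * const state_names[] = STATES;
#undef C

int main(void)
{
	printf("%d -> %s\n", SCI_RNC_READY, state_names[SCI_RNC_READY]);
	return 0;
}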
138 | /** | 144 | /** |
139 | * | 145 | * |
@@ -142,13 +148,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state); | |||
142 | * node context. | 148 | * node context. |
143 | */ | 149 | */ |
144 | enum sci_remote_node_context_destination_state { | 150 | enum sci_remote_node_context_destination_state { |
145 | RNC_DEST_UNSPECIFIED, | 151 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, |
146 | RNC_DEST_READY, | 152 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, |
147 | RNC_DEST_FINAL, | 153 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL |
148 | RNC_DEST_SUSPENDED, /* Set when suspend during post/invalidate */ | ||
149 | RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting | ||
150 | * or invalidating and already suspending. | ||
151 | */ | ||
152 | }; | 154 | }; |
153 | 155 | ||
154 | /** | 156 | /** |
@@ -165,12 +167,10 @@ struct sci_remote_node_context { | |||
165 | u16 remote_node_index; | 167 | u16 remote_node_index; |
166 | 168 | ||
167 | /** | 169 | /** |
168 | * This field is the recorded suspension type of the remote node | 170 | * This field is the recorded suspension code or the reason for the remote node |
169 | * context suspension. | 171 | * context suspension. |
170 | */ | 172 | */ |
171 | u32 suspend_type; | 173 | u32 suspension_code; |
172 | enum sci_remote_node_suspension_reasons suspend_reason; | ||
173 | u32 suspend_count; | ||
174 | 174 | ||
175 | /** | 175 | /** |
176 | * This field is true if the remote node context is resuming from its current | 176 | * This field is true if the remote node context is resuming from its current |
@@ -204,33 +204,21 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, | |||
204 | bool sci_remote_node_context_is_ready( | 204 | bool sci_remote_node_context_is_ready( |
205 | struct sci_remote_node_context *sci_rnc); | 205 | struct sci_remote_node_context *sci_rnc); |
206 | 206 | ||
207 | bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc); | ||
208 | |||
209 | enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, | 207 | enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, |
210 | u32 event_code); | 208 | u32 event_code); |
211 | enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, | 209 | enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, |
212 | scics_sds_remote_node_context_callback callback, | 210 | scics_sds_remote_node_context_callback callback, |
213 | void *callback_parameter); | 211 | void *callback_parameter); |
214 | enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, | 212 | enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, |
215 | enum sci_remote_node_suspension_reasons reason, | 213 | u32 suspend_type, |
216 | u32 suspension_code); | 214 | scics_sds_remote_node_context_callback cb_fn, |
215 | void *cb_p); | ||
217 | enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, | 216 | enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, |
218 | scics_sds_remote_node_context_callback cb_fn, | 217 | scics_sds_remote_node_context_callback cb_fn, |
219 | void *cb_p); | 218 | void *cb_p); |
220 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, | 219 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, |
221 | struct isci_request *ireq, | 220 | struct isci_request *ireq); |
222 | scics_sds_remote_node_context_callback cb_fn, | ||
223 | void *cb_p); | ||
224 | enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, | 221 | enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, |
225 | struct isci_request *ireq); | 222 | struct isci_request *ireq); |
226 | int sci_remote_node_context_is_safe_to_abort( | ||
227 | struct sci_remote_node_context *sci_rnc); | ||
228 | 223 | ||
229 | static inline bool sci_remote_node_context_is_being_destroyed( | ||
230 | struct sci_remote_node_context *sci_rnc) | ||
231 | { | ||
232 | return (sci_rnc->destination_state == RNC_DEST_FINAL) | ||
233 | || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL) | ||
234 | && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED)); | ||
235 | } | ||
236 | #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ | 224 | #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ |
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 9594ab62702..225b196800a 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
@@ -53,7 +53,6 @@ | |||
53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
54 | */ | 54 | */ |
55 | 55 | ||
56 | #include <scsi/scsi_cmnd.h> | ||
57 | #include "isci.h" | 56 | #include "isci.h" |
58 | #include "task.h" | 57 | #include "task.h" |
59 | #include "request.h" | 58 | #include "request.h" |
@@ -61,16 +60,6 @@ | |||
61 | #include "scu_event_codes.h" | 60 | #include "scu_event_codes.h" |
62 | #include "sas.h" | 61 | #include "sas.h" |
63 | 62 | ||
64 | #undef C | ||
65 | #define C(a) (#a) | ||
66 | const char *req_state_name(enum sci_base_request_states state) | ||
67 | { | ||
68 | static const char * const strings[] = REQUEST_STATES; | ||
69 | |||
70 | return strings[state]; | ||
71 | } | ||
72 | #undef C | ||
73 | |||
74 | static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, | 63 | static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, |
75 | int idx) | 64 | int idx) |
76 | { | 65 | { |
@@ -92,11 +81,11 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, | |||
92 | if (idx == 0) { | 81 | if (idx == 0) { |
93 | offset = (void *) &ireq->tc->sgl_pair_ab - | 82 | offset = (void *) &ireq->tc->sgl_pair_ab - |
94 | (void *) &ihost->task_context_table[0]; | 83 | (void *) &ihost->task_context_table[0]; |
95 | return ihost->tc_dma + offset; | 84 | return ihost->task_context_dma + offset; |
96 | } else if (idx == 1) { | 85 | } else if (idx == 1) { |
97 | offset = (void *) &ireq->tc->sgl_pair_cd - | 86 | offset = (void *) &ireq->tc->sgl_pair_cd - |
98 | (void *) &ihost->task_context_table[0]; | 87 | (void *) &ihost->task_context_table[0]; |
99 | return ihost->tc_dma + offset; | 88 | return ihost->task_context_dma + offset; |
100 | } | 89 | } |
101 | 90 | ||
102 | return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); | 91 | return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); |
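to_sgl_element_pair_dma() above derives a member's bus address by adding its byte offset within the DMA-coherent task-context table to the table's base DMA address. A standalone sketch of that pointer arithmetic; the struct is a stand-in for scu_task_context:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct tc { uint64_t sgl_pair_ab; uint64_t sgl_pair_cd; };

static uint64_t member_dma(const struct tc *table, uint64_t table_dma,
			   const void *member)
{
	/* virtual offset into the table equals the bus-address offset */
	ptrdiff_t offset = (const char *)member - (const char *)table;

	return table_dma + offset;
}

int main(void)
{
	struct tc table[4];
	uint64_t dma = 0x10000000;

	printf("%#llx\n",
	       (unsigned long long)member_dma(table, dma, &table[2].sgl_pair_cd));
	return 0;
}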
@@ -202,7 +191,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) | |||
202 | 191 | ||
203 | task_iu->task_func = isci_tmf->tmf_code; | 192 | task_iu->task_func = isci_tmf->tmf_code; |
204 | task_iu->task_tag = | 193 | task_iu->task_tag = |
205 | (test_bit(IREQ_TMF, &ireq->flags)) ? | 194 | (ireq->ttype == tmf_task) ? |
206 | isci_tmf->io_tag : | 195 | isci_tmf->io_tag : |
207 | SCI_CONTROLLER_INVALID_IO_TAG; | 196 | SCI_CONTROLLER_INVALID_IO_TAG; |
208 | } | 197 | } |
@@ -275,141 +264,6 @@ static void scu_ssp_reqeust_construct_task_context( | |||
275 | task_context->response_iu_lower = lower_32_bits(dma_addr); | 264 | task_context->response_iu_lower = lower_32_bits(dma_addr); |
276 | } | 265 | } |
277 | 266 | ||
278 | static u8 scu_bg_blk_size(struct scsi_device *sdp) | ||
279 | { | ||
280 | switch (sdp->sector_size) { | ||
281 | case 512: | ||
282 | return 0; | ||
283 | case 1024: | ||
284 | return 1; | ||
285 | case 4096: | ||
286 | return 3; | ||
287 | default: | ||
288 | return 0xff; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | static u32 scu_dif_bytes(u32 len, u32 sector_size) | ||
293 | { | ||
294 | return (len >> ilog2(sector_size)) * 8; | ||
295 | } | ||
296 | |||
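scu_dif_bytes() sizes the protection-information overhead: one 8-byte DIF tuple per logical block, so a 64 KiB transfer on 512-byte sectors grows by 128 * 8 = 1024 bytes. A standalone check of that arithmetic; ilog2() is open-coded since the kernel helper is not available here:

#include <stdio.h>
#include <stdint.h>

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint32_t dif_bytes(uint32_t len, uint32_t sector_size)
{
	return (len >> ilog2_u32(sector_size)) * 8;
}

int main(void)
{
	/* 64 KiB in 512-byte sectors: 128 sectors -> 1024 DIF bytes */
	printf("%u\n", dif_bytes(65536, 512));
	return 0;
}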
297 | static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) | ||
298 | { | ||
299 | struct scu_task_context *tc = ireq->tc; | ||
300 | struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; | ||
301 | u8 blk_sz = scu_bg_blk_size(scmd->device); | ||
302 | |||
303 | tc->block_guard_enable = 1; | ||
304 | tc->blk_prot_en = 1; | ||
305 | tc->blk_sz = blk_sz; | ||
306 | /* DIF write insert */ | ||
307 | tc->blk_prot_func = 0x2; | ||
308 | |||
309 | tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, | ||
310 | scmd->device->sector_size); | ||
311 | |||
312 | /* always init to 0, used by hw */ | ||
313 | tc->interm_crc_val = 0; | ||
314 | |||
315 | tc->init_crc_seed = 0; | ||
316 | tc->app_tag_verify = 0; | ||
317 | tc->app_tag_gen = 0; | ||
318 | tc->ref_tag_seed_verify = 0; | ||
319 | |||
320 | /* always init to same as bg_blk_sz */ | ||
321 | tc->UD_bytes_immed_val = scmd->device->sector_size; | ||
322 | |||
323 | tc->reserved_DC_0 = 0; | ||
324 | |||
325 | /* always init to 8 */ | ||
326 | tc->DIF_bytes_immed_val = 8; | ||
327 | |||
328 | tc->reserved_DC_1 = 0; | ||
329 | tc->bgc_blk_sz = scmd->device->sector_size; | ||
330 | tc->reserved_E0_0 = 0; | ||
331 | tc->app_tag_gen_mask = 0; | ||
332 | |||
333 | /** setup block guard control **/ | ||
334 | tc->bgctl = 0; | ||
335 | |||
336 | /* DIF write insert */ | ||
337 | tc->bgctl_f.op = 0x2; | ||
338 | |||
339 | tc->app_tag_verify_mask = 0; | ||
340 | |||
341 | /* must init to 0 for hw */ | ||
342 | tc->blk_guard_err = 0; | ||
343 | |||
344 | tc->reserved_E8_0 = 0; | ||
345 | |||
346 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) | ||
347 | tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; | ||
348 | else if (type & SCSI_PROT_DIF_TYPE3) | ||
349 | tc->ref_tag_seed_gen = 0; | ||
350 | } | ||
351 | |||
352 | static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) | ||
353 | { | ||
354 | struct scu_task_context *tc = ireq->tc; | ||
355 | struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; | ||
356 | u8 blk_sz = scu_bg_blk_size(scmd->device); | ||
357 | |||
358 | tc->block_guard_enable = 1; | ||
359 | tc->blk_prot_en = 1; | ||
360 | tc->blk_sz = blk_sz; | ||
361 | /* DIF read strip */ | ||
362 | tc->blk_prot_func = 0x1; | ||
363 | |||
364 | tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, | ||
365 | scmd->device->sector_size); | ||
366 | |||
367 | /* always init to 0, used by hw */ | ||
368 | tc->interm_crc_val = 0; | ||
369 | |||
370 | tc->init_crc_seed = 0; | ||
371 | tc->app_tag_verify = 0; | ||
372 | tc->app_tag_gen = 0; | ||
373 | |||
374 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) | ||
375 | tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff; | ||
376 | else if (type & SCSI_PROT_DIF_TYPE3) | ||
377 | tc->ref_tag_seed_verify = 0; | ||
378 | |||
379 | /* always init to same as bg_blk_sz */ | ||
380 | tc->UD_bytes_immed_val = scmd->device->sector_size; | ||
381 | |||
382 | tc->reserved_DC_0 = 0; | ||
383 | |||
384 | /* always init to 8 */ | ||
385 | tc->DIF_bytes_immed_val = 8; | ||
386 | |||
387 | tc->reserved_DC_1 = 0; | ||
388 | tc->bgc_blk_sz = scmd->device->sector_size; | ||
389 | tc->reserved_E0_0 = 0; | ||
390 | tc->app_tag_gen_mask = 0; | ||
391 | |||
392 | /** setup block guard control **/ | ||
393 | tc->bgctl = 0; | ||
394 | |||
395 | /* DIF read strip */ | ||
396 | tc->bgctl_f.crc_verify = 1; | ||
397 | tc->bgctl_f.op = 0x1; | ||
398 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { | ||
399 | tc->bgctl_f.ref_tag_chk = 1; | ||
400 | tc->bgctl_f.app_f_detect = 1; | ||
401 | } else if (type & SCSI_PROT_DIF_TYPE3) | ||
402 | tc->bgctl_f.app_ref_f_detect = 1; | ||
403 | |||
404 | tc->app_tag_verify_mask = 0; | ||
405 | |||
406 | /* must init to 0 for hw */ | ||
407 | tc->blk_guard_err = 0; | ||
408 | |||
409 | tc->reserved_E8_0 = 0; | ||
410 | tc->ref_tag_seed_gen = 0; | ||
411 | } | ||
412 | |||
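Both DIF paths above share the same ref-tag seeding rule: Type 1 and Type 2 protection seed the reference tag with the low 32 bits of the command's starting LBA, while Type 3 carries no meaningful ref tag. A minimal sketch; the flag values are illustrative, not the SCSI_PROT_DIF_TYPE* constants:

#include <stdint.h>
#include <stdio.h>

#define DIF_TYPE1 (1 << 0)	/* illustrative flags */
#define DIF_TYPE2 (1 << 1)
#define DIF_TYPE3 (1 << 2)

static uint32_t ref_tag_seed(uint8_t type, uint64_t lba)
{
	if (type & (DIF_TYPE1 | DIF_TYPE2))
		return lba & 0xffffffff;
	return 0;	/* Type 3: reference tag not checked */
}

int main(void)
{
	printf("%#x\n", ref_tag_seed(DIF_TYPE1, 0x123456789abcull)); /* 0x56789abc */
	return 0;
}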
413 | /** | 267 | /** |
414 | * This method will fill in the SCU Task Context for an SSP IO request. | 268 | * This method will fill in the SCU Task Context for an SSP IO request. |
415 | * @sci_req: | 269 | * @sci_req: |
@@ -420,10 +274,6 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, | |||
420 | u32 len) | 274 | u32 len) |
421 | { | 275 | { |
422 | struct scu_task_context *task_context = ireq->tc; | 276 | struct scu_task_context *task_context = ireq->tc; |
423 | struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr; | ||
424 | struct scsi_cmnd *scmd = sas_task->uldd_task; | ||
425 | u8 prot_type = scsi_get_prot_type(scmd); | ||
426 | u8 prot_op = scsi_get_prot_op(scmd); | ||
427 | 277 | ||
428 | scu_ssp_reqeust_construct_task_context(ireq, task_context); | 278 | scu_ssp_reqeust_construct_task_context(ireq, task_context); |
429 | 279 | ||
@@ -446,13 +296,6 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, | |||
446 | 296 | ||
447 | if (task_context->transfer_length_bytes > 0) | 297 | if (task_context->transfer_length_bytes > 0) |
448 | sci_request_build_sgl(ireq); | 298 | sci_request_build_sgl(ireq); |
449 | |||
450 | if (prot_type != SCSI_PROT_DIF_TYPE0) { | ||
451 | if (prot_op == SCSI_PROT_READ_STRIP) | ||
452 | scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op); | ||
453 | else if (prot_op == SCSI_PROT_WRITE_INSERT) | ||
454 | scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op); | ||
455 | } | ||
456 | } | 299 | } |
457 | 300 | ||
458 | /** | 301 | /** |
@@ -638,29 +481,7 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq, | |||
638 | } | 481 | } |
639 | } | 482 | } |
640 | 483 | ||
641 | static void sci_atapi_construct(struct isci_request *ireq) | ||
642 | { | ||
643 | struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd; | ||
644 | struct sas_task *task; | ||
645 | 484 | ||
646 | /* To simplify the implementation we take advantage of the | ||
647 | * silicon's partial acceleration of atapi protocol (dma data | ||
648 | * transfers), so we promote all commands to dma protocol. This | ||
649 | * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. | ||
650 | */ | ||
651 | h2d_fis->features |= ATAPI_PKT_DMA; | ||
652 | |||
653 | scu_stp_raw_request_construct_task_context(ireq); | ||
654 | |||
655 | task = isci_request_access_task(ireq); | ||
656 | if (task->data_dir == DMA_NONE) | ||
657 | task->total_xfer_len = 0; | ||
658 | |||
659 | /* clear the response so we can detect arrival of an | ||
660 | * unsolicited h2d fis | ||
661 | */ | ||
662 | ireq->stp.rsp.fis_type = 0; | ||
663 | } | ||
664 | 485 | ||
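sci_atapi_construct() promotes every packet command to the DMA protocol by setting ATAPI_PKT_DMA in the H2D FIS features field (bit 0 in libata's definition). A standalone sketch with a stand-in for host_to_dev_fis:

#include <stdint.h>
#include <stdio.h>

#define ATAPI_PKT_DMA 0x01	/* matches libata's (1 << 0) */

struct h2d_fis { uint8_t fis_type, command, features; };

int main(void)
{
	struct h2d_fis fis = { .fis_type = 0x27, .command = 0xa0 /* PACKET */ };

	fis.features |= ATAPI_PKT_DMA;	/* force DMA protocol */
	printf("features %#x\n", fis.features);
	return 0;
}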
665 | static enum sci_status | 486 | static enum sci_status |
666 | sci_io_request_construct_sata(struct isci_request *ireq, | 487 | sci_io_request_construct_sata(struct isci_request *ireq, |
@@ -670,18 +491,23 @@ sci_io_request_construct_sata(struct isci_request *ireq, | |||
670 | { | 491 | { |
671 | enum sci_status status = SCI_SUCCESS; | 492 | enum sci_status status = SCI_SUCCESS; |
672 | struct sas_task *task = isci_request_access_task(ireq); | 493 | struct sas_task *task = isci_request_access_task(ireq); |
673 | struct domain_device *dev = ireq->target_device->domain_dev; | ||
674 | 494 | ||
675 | /* check for management protocols */ | 495 | /* check for management protocols */ |
676 | if (test_bit(IREQ_TMF, &ireq->flags)) { | 496 | if (ireq->ttype == tmf_task) { |
677 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | 497 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); |
678 | 498 | ||
679 | dev_err(&ireq->owning_controller->pdev->dev, | 499 | if (tmf->tmf_code == isci_tmf_sata_srst_high || |
680 | "%s: Request 0x%p received un-handled SAT " | 500 | tmf->tmf_code == isci_tmf_sata_srst_low) { |
681 | "management protocol 0x%x.\n", | 501 | scu_stp_raw_request_construct_task_context(ireq); |
682 | __func__, ireq, tmf->tmf_code); | 502 | return SCI_SUCCESS; |
503 | } else { | ||
504 | dev_err(&ireq->owning_controller->pdev->dev, | ||
505 | "%s: Request 0x%p received un-handled SAT " | ||
506 | "management protocol 0x%x.\n", | ||
507 | __func__, ireq, tmf->tmf_code); | ||
683 | 508 | ||
684 | return SCI_FAILURE; | 509 | return SCI_FAILURE; |
510 | } | ||
685 | } | 511 | } |
686 | 512 | ||
687 | if (!sas_protocol_ata(task->task_proto)) { | 513 | if (!sas_protocol_ata(task->task_proto)) { |
@@ -693,13 +519,6 @@ sci_io_request_construct_sata(struct isci_request *ireq, | |||
693 | 519 | ||
694 | } | 520 | } |
695 | 521 | ||
696 | /* ATAPI */ | ||
697 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && | ||
698 | task->ata_task.fis.command == ATA_CMD_PACKET) { | ||
699 | sci_atapi_construct(ireq); | ||
700 | return SCI_SUCCESS; | ||
701 | } | ||
702 | |||
703 | /* non data */ | 522 | /* non data */ |
704 | if (task->data_dir == DMA_NONE) { | 523 | if (task->data_dir == DMA_NONE) { |
705 | scu_stp_raw_request_construct_task_context(ireq); | 524 | scu_stp_raw_request_construct_task_context(ireq); |
@@ -730,7 +549,7 @@ static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *i | |||
730 | { | 549 | { |
731 | struct sas_task *task = isci_request_access_task(ireq); | 550 | struct sas_task *task = isci_request_access_task(ireq); |
732 | 551 | ||
733 | ireq->protocol = SAS_PROTOCOL_SSP; | 552 | ireq->protocol = SCIC_SSP_PROTOCOL; |
734 | 553 | ||
735 | scu_ssp_io_request_construct_task_context(ireq, | 554 | scu_ssp_io_request_construct_task_context(ireq, |
736 | task->data_dir, | 555 | task->data_dir, |
@@ -763,7 +582,7 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request * | |||
763 | bool copy = false; | 582 | bool copy = false; |
764 | struct sas_task *task = isci_request_access_task(ireq); | 583 | struct sas_task *task = isci_request_access_task(ireq); |
765 | 584 | ||
766 | ireq->protocol = SAS_PROTOCOL_STP; | 585 | ireq->protocol = SCIC_STP_PROTOCOL; |
767 | 586 | ||
768 | copy = (task->data_dir == DMA_NONE) ? false : true; | 587 | copy = (task->data_dir == DMA_NONE) ? false : true; |
769 | 588 | ||
@@ -778,9 +597,37 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request * | |||
778 | return status; | 597 | return status; |
779 | } | 598 | } |
780 | 599 | ||
600 | enum sci_status sci_task_request_construct_sata(struct isci_request *ireq) | ||
601 | { | ||
602 | enum sci_status status = SCI_SUCCESS; | ||
603 | |||
604 | /* check for management protocols */ | ||
605 | if (ireq->ttype == tmf_task) { | ||
606 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | ||
607 | |||
608 | if (tmf->tmf_code == isci_tmf_sata_srst_high || | ||
609 | tmf->tmf_code == isci_tmf_sata_srst_low) { | ||
610 | scu_stp_raw_request_construct_task_context(ireq); | ||
611 | } else { | ||
612 | dev_err(&ireq->owning_controller->pdev->dev, | ||
613 | "%s: Request 0x%p received un-handled SAT " | ||
614 | "Protocol 0x%x.\n", | ||
615 | __func__, ireq, tmf->tmf_code); | ||
616 | |||
617 | return SCI_FAILURE; | ||
618 | } | ||
619 | } | ||
620 | |||
621 | if (status != SCI_SUCCESS) | ||
622 | return status; | ||
623 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); | ||
624 | |||
625 | return status; | ||
626 | } | ||
627 | |||
781 | /** | 628 | /** |
782 | * sci_req_tx_bytes - bytes transferred when reply underruns request | 629 | * sci_req_tx_bytes - bytes transferred when reply underruns request |
783 | * @ireq: request that was terminated early | 630 | * @sci_req: request that was terminated early |
784 | */ | 631 | */ |
785 | #define SCU_TASK_CONTEXT_SRAM 0x200000 | 632 | #define SCU_TASK_CONTEXT_SRAM 0x200000 |
786 | static u32 sci_req_tx_bytes(struct isci_request *ireq) | 633 | static u32 sci_req_tx_bytes(struct isci_request *ireq) |
@@ -863,8 +710,6 @@ sci_io_request_terminate(struct isci_request *ireq) | |||
863 | 710 | ||
864 | switch (state) { | 711 | switch (state) { |
865 | case SCI_REQ_CONSTRUCTED: | 712 | case SCI_REQ_CONSTRUCTED: |
866 | /* Set to make sure no HW terminate posting is done: */ | ||
867 | set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags); | ||
868 | ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; | 713 | ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; |
869 | ireq->sci_status = SCI_FAILURE_IO_TERMINATED; | 714 | ireq->sci_status = SCI_FAILURE_IO_TERMINATED; |
870 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 715 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
@@ -881,11 +726,11 @@ sci_io_request_terminate(struct isci_request *ireq) | |||
881 | case SCI_REQ_STP_PIO_WAIT_FRAME: | 726 | case SCI_REQ_STP_PIO_WAIT_FRAME: |
882 | case SCI_REQ_STP_PIO_DATA_IN: | 727 | case SCI_REQ_STP_PIO_DATA_IN: |
883 | case SCI_REQ_STP_PIO_DATA_OUT: | 728 | case SCI_REQ_STP_PIO_DATA_OUT: |
884 | case SCI_REQ_ATAPI_WAIT_H2D: | 729 | case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: |
885 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: | 730 | case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: |
886 | case SCI_REQ_ATAPI_WAIT_D2H: | 731 | case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: |
887 | case SCI_REQ_ATAPI_WAIT_TC_COMP: | 732 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); |
888 | /* Fall through and change state to ABORTING... */ | 733 | return SCI_SUCCESS; |
889 | case SCI_REQ_TASK_WAIT_TC_RESP: | 734 | case SCI_REQ_TASK_WAIT_TC_RESP: |
890 | /* The task frame was already confirmed to have been | 735 | /* The task frame was already confirmed to have been |
891 | * sent by the SCU HW. Since the state machine is | 736 | * sent by the SCU HW. Since the state machine is |
@@ -894,21 +739,20 @@ sci_io_request_terminate(struct isci_request *ireq) | |||
894 | * and don't wait for the task response. | 739 | * and don't wait for the task response. |
895 | */ | 740 | */ |
896 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); | 741 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); |
897 | /* Fall through and handle like ABORTING... */ | 742 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
743 | return SCI_SUCCESS; | ||
898 | case SCI_REQ_ABORTING: | 744 | case SCI_REQ_ABORTING: |
899 | if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) | 745 | /* If a request has a termination requested twice, return |
900 | set_bit(IREQ_PENDING_ABORT, &ireq->flags); | 746 | * a failure indication, since HW confirmation of the first |
901 | else | 747 | * abort is still outstanding. |
902 | clear_bit(IREQ_PENDING_ABORT, &ireq->flags); | ||
903 | /* If the request is only waiting on the remote device | ||
904 | * suspension, return SUCCESS so the caller will wait too. | ||
905 | */ | 748 | */ |
906 | return SCI_SUCCESS; | ||
907 | case SCI_REQ_COMPLETED: | 749 | case SCI_REQ_COMPLETED: |
908 | default: | 750 | default: |
909 | dev_warn(&ireq->owning_controller->pdev->dev, | 751 | dev_warn(&ireq->owning_controller->pdev->dev, |
910 | "%s: SCIC IO Request requested to abort while in wrong " | 752 | "%s: SCIC IO Request requested to abort while in wrong " |
911 | "state %d\n", __func__, ireq->sm.current_state_id); | 753 | "state %d\n", |
754 | __func__, | ||
755 | ireq->sm.current_state_id); | ||
912 | break; | 756 | break; |
913 | } | 757 | } |
914 | 758 | ||
@@ -922,8 +766,7 @@ enum sci_status sci_request_complete(struct isci_request *ireq) | |||
922 | 766 | ||
923 | state = ireq->sm.current_state_id; | 767 | state = ireq->sm.current_state_id; |
924 | if (WARN_ONCE(state != SCI_REQ_COMPLETED, | 768 | if (WARN_ONCE(state != SCI_REQ_COMPLETED, |
925 | "isci: request completion from wrong state (%s)\n", | 769 | "isci: request completion from wrong state (%d)\n", state)) |
926 | req_state_name(state))) | ||
927 | return SCI_FAILURE_INVALID_STATE; | 770 | return SCI_FAILURE_INVALID_STATE; |
928 | 771 | ||
929 | if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) | 772 | if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) |
@@ -944,8 +787,8 @@ enum sci_status sci_io_request_event_handler(struct isci_request *ireq, | |||
944 | state = ireq->sm.current_state_id; | 787 | state = ireq->sm.current_state_id; |
945 | 788 | ||
946 | if (state != SCI_REQ_STP_PIO_DATA_IN) { | 789 | if (state != SCI_REQ_STP_PIO_DATA_IN) { |
947 | dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n", | 790 | dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n", |
948 | __func__, event_code, req_state_name(state)); | 791 | __func__, event_code, state); |
949 | 792 | ||
950 | return SCI_FAILURE_INVALID_STATE; | 793 | return SCI_FAILURE_INVALID_STATE; |
951 | } | 794 | } |
@@ -1072,7 +915,7 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
1072 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): | 915 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): |
1073 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): | 916 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): |
1074 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): | 917 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): |
1075 | if (ireq->protocol == SAS_PROTOCOL_STP) { | 918 | if (ireq->protocol == SCIC_STP_PROTOCOL) { |
1076 | ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 919 | ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
1077 | SCU_COMPLETION_TL_STATUS_SHIFT; | 920 | SCU_COMPLETION_TL_STATUS_SHIFT; |
1078 | ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; | 921 | ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; |
@@ -1351,8 +1194,8 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re | |||
1351 | { | 1194 | { |
1352 | struct isci_stp_request *stp_req = &ireq->stp.req; | 1195 | struct isci_stp_request *stp_req = &ireq->stp.req; |
1353 | struct scu_sgl_element_pair *sgl_pair; | 1196 | struct scu_sgl_element_pair *sgl_pair; |
1354 | enum sci_status status = SCI_SUCCESS; | ||
1355 | struct scu_sgl_element *sgl; | 1197 | struct scu_sgl_element *sgl; |
1198 | enum sci_status status; | ||
1356 | u32 offset; | 1199 | u32 offset; |
1357 | u32 len = 0; | 1200 | u32 len = 0; |
1358 | 1201 | ||
@@ -1406,7 +1249,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re | |||
1406 | */ | 1249 | */ |
1407 | static enum sci_status | 1250 | static enum sci_status |
1408 | sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, | 1251 | sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, |
1409 | u8 *data_buf, u32 len) | 1252 | u8 *data_buf, u32 len) |
1410 | { | 1253 | { |
1411 | struct isci_request *ireq; | 1254 | struct isci_request *ireq; |
1412 | u8 *src_addr; | 1255 | u8 *src_addr; |
@@ -1427,9 +1270,9 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, | |||
1427 | struct page *page = sg_page(sg); | 1270 | struct page *page = sg_page(sg); |
1428 | 1271 | ||
1429 | copy_len = min_t(int, total_len, sg_dma_len(sg)); | 1272 | copy_len = min_t(int, total_len, sg_dma_len(sg)); |
1430 | kaddr = kmap_atomic(page); | 1273 | kaddr = kmap_atomic(page, KM_IRQ0); |
1431 | memcpy(kaddr + sg->offset, src_addr, copy_len); | 1274 | memcpy(kaddr + sg->offset, src_addr, copy_len); |
1432 | kunmap_atomic(kaddr); | 1275 | kunmap_atomic(kaddr, KM_IRQ0); |
1433 | total_len -= copy_len; | 1276 | total_len -= copy_len; |
1434 | src_addr += copy_len; | 1277 | src_addr += copy_len; |
1435 | sg = sg_next(sg); | 1278 | sg = sg_next(sg); |
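The PIO data-in copy above walks the request's scatter list, mapping each page with kmap_atomic() and copying at most the segment length before advancing with sg_next(). A userspace analogue of the loop's bookkeeping; the atomic mapping step has no equivalent outside the kernel and is omitted:

#include <stdio.h>
#include <string.h>

struct seg { char *buf; int len; };	/* stand-in for a scatterlist entry */

static int copy_to_segs(struct seg *sg, int nsegs, const char *src, int total)
{
	for (int i = 0; i < nsegs && total > 0; i++) {
		int copy_len = total < sg[i].len ? total : sg[i].len;

		memcpy(sg[i].buf, src, copy_len);	/* kmap_atomic() would bracket this */
		src += copy_len;
		total -= copy_len;
	}
	return total;	/* bytes that did not fit */
}

int main(void)
{
	char a[4], b[4];
	struct seg sg[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	printf("left over: %d\n", copy_to_segs(sg, 2, "0123456789", 10)); /* 2 */
	return 0;
}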
@@ -1580,128 +1423,6 @@ static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_re | |||
1580 | return status; | 1423 | return status; |
1581 | } | 1424 | } |
1582 | 1425 | ||
1583 | static enum sci_status process_unsolicited_fis(struct isci_request *ireq, | ||
1584 | u32 frame_index) | ||
1585 | { | ||
1586 | struct isci_host *ihost = ireq->owning_controller; | ||
1587 | enum sci_status status; | ||
1588 | struct dev_to_host_fis *frame_header; | ||
1589 | u32 *frame_buffer; | ||
1590 | |||
1591 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, | ||
1592 | frame_index, | ||
1593 | (void **)&frame_header); | ||
1594 | |||
1595 | if (status != SCI_SUCCESS) | ||
1596 | return status; | ||
1597 | |||
1598 | if (frame_header->fis_type != FIS_REGD2H) { | ||
1599 | dev_err(&ireq->isci_host->pdev->dev, | ||
1600 | "%s ERROR: invalid fis type 0x%X\n", | ||
1601 | __func__, frame_header->fis_type); | ||
1602 | return SCI_FAILURE; | ||
1603 | } | ||
1604 | |||
1605 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, | ||
1606 | frame_index, | ||
1607 | (void **)&frame_buffer); | ||
1608 | |||
1609 | sci_controller_copy_sata_response(&ireq->stp.rsp, | ||
1610 | (u32 *)frame_header, | ||
1611 | frame_buffer); | ||
1612 | |||
1613 | /* Frame has been decoded return it to the controller */ | ||
1614 | sci_controller_release_frame(ihost, frame_index); | ||
1615 | |||
1616 | return status; | ||
1617 | } | ||
1618 | |||
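process_unsolicited_fis() accepts an unsolicited frame only when its header identifies a Register D2H FIS; FIS_REGD2H is the standard SATA type value 0x34. A minimal standalone version of that validation:

#include <stdint.h>
#include <stdio.h>

#define FIS_REGD2H 0x34	/* SATA Register Device-to-Host FIS type */

static int check_d2h_header(const uint8_t *frame)
{
	if (frame[0] != FIS_REGD2H) {
		fprintf(stderr, "ERROR: invalid fis type %#x\n", frame[0]);
		return -1;
	}
	return 0;
}

int main(void)
{
	uint8_t good[20] = { 0x34 }, bad[20] = { 0x5f /* PIO setup */ };

	printf("%d %d\n", check_d2h_header(good), check_d2h_header(bad));
	return 0;
}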
1619 | static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, | ||
1620 | u32 frame_index) | ||
1621 | { | ||
1622 | struct sas_task *task = isci_request_access_task(ireq); | ||
1623 | enum sci_status status; | ||
1624 | |||
1625 | status = process_unsolicited_fis(ireq, frame_index); | ||
1626 | |||
1627 | if (status == SCI_SUCCESS) { | ||
1628 | if (ireq->stp.rsp.status & ATA_ERR) | ||
1629 | status = SCI_IO_FAILURE_RESPONSE_VALID; | ||
1630 | } else { | ||
1631 | status = SCI_IO_FAILURE_RESPONSE_VALID; | ||
1632 | } | ||
1633 | |||
1634 | if (status != SCI_SUCCESS) { | ||
1635 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | ||
1636 | ireq->sci_status = status; | ||
1637 | } else { | ||
1638 | ireq->scu_status = SCU_TASK_DONE_GOOD; | ||
1639 | ireq->sci_status = SCI_SUCCESS; | ||
1640 | } | ||
1641 | |||
1642 | /* the d2h ufi is the end of non-data commands */ | ||
1643 | if (task->data_dir == DMA_NONE) | ||
1644 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | ||
1645 | |||
1646 | return status; | ||
1647 | } | ||
1648 | |||
1649 | static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) | ||
1650 | { | ||
1651 | struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); | ||
1652 | void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; | ||
1653 | struct scu_task_context *task_context = ireq->tc; | ||
1654 | |||
1655 | /* fill in the SCU Task Context for a DATA fis containing the CDB in Raw Frame | ||
1656 | * type. The TC for the previous Packet fis was already there; we only need to | ||
1657 | * change the H2D fis content. | ||
1658 | */ | ||
1659 | memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); | ||
1660 | memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); | ||
1661 | memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); | ||
1662 | task_context->type.stp.fis_type = FIS_DATA; | ||
1663 | task_context->transfer_length_bytes = dev->cdb_len; | ||
1664 | } | ||
1665 | |||
1666 | static void scu_atapi_construct_task_context(struct isci_request *ireq) | ||
1667 | { | ||
1668 | struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); | ||
1669 | struct sas_task *task = isci_request_access_task(ireq); | ||
1670 | struct scu_task_context *task_context = ireq->tc; | ||
1671 | int cdb_len = dev->cdb_len; | ||
1672 | |||
1673 | /* reference: SSTL 1.13.4.2 | ||
1674 | * task_type, sata_direction | ||
1675 | */ | ||
1676 | if (task->data_dir == DMA_TO_DEVICE) { | ||
1677 | task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; | ||
1678 | task_context->sata_direction = 0; | ||
1679 | } else { | ||
1680 | /* todo: for NO_DATA command, we need to send out raw frame. */ | ||
1681 | task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; | ||
1682 | task_context->sata_direction = 1; | ||
1683 | } | ||
1684 | |||
1685 | memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); | ||
1686 | task_context->type.stp.fis_type = FIS_DATA; | ||
1687 | |||
1688 | memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); | ||
1689 | memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); | ||
1690 | task_context->ssp_command_iu_length = cdb_len / sizeof(u32); | ||
1691 | |||
1692 | /* task phase is set to TX_CMD */ | ||
1693 | task_context->task_phase = 0x1; | ||
1694 | |||
1695 | /* retry counter */ | ||
1696 | task_context->stp_retry_count = 0; | ||
1697 | |||
1698 | /* data transfer size. */ | ||
1699 | task_context->transfer_length_bytes = task->total_xfer_len; | ||
1700 | |||
1701 | /* setup sgl */ | ||
1702 | sci_request_build_sgl(ireq); | ||
1703 | } | ||
1704 | |||
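Two derived values in scu_atapi_construct_task_context() are worth spelling out: the task type follows the DMA direction, and the command IU length is the CDB length in dwords. A minimal restatement (assumes cdb_len is a multiple of sizeof(u32), which holds for ATAPI's 12- and 16-byte CDBs):

    task_context->task_type = (task->data_dir == DMA_TO_DEVICE)
                                    ? SCU_TASK_TYPE_PACKET_DMA_OUT
                                    : SCU_TASK_TYPE_PACKET_DMA_IN;
    task_context->ssp_command_iu_length = cdb_len / sizeof(u32); /* 12->3, 16->4 */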
1705 | enum sci_status | 1426 | enum sci_status |
1706 | sci_io_request_frame_handler(struct isci_request *ireq, | 1427 | sci_io_request_frame_handler(struct isci_request *ireq, |
1707 | u32 frame_index) | 1428 | u32 frame_index) |
@@ -1777,7 +1498,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
1777 | sci_unsolicited_frame_control_get_header(&ihost->uf_control, | 1498 | sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1778 | frame_index, | 1499 | frame_index, |
1779 | &frame_header); | 1500 | &frame_header); |
1780 | kaddr = kmap_atomic(sg_page(sg)); | 1501 | kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); |
1781 | rsp = kaddr + sg->offset; | 1502 | rsp = kaddr + sg->offset; |
1782 | sci_swab32_cpy(rsp, frame_header, 1); | 1503 | sci_swab32_cpy(rsp, frame_header, 1); |
1783 | 1504 | ||
@@ -1814,7 +1535,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
1814 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; | 1535 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
1815 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1536 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1816 | } | 1537 | } |
1817 | kunmap_atomic(kaddr); | 1538 | kunmap_atomic(kaddr, KM_IRQ0); |
1818 | 1539 | ||
1819 | sci_controller_release_frame(ihost, frame_index); | 1540 | sci_controller_release_frame(ihost, frame_index); |
1820 | 1541 | ||
@@ -1972,7 +1693,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
1972 | frame_index, | 1693 | frame_index, |
1973 | (void **)&frame_buffer); | 1694 | (void **)&frame_buffer); |
1974 | 1695 | ||
1975 | sci_controller_copy_sata_response(&ireq->stp.rsp, | 1696 | sci_controller_copy_sata_response(&ireq->stp.req, |
1976 | frame_header, | 1697 | frame_header, |
1977 | frame_buffer); | 1698 | frame_buffer); |
1978 | 1699 | ||
@@ -2061,24 +1782,59 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
2061 | return status; | 1782 | return status; |
2062 | } | 1783 | } |
2063 | 1784 | ||
2064 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { | 1785 | case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: { |
2065 | struct sas_task *task = isci_request_access_task(ireq); | 1786 | struct dev_to_host_fis *frame_header; |
1787 | u32 *frame_buffer; | ||
2066 | 1788 | ||
2067 | sci_controller_release_frame(ihost, frame_index); | 1789 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
2068 | ireq->target_device->working_request = ireq; | 1790 | frame_index, |
2069 | if (task->data_dir == DMA_NONE) { | 1791 | (void **)&frame_header); |
2070 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); | 1792 | if (status != SCI_SUCCESS) { |
2071 | scu_atapi_reconstruct_raw_frame_task_context(ireq); | 1793 | dev_err(&ihost->pdev->dev, |
2072 | } else { | 1794 | "%s: SCIC IO Request 0x%p could not get frame " |
2073 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); | 1795 | "header for frame index %d, status %x\n", |
2074 | scu_atapi_construct_task_context(ireq); | 1796 | __func__, |
1797 | stp_req, | ||
1798 | frame_index, | ||
1799 | status); | ||
1800 | return status; | ||
2075 | } | 1801 | } |
2076 | 1802 | ||
2077 | sci_controller_continue_io(ireq); | 1803 | switch (frame_header->fis_type) { |
2078 | return SCI_SUCCESS; | 1804 | case FIS_REGD2H: |
1805 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, | ||
1806 | frame_index, | ||
1807 | (void **)&frame_buffer); | ||
1808 | |||
1809 | sci_controller_copy_sata_response(&ireq->stp.rsp, | ||
1810 | frame_header, | ||
1811 | frame_buffer); | ||
1812 | |||
1813 | /* The command has completed with an error */ | ||
1814 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | ||
1815 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | ||
1816 | break; | ||
1817 | |||
1818 | default: | ||
1819 | dev_warn(&ihost->pdev->dev, | ||
1820 | "%s: IO Request:0x%p Frame Id:%d protocol " | ||
1821 | "violation occurred\n", | ||
1822 | __func__, | ||
1823 | stp_req, | ||
1824 | frame_index); | ||
1825 | |||
1826 | ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; | ||
1827 | ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; | ||
1828 | break; | ||
1829 | } | ||
1830 | |||
1831 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | ||
1832 | |||
1833 | /* Frame has been decoded; return it to the controller */ | ||
1834 | sci_controller_release_frame(ihost, frame_index); | ||
1835 | |||
1836 | return status; | ||
2079 | } | 1837 | } |
2080 | case SCI_REQ_ATAPI_WAIT_D2H: | ||
2081 | return atapi_d2h_reg_frame_handler(ireq, frame_index); | ||
2082 | case SCI_REQ_ABORTING: | 1838 | case SCI_REQ_ABORTING: |
2083 | /* | 1839 | /* |
2084 | * TODO: Is it even possible to get an unsolicited frame in the | 1840 | * TODO: Is it even possible to get an unsolicited frame in the |
@@ -2119,7 +1875,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
2119 | */ | 1875 | */ |
2120 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { | 1876 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { |
2121 | sci_remote_device_suspend(ireq->target_device, | 1877 | sci_remote_device_suspend(ireq->target_device, |
2122 | SCI_SW_SUSPEND_NORMAL); | 1878 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); |
2123 | 1879 | ||
2124 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 1880 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
2125 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | 1881 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
@@ -2140,6 +1896,14 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
2140 | /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR | 1896 | /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR |
2141 | * - this comes only for B0 | 1897 | * - this comes only for B0 |
2142 | */ | 1898 | */ |
1899 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): | ||
1900 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): | ||
1901 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): | ||
1902 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): | ||
1903 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): | ||
1904 | sci_remote_device_suspend(ireq->target_device, | ||
1905 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); | ||
1906 | /* Fall through to the default case */ | ||
2143 | default: | 1907 | default: |
2144 | /* All other completion statuses cause the IO to be complete. */ | 1908 | /* All other completion statuses cause the IO to be complete. */ |
2145 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); | 1909 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
@@ -2151,257 +1915,66 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
2151 | return status; | 1915 | return status; |
2152 | } | 1916 | } |
2153 | 1917 | ||
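Every switch in this region leans on the same encoding pair: SCU_GET_COMPLETION_TL_STATUS() masks the task-layer status field out of the raw completion code, while SCU_MAKE_COMPLETION_STATUS() shifts a SCU_TASK_DONE_* value into that same field so it can act as a case label. A usage sketch (the actual masks and shifts live in the SCU headers):

    u32 tl_status = SCU_GET_COMPLETION_TL_STATUS(completion_code);

    if (tl_status == SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD))
            ; /* transport layer reported success */
    else
            /* strip the shift again when storing the raw value */
            ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);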
2154 | static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, | 1918 | static enum sci_status |
2155 | enum sci_base_request_states next) | 1919 | stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, |
1920 | u32 completion_code) | ||
2156 | { | 1921 | { |
2157 | enum sci_status status = SCI_SUCCESS; | ||
2158 | |||
2159 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1922 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2160 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1923 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
2161 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 1924 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2162 | ireq->sci_status = SCI_SUCCESS; | 1925 | ireq->sci_status = SCI_SUCCESS; |
2163 | sci_change_state(&ireq->sm, next); | 1926 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); |
2164 | break; | 1927 | break; |
1928 | |||
2165 | default: | 1929 | default: |
2166 | /* All other completion statuses cause the IO to be complete. | 1930 | /* |
1931 | * All other completion statuses cause the IO to be complete. | ||
2167 | * If a NAK was received, then it is up to the user to retry | 1932 | * If a NAK was received, then it is up to the user to retry |
2168 | * the request. | 1933 | * the request. |
2169 | */ | 1934 | */ |
2170 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); | 1935 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
2171 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; | 1936 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
2172 | |||
2173 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1937 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2174 | break; | 1938 | break; |
2175 | } | 1939 | } |
2176 | 1940 | ||
2177 | return status; | 1941 | return SCI_SUCCESS; |
2178 | } | 1942 | } |
2179 | 1943 | ||
2180 | static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, | 1944 | static enum sci_status |
2181 | u32 completion_code) | 1945 | stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, |
1946 | u32 completion_code) | ||
2182 | { | 1947 | { |
2183 | struct isci_remote_device *idev = ireq->target_device; | ||
2184 | struct dev_to_host_fis *d2h = &ireq->stp.rsp; | ||
2185 | enum sci_status status = SCI_SUCCESS; | ||
2186 | |||
2187 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1948 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2188 | case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): | 1949 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
2189 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | ||
2190 | break; | ||
2191 | |||
2192 | case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { | ||
2193 | u16 len = sci_req_tx_bytes(ireq); | ||
2194 | |||
2195 | /* likely a non-error data underrun; work around the missing | ||
2196 | * d2h frame from the controller | ||
2197 | */ | ||
2198 | if (d2h->fis_type != FIS_REGD2H) { | ||
2199 | d2h->fis_type = FIS_REGD2H; | ||
2200 | d2h->flags = (1 << 6); | ||
2201 | d2h->status = 0x50; | ||
2202 | d2h->error = 0; | ||
2203 | d2h->lbal = 0; | ||
2204 | d2h->byte_count_low = len & 0xff; | ||
2205 | d2h->byte_count_high = len >> 8; | ||
2206 | d2h->device = 0xa0; | ||
2207 | d2h->lbal_exp = 0; | ||
2208 | d2h->lbam_exp = 0; | ||
2209 | d2h->lbah_exp = 0; | ||
2210 | d2h->_r_a = 0; | ||
2211 | d2h->sector_count = 0x3; | ||
2212 | d2h->sector_count_exp = 0; | ||
2213 | d2h->_r_b = 0; | ||
2214 | d2h->_r_c = 0; | ||
2215 | d2h->_r_d = 0; | ||
2216 | } | ||
2217 | |||
2218 | ireq->scu_status = SCU_TASK_DONE_GOOD; | ||
2219 | ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; | ||
2220 | status = ireq->sci_status; | ||
2221 | |||
2222 | /* the hw will have suspended the rnc, so complete the | ||
2223 | * request upon pending resume | ||
2224 | */ | ||
2225 | sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); | ||
2226 | break; | ||
2227 | } | ||
2228 | case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): | ||
2229 | /* In this case, there is no UF coming after; | ||
2230 | * complete the IO now. | ||
2231 | */ | ||
2232 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 1950 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2233 | ireq->sci_status = SCI_SUCCESS; | 1951 | ireq->sci_status = SCI_SUCCESS; |
2234 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1952 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); |
2235 | break; | 1953 | break; |
2236 | 1954 | ||
2237 | default: | 1955 | default: |
2238 | if (d2h->fis_type == FIS_REGD2H) { | 1956 | /* All other completion statuses cause the IO to be complete. If |
2239 | /* UF received change the device state to ATAPI_ERROR */ | 1957 | * a NAK was received, then it is up to the user to retry the |
2240 | status = ireq->sci_status; | 1958 | * request. |
2241 | sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); | 1959 | */ |
2242 | } else { | 1960 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
2243 | /* If we receive a non-success TC status and no UF | 1961 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
2244 | * has arrived yet, then a UF for the status fis | 1962 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2245 | * is coming after (XXX: suspect this is | ||
2246 | * actually a protocol error or a bug like the | ||
2247 | * DONE_UNEXP_FIS case) | ||
2248 | */ | ||
2249 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | ||
2250 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | ||
2251 | |||
2252 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); | ||
2253 | } | ||
2254 | break; | 1963 | break; |
2255 | } | 1964 | } |
2256 | 1965 | ||
2257 | return status; | 1966 | return SCI_SUCCESS; |
2258 | } | ||
2259 | |||
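The subtlest branch in the handler above is SCU_TASK_DONE_UNEXP_FIS: when the controller drops the expected D2H frame after a short read, the driver fabricates a benign one so upper layers see an ordinary underrun rather than an error. A gloss on the fields it fills in (interpretations follow the standard SATA register-D2H layout and are mine, not the driver's comments):

    d2h->fis_type = FIS_REGD2H;        /* pretend the device sent a D2H FIS  */
    d2h->flags    = (1 << 6);          /* 'I' bit: interrupt pending         */
    d2h->status   = 0x50;              /* ATA_DRDY | ATA_DSC, no error bits  */
    d2h->byte_count_low  = len & 0xff; /* real bytes moved, so the residual  */
    d2h->byte_count_high = len >> 8;   /*   computed later shows an underrun */
    d2h->sector_count    = 0x3;        /* ATAPI interrupt reason: I/O + C/D  */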
2260 | static int sci_request_smp_completion_status_is_tx_suspend( | ||
2261 | unsigned int completion_status) | ||
2262 | { | ||
2263 | switch (completion_status) { | ||
2264 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | ||
2265 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | ||
2266 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | ||
2267 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | ||
2268 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | ||
2269 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | ||
2270 | return 1; | ||
2271 | } | ||
2272 | return 0; | ||
2273 | } | ||
2274 | |||
2275 | static int sci_request_smp_completion_status_is_tx_rx_suspend( | ||
2276 | unsigned int completion_status) | ||
2277 | { | ||
2278 | return 0; /* There are no Tx/Rx SMP suspend conditions. */ | ||
2279 | } | ||
2280 | |||
2281 | static int sci_request_ssp_completion_status_is_tx_suspend( | ||
2282 | unsigned int completion_status) | ||
2283 | { | ||
2284 | switch (completion_status) { | ||
2285 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: | ||
2286 | case SCU_TASK_DONE_LF_ERR: | ||
2287 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | ||
2288 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | ||
2289 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | ||
2290 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | ||
2291 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | ||
2292 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | ||
2293 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | ||
2294 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | ||
2295 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | ||
2296 | return 1; | ||
2297 | } | ||
2298 | return 0; | ||
2299 | } | ||
2300 | |||
2301 | static int sci_request_ssp_completion_status_is_tx_rx_suspend( | ||
2302 | unsigned int completion_status) | ||
2303 | { | ||
2304 | return 0; /* There are no Tx/Rx SSP suspend conditions. */ | ||
2305 | } | ||
2306 | |||
2307 | static int sci_request_stpsata_completion_status_is_tx_suspend( | ||
2308 | unsigned int completion_status) | ||
2309 | { | ||
2310 | switch (completion_status) { | ||
2311 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: | ||
2312 | case SCU_TASK_DONE_LL_R_ERR: | ||
2313 | case SCU_TASK_DONE_LL_PERR: | ||
2314 | case SCU_TASK_DONE_REG_ERR: | ||
2315 | case SCU_TASK_DONE_SDB_ERR: | ||
2316 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | ||
2317 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | ||
2318 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | ||
2319 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | ||
2320 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | ||
2321 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | ||
2322 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | ||
2323 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | ||
2324 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | ||
2325 | return 1; | ||
2326 | } | ||
2327 | return 0; | ||
2328 | } | ||
2329 | |||
2330 | |||
2331 | static int sci_request_stpsata_completion_status_is_tx_rx_suspend( | ||
2332 | unsigned int completion_status) | ||
2333 | { | ||
2334 | switch (completion_status) { | ||
2335 | case SCU_TASK_DONE_LF_ERR: | ||
2336 | case SCU_TASK_DONE_LL_SY_TERM: | ||
2337 | case SCU_TASK_DONE_LL_LF_TERM: | ||
2338 | case SCU_TASK_DONE_BREAK_RCVD: | ||
2339 | case SCU_TASK_DONE_INV_FIS_LEN: | ||
2340 | case SCU_TASK_DONE_UNEXP_FIS: | ||
2341 | case SCU_TASK_DONE_UNEXP_SDBFIS: | ||
2342 | case SCU_TASK_DONE_MAX_PLD_ERR: | ||
2343 | return 1; | ||
2344 | } | ||
2345 | return 0; | ||
2346 | } | ||
2347 | |||
2348 | static void sci_request_handle_suspending_completions( | ||
2349 | struct isci_request *ireq, | ||
2350 | u32 completion_code) | ||
2351 | { | ||
2352 | int is_tx = 0; | ||
2353 | int is_tx_rx = 0; | ||
2354 | |||
2355 | switch (ireq->protocol) { | ||
2356 | case SAS_PROTOCOL_SMP: | ||
2357 | is_tx = sci_request_smp_completion_status_is_tx_suspend( | ||
2358 | completion_code); | ||
2359 | is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( | ||
2360 | completion_code); | ||
2361 | break; | ||
2362 | case SAS_PROTOCOL_SSP: | ||
2363 | is_tx = sci_request_ssp_completion_status_is_tx_suspend( | ||
2364 | completion_code); | ||
2365 | is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( | ||
2366 | completion_code); | ||
2367 | break; | ||
2368 | case SAS_PROTOCOL_STP: | ||
2369 | is_tx = sci_request_stpsata_completion_status_is_tx_suspend( | ||
2370 | completion_code); | ||
2371 | is_tx_rx = | ||
2372 | sci_request_stpsata_completion_status_is_tx_rx_suspend( | ||
2373 | completion_code); | ||
2374 | break; | ||
2375 | default: | ||
2376 | dev_warn(&ireq->isci_host->pdev->dev, | ||
2377 | "%s: request %p has no valid protocol\n", | ||
2378 | __func__, ireq); | ||
2379 | break; | ||
2380 | } | ||
2381 | if (is_tx || is_tx_rx) { | ||
2382 | BUG_ON(is_tx && is_tx_rx); | ||
2383 | |||
2384 | sci_remote_node_context_suspend( | ||
2385 | &ireq->target_device->rnc, | ||
2386 | SCI_HW_SUSPEND, | ||
2387 | (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX | ||
2388 | : SCU_EVENT_TL_RNC_SUSPEND_TX); | ||
2389 | } | ||
2390 | } | 1967 | } |
2391 | 1968 | ||
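The run of predicates ending above is a per-protocol lookup: given a task-layer completion status, decide whether the remote node must be suspended on the Tx side only or on both Tx and Rx, and do it before the state-specific handler runs. Usage sketch for the STP/SATA flavor, mirroring sci_request_handle_suspending_completions():

    u32 tl = SCU_GET_COMPLETION_TL_STATUS(completion_code);

    if (sci_request_stpsata_completion_status_is_tx_rx_suspend(tl))
            sci_remote_node_context_suspend(&ireq->target_device->rnc,
                                            SCI_HW_SUSPEND,
                                            SCU_EVENT_TL_RNC_SUSPEND_TX_RX);
    else if (sci_request_stpsata_completion_status_is_tx_suspend(tl))
            sci_remote_node_context_suspend(&ireq->target_device->rnc,
                                            SCI_HW_SUSPEND,
                                            SCU_EVENT_TL_RNC_SUSPEND_TX);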
2392 | enum sci_status | 1969 | enum sci_status |
2393 | sci_io_request_tc_completion(struct isci_request *ireq, | 1970 | sci_io_request_tc_completion(struct isci_request *ireq, |
2394 | u32 completion_code) | 1971 | u32 completion_code) |
2395 | { | 1972 | { |
2396 | enum sci_base_request_states state; | 1973 | enum sci_base_request_states state; |
2397 | struct isci_host *ihost = ireq->owning_controller; | 1974 | struct isci_host *ihost = ireq->owning_controller; |
2398 | 1975 | ||
2399 | state = ireq->sm.current_state_id; | 1976 | state = ireq->sm.current_state_id; |
2400 | 1977 | ||
2401 | /* Decode those completions that signal upcoming suspension events. */ | ||
2402 | sci_request_handle_suspending_completions( | ||
2403 | ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); | ||
2404 | |||
2405 | switch (state) { | 1978 | switch (state) { |
2406 | case SCI_REQ_STARTED: | 1979 | case SCI_REQ_STARTED: |
2407 | return request_started_state_tc_event(ireq, completion_code); | 1980 | return request_started_state_tc_event(ireq, completion_code); |
@@ -2432,24 +2005,25 @@ sci_io_request_tc_completion(struct isci_request *ireq, | |||
2432 | case SCI_REQ_STP_PIO_DATA_OUT: | 2005 | case SCI_REQ_STP_PIO_DATA_OUT: |
2433 | return pio_data_out_tx_done_tc_event(ireq, completion_code); | 2006 | return pio_data_out_tx_done_tc_event(ireq, completion_code); |
2434 | 2007 | ||
2008 | case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: | ||
2009 | return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq, | ||
2010 | completion_code); | ||
2011 | |||
2012 | case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: | ||
2013 | return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq, | ||
2014 | completion_code); | ||
2015 | |||
2435 | case SCI_REQ_ABORTING: | 2016 | case SCI_REQ_ABORTING: |
2436 | return request_aborting_state_tc_event(ireq, | 2017 | return request_aborting_state_tc_event(ireq, |
2437 | completion_code); | 2018 | completion_code); |
2438 | 2019 | ||
2439 | case SCI_REQ_ATAPI_WAIT_H2D: | ||
2440 | return atapi_raw_completion(ireq, completion_code, | ||
2441 | SCI_REQ_ATAPI_WAIT_PIO_SETUP); | ||
2442 | |||
2443 | case SCI_REQ_ATAPI_WAIT_TC_COMP: | ||
2444 | return atapi_raw_completion(ireq, completion_code, | ||
2445 | SCI_REQ_ATAPI_WAIT_D2H); | ||
2446 | |||
2447 | case SCI_REQ_ATAPI_WAIT_D2H: | ||
2448 | return atapi_data_tc_completion_handler(ireq, completion_code); | ||
2449 | |||
2450 | default: | 2020 | default: |
2451 | dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n", | 2021 | dev_warn(&ihost->pdev->dev, |
2452 | __func__, completion_code, req_state_name(state)); | 2022 | "%s: SCIC IO Request given task completion " |
2023 | "notification %x while in wrong state %d\n", | ||
2024 | __func__, | ||
2025 | completion_code, | ||
2026 | state); | ||
2453 | return SCI_FAILURE_INVALID_STATE; | 2027 | return SCI_FAILURE_INVALID_STATE; |
2454 | } | 2028 | } |
2455 | } | 2029 | } |
@@ -2493,6 +2067,9 @@ static void isci_request_process_response_iu( | |||
2493 | * @request: This parameter is the completed isci_request object. | 2067 | * @request: This parameter is the completed isci_request object. |
2494 | * @response_ptr: This parameter specifies the service response for the I/O. | 2068 | * @response_ptr: This parameter specifies the service response for the I/O. |
2495 | * @status_ptr: This parameter specifies the exec status for the I/O. | 2069 | * @status_ptr: This parameter specifies the exec status for the I/O. |
2070 | * @complete_to_host_ptr: This parameter specifies the action to be taken by | ||
2071 | * the LLDD with respect to completing this request or forcing an abort | ||
2072 | * condition on the I/O. | ||
2496 | * @open_rej_reason: This parameter specifies the encoded reason for the | 2073 | * @open_rej_reason: This parameter specifies the encoded reason for the |
2497 | * abandon-class reject. | 2074 | * abandon-class reject. |
2498 | * | 2075 | * |
@@ -2503,12 +2080,14 @@ static void isci_request_set_open_reject_status( | |||
2503 | struct sas_task *task, | 2080 | struct sas_task *task, |
2504 | enum service_response *response_ptr, | 2081 | enum service_response *response_ptr, |
2505 | enum exec_status *status_ptr, | 2082 | enum exec_status *status_ptr, |
2083 | enum isci_completion_selection *complete_to_host_ptr, | ||
2506 | enum sas_open_rej_reason open_rej_reason) | 2084 | enum sas_open_rej_reason open_rej_reason) |
2507 | { | 2085 | { |
2508 | /* Task in the target is done. */ | 2086 | /* Task in the target is done. */ |
2509 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2087 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2510 | *response_ptr = SAS_TASK_UNDELIVERED; | 2088 | *response_ptr = SAS_TASK_UNDELIVERED; |
2511 | *status_ptr = SAS_OPEN_REJECT; | 2089 | *status_ptr = SAS_OPEN_REJECT; |
2090 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2512 | task->task_status.open_rej_reason = open_rej_reason; | 2091 | task->task_status.open_rej_reason = open_rej_reason; |
2513 | } | 2092 | } |
2514 | 2093 | ||
@@ -2518,6 +2097,9 @@ static void isci_request_set_open_reject_status( | |||
2518 | * @request: This parameter is the completed isci_request object. | 2097 | * @request: This parameter is the completed isci_request object. |
2519 | * @response_ptr: This parameter specifies the service response for the I/O. | 2098 | * @response_ptr: This parameter specifies the service response for the I/O. |
2520 | * @status_ptr: This parameter specifies the exec status for the I/O. | 2099 | * @status_ptr: This parameter specifies the exec status for the I/O. |
2100 | * @complete_to_host_ptr: This parameter specifies the action to be taken by | ||
2101 | * the LLDD with respect to completing this request or forcing an abort | ||
2102 | * condition on the I/O. | ||
2521 | * | 2103 | * |
2522 | * none. | 2104 | * none. |
2523 | */ | 2105 | */ |
@@ -2526,7 +2108,8 @@ static void isci_request_handle_controller_specific_errors( | |||
2526 | struct isci_request *request, | 2108 | struct isci_request *request, |
2527 | struct sas_task *task, | 2109 | struct sas_task *task, |
2528 | enum service_response *response_ptr, | 2110 | enum service_response *response_ptr, |
2529 | enum exec_status *status_ptr) | 2111 | enum exec_status *status_ptr, |
2112 | enum isci_completion_selection *complete_to_host_ptr) | ||
2530 | { | 2113 | { |
2531 | unsigned int cstatus; | 2114 | unsigned int cstatus; |
2532 | 2115 | ||
@@ -2567,6 +2150,9 @@ static void isci_request_handle_controller_specific_errors( | |||
2567 | *status_ptr = SAS_ABORTED_TASK; | 2150 | *status_ptr = SAS_ABORTED_TASK; |
2568 | 2151 | ||
2569 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2152 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2153 | |||
2154 | *complete_to_host_ptr = | ||
2155 | isci_perform_normal_io_completion; | ||
2570 | } else { | 2156 | } else { |
2571 | /* Task in the target is not done. */ | 2157 | /* Task in the target is not done. */ |
2572 | *response_ptr = SAS_TASK_UNDELIVERED; | 2158 | *response_ptr = SAS_TASK_UNDELIVERED; |
@@ -2577,6 +2163,9 @@ static void isci_request_handle_controller_specific_errors( | |||
2577 | *status_ptr = SAM_STAT_TASK_ABORTED; | 2163 | *status_ptr = SAM_STAT_TASK_ABORTED; |
2578 | 2164 | ||
2579 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2165 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2166 | |||
2167 | *complete_to_host_ptr = | ||
2168 | isci_perform_error_io_completion; | ||
2580 | } | 2169 | } |
2581 | 2170 | ||
2582 | break; | 2171 | break; |
@@ -2605,6 +2194,8 @@ static void isci_request_handle_controller_specific_errors( | |||
2605 | *status_ptr = SAS_ABORTED_TASK; | 2194 | *status_ptr = SAS_ABORTED_TASK; |
2606 | 2195 | ||
2607 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2196 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2197 | |||
2198 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2608 | break; | 2199 | break; |
2609 | 2200 | ||
2610 | 2201 | ||
@@ -2615,7 +2206,7 @@ static void isci_request_handle_controller_specific_errors( | |||
2615 | 2206 | ||
2616 | isci_request_set_open_reject_status( | 2207 | isci_request_set_open_reject_status( |
2617 | request, task, response_ptr, status_ptr, | 2208 | request, task, response_ptr, status_ptr, |
2618 | SAS_OREJ_WRONG_DEST); | 2209 | complete_to_host_ptr, SAS_OREJ_WRONG_DEST); |
2619 | break; | 2210 | break; |
2620 | 2211 | ||
2621 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | 2212 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
@@ -2625,56 +2216,56 @@ static void isci_request_handle_controller_specific_errors( | |||
2625 | */ | 2216 | */ |
2626 | isci_request_set_open_reject_status( | 2217 | isci_request_set_open_reject_status( |
2627 | request, task, response_ptr, status_ptr, | 2218 | request, task, response_ptr, status_ptr, |
2628 | SAS_OREJ_RESV_AB0); | 2219 | complete_to_host_ptr, SAS_OREJ_RESV_AB0); |
2629 | break; | 2220 | break; |
2630 | 2221 | ||
2631 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | 2222 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2632 | 2223 | ||
2633 | isci_request_set_open_reject_status( | 2224 | isci_request_set_open_reject_status( |
2634 | request, task, response_ptr, status_ptr, | 2225 | request, task, response_ptr, status_ptr, |
2635 | SAS_OREJ_RESV_AB1); | 2226 | complete_to_host_ptr, SAS_OREJ_RESV_AB1); |
2636 | break; | 2227 | break; |
2637 | 2228 | ||
2638 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | 2229 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2639 | 2230 | ||
2640 | isci_request_set_open_reject_status( | 2231 | isci_request_set_open_reject_status( |
2641 | request, task, response_ptr, status_ptr, | 2232 | request, task, response_ptr, status_ptr, |
2642 | SAS_OREJ_RESV_AB2); | 2233 | complete_to_host_ptr, SAS_OREJ_RESV_AB2); |
2643 | break; | 2234 | break; |
2644 | 2235 | ||
2645 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | 2236 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2646 | 2237 | ||
2647 | isci_request_set_open_reject_status( | 2238 | isci_request_set_open_reject_status( |
2648 | request, task, response_ptr, status_ptr, | 2239 | request, task, response_ptr, status_ptr, |
2649 | SAS_OREJ_RESV_AB3); | 2240 | complete_to_host_ptr, SAS_OREJ_RESV_AB3); |
2650 | break; | 2241 | break; |
2651 | 2242 | ||
2652 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | 2243 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2653 | 2244 | ||
2654 | isci_request_set_open_reject_status( | 2245 | isci_request_set_open_reject_status( |
2655 | request, task, response_ptr, status_ptr, | 2246 | request, task, response_ptr, status_ptr, |
2656 | SAS_OREJ_BAD_DEST); | 2247 | complete_to_host_ptr, SAS_OREJ_BAD_DEST); |
2657 | break; | 2248 | break; |
2658 | 2249 | ||
2659 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | 2250 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: |
2660 | 2251 | ||
2661 | isci_request_set_open_reject_status( | 2252 | isci_request_set_open_reject_status( |
2662 | request, task, response_ptr, status_ptr, | 2253 | request, task, response_ptr, status_ptr, |
2663 | SAS_OREJ_STP_NORES); | 2254 | complete_to_host_ptr, SAS_OREJ_STP_NORES); |
2664 | break; | 2255 | break; |
2665 | 2256 | ||
2666 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | 2257 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: |
2667 | 2258 | ||
2668 | isci_request_set_open_reject_status( | 2259 | isci_request_set_open_reject_status( |
2669 | request, task, response_ptr, status_ptr, | 2260 | request, task, response_ptr, status_ptr, |
2670 | SAS_OREJ_EPROTO); | 2261 | complete_to_host_ptr, SAS_OREJ_EPROTO); |
2671 | break; | 2262 | break; |
2672 | 2263 | ||
2673 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | 2264 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: |
2674 | 2265 | ||
2675 | isci_request_set_open_reject_status( | 2266 | isci_request_set_open_reject_status( |
2676 | request, task, response_ptr, status_ptr, | 2267 | request, task, response_ptr, status_ptr, |
2677 | SAS_OREJ_CONN_RATE); | 2268 | complete_to_host_ptr, SAS_OREJ_CONN_RATE); |
2678 | break; | 2269 | break; |
2679 | 2270 | ||
2680 | case SCU_TASK_DONE_LL_R_ERR: | 2271 | case SCU_TASK_DONE_LL_R_ERR: |
@@ -2706,10 +2297,114 @@ static void isci_request_handle_controller_specific_errors( | |||
2706 | *response_ptr = SAS_TASK_UNDELIVERED; | 2297 | *response_ptr = SAS_TASK_UNDELIVERED; |
2707 | *status_ptr = SAM_STAT_TASK_ABORTED; | 2298 | *status_ptr = SAM_STAT_TASK_ABORTED; |
2708 | 2299 | ||
2709 | if (task->task_proto == SAS_PROTOCOL_SMP) | 2300 | if (task->task_proto == SAS_PROTOCOL_SMP) { |
2710 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2301 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2711 | else | 2302 | |
2303 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2304 | } else { | ||
2712 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2305 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2306 | |||
2307 | *complete_to_host_ptr = isci_perform_error_io_completion; | ||
2308 | } | ||
2309 | break; | ||
2310 | } | ||
2311 | } | ||
2312 | |||
2313 | /** | ||
2314 | * isci_task_save_for_upper_layer_completion() - This function saves the | ||
2315 | * request for later completion to the upper layer driver. | ||
2316 | * @host: This parameter is a pointer to the host on which the request | ||
2317 | * should be queued (either as an error or success). | ||
2318 | * @request: This parameter is the completed request. | ||
2319 | * @response: This parameter is the response code for the completed task. | ||
2320 | * @status: This parameter is the status code for the completed task. | ||
2321 | * | ||
2322 | * none. | ||
2323 | */ | ||
2324 | static void isci_task_save_for_upper_layer_completion( | ||
2325 | struct isci_host *host, | ||
2326 | struct isci_request *request, | ||
2327 | enum service_response response, | ||
2328 | enum exec_status status, | ||
2329 | enum isci_completion_selection task_notification_selection) | ||
2330 | { | ||
2331 | struct sas_task *task = isci_request_access_task(request); | ||
2332 | |||
2333 | task_notification_selection | ||
2334 | = isci_task_set_completion_status(task, response, status, | ||
2335 | task_notification_selection); | ||
2336 | |||
2337 | /* Tasks aborted specifically by a call to the lldd_abort_task | ||
2338 | * function should not be completed to the host in the regular path. | ||
2339 | */ | ||
2340 | switch (task_notification_selection) { | ||
2341 | |||
2342 | case isci_perform_normal_io_completion: | ||
2343 | |||
2344 | /* Normal notification (task_done) */ | ||
2345 | dev_dbg(&host->pdev->dev, | ||
2346 | "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n", | ||
2347 | __func__, | ||
2348 | task, | ||
2349 | task->task_status.resp, response, | ||
2350 | task->task_status.stat, status); | ||
2351 | /* Add to the completed list. */ | ||
2352 | list_add(&request->completed_node, | ||
2353 | &host->requests_to_complete); | ||
2354 | |||
2355 | /* Take the request off the device's pending request list. */ | ||
2356 | list_del_init(&request->dev_node); | ||
2357 | break; | ||
2358 | |||
2359 | case isci_perform_aborted_io_completion: | ||
2360 | /* No notification to libsas because this request is | ||
2361 | * already in the abort path. | ||
2362 | */ | ||
2363 | dev_dbg(&host->pdev->dev, | ||
2364 | "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n", | ||
2365 | __func__, | ||
2366 | task, | ||
2367 | task->task_status.resp, response, | ||
2368 | task->task_status.stat, status); | ||
2369 | |||
2370 | /* Wake up whatever process was waiting for this | ||
2371 | * request to complete. | ||
2372 | */ | ||
2373 | WARN_ON(request->io_request_completion == NULL); | ||
2374 | |||
2375 | if (request->io_request_completion != NULL) { | ||
2376 | |||
2377 | /* Signal whoever is waiting that this | ||
2378 | * request is complete. | ||
2379 | */ | ||
2380 | complete(request->io_request_completion); | ||
2381 | } | ||
2382 | break; | ||
2383 | |||
2384 | case isci_perform_error_io_completion: | ||
2385 | /* Use sas_task_abort */ | ||
2386 | dev_dbg(&host->pdev->dev, | ||
2387 | "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n", | ||
2388 | __func__, | ||
2389 | task, | ||
2390 | task->task_status.resp, response, | ||
2391 | task->task_status.stat, status); | ||
2392 | /* Add to the aborted list. */ | ||
2393 | list_add(&request->completed_node, | ||
2394 | &host->requests_to_errorback); | ||
2395 | break; | ||
2396 | |||
2397 | default: | ||
2398 | dev_dbg(&host->pdev->dev, | ||
2399 | "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n", | ||
2400 | __func__, | ||
2401 | task, | ||
2402 | task->task_status.resp, response, | ||
2403 | task->task_status.stat, status); | ||
2404 | |||
2405 | /* Add to the error-to-libsas list. */ | ||
2406 | list_add(&request->completed_node, | ||
2407 | &host->requests_to_errorback); | ||
2713 | break; | 2408 | break; |
2714 | } | 2409 | } |
2715 | } | 2410 | } |
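isci_task_save_for_upper_layer_completion() thus resolves every request to one of three destinations. Summarized as a comment (my gloss of the cases above):

    /* normal  -> host->requests_to_complete; request leaves the device
     *            list and libsas gets task_done later
     * aborted -> no libsas notification; complete() whoever is waiting
     *            in the abort path
     * error   -> host->requests_to_errorback, handed to sas_task_abort
     * unknown -> treated like error: queued to requests_to_errorback
     */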
@@ -2728,8 +2423,6 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_ | |||
2728 | */ | 2423 | */ |
2729 | if (fis->status & ATA_DF) | 2424 | if (fis->status & ATA_DF) |
2730 | ts->stat = SAS_PROTO_RESPONSE; | 2425 | ts->stat = SAS_PROTO_RESPONSE; |
2731 | else if (fis->status & ATA_ERR) | ||
2732 | ts->stat = SAM_STAT_CHECK_CONDITION; | ||
2733 | else | 2426 | else |
2734 | ts->stat = SAM_STAT_GOOD; | 2427 | ts->stat = SAM_STAT_GOOD; |
2735 | 2428 | ||
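On the new side of this hunk the D2H status decode becomes binary: a device fault is surfaced as a full protocol response for the ATA layer to decode, and anything else (including a bare ATA_ERR) now reports clean status. The equivalent one-liner, as a sketch:

    ts->stat = (fis->status & ATA_DF) ? SAS_PROTO_RESPONSE : SAM_STAT_GOOD;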
@@ -2743,167 +2436,298 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2743 | struct sas_task *task = isci_request_access_task(request); | 2436 | struct sas_task *task = isci_request_access_task(request); |
2744 | struct ssp_response_iu *resp_iu; | 2437 | struct ssp_response_iu *resp_iu; |
2745 | unsigned long task_flags; | 2438 | unsigned long task_flags; |
2746 | struct isci_remote_device *idev = request->target_device; | 2439 | struct isci_remote_device *idev = isci_lookup_device(task->dev); |
2747 | enum service_response response = SAS_TASK_UNDELIVERED; | 2440 | enum service_response response = SAS_TASK_UNDELIVERED; |
2748 | enum exec_status status = SAS_ABORTED_TASK; | 2441 | enum exec_status status = SAS_ABORTED_TASK; |
2442 | enum isci_request_status request_status; | ||
2443 | enum isci_completion_selection complete_to_host | ||
2444 | = isci_perform_normal_io_completion; | ||
2749 | 2445 | ||
2750 | dev_dbg(&ihost->pdev->dev, | 2446 | dev_dbg(&ihost->pdev->dev, |
2751 | "%s: request = %p, task = %p, " | 2447 | "%s: request = %p, task = %p,\n" |
2752 | "task->data_dir = %d completion_status = 0x%x\n", | 2448 | "task->data_dir = %d completion_status = 0x%x\n", |
2753 | __func__, request, task, task->data_dir, completion_status); | 2449 | __func__, |
2754 | 2450 | request, | |
2755 | /* The request is done from an SCU HW perspective. */ | 2451 | task, |
2756 | 2452 | task->data_dir, | |
2757 | /* This is an active request being completed from the core. */ | 2453 | completion_status); |
2758 | switch (completion_status) { | ||
2759 | |||
2760 | case SCI_IO_FAILURE_RESPONSE_VALID: | ||
2761 | dev_dbg(&ihost->pdev->dev, | ||
2762 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", | ||
2763 | __func__, request, task); | ||
2764 | |||
2765 | if (sas_protocol_ata(task->task_proto)) { | ||
2766 | isci_process_stp_response(task, &request->stp.rsp); | ||
2767 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { | ||
2768 | 2454 | ||
2769 | /* crack the iu response buffer. */ | 2455 | spin_lock(&request->state_lock); |
2770 | resp_iu = &request->ssp.rsp; | 2456 | request_status = request->status; |
2771 | isci_request_process_response_iu(task, resp_iu, | ||
2772 | &ihost->pdev->dev); | ||
2773 | 2457 | ||
2774 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { | 2458 | /* Decode the request status. Note that if the request has been |
2459 | * aborted by a task management function, we don't care | ||
2460 | * what the status is. | ||
2461 | */ | ||
2462 | switch (request_status) { | ||
2463 | |||
2464 | case aborted: | ||
2465 | /* "aborted" indicates that the request was aborted by a task | ||
2466 | * management function, since once a task management request is | ||
2467 | * performed by the device, the request only completes because | ||
2468 | * of the subsequent driver terminate. | ||
2469 | * | ||
2470 | * Aborted also means an external thread is explicitly managing | ||
2471 | * this request, so that we do not complete it up the stack. | ||
2472 | * | ||
2473 | * The target is still there (since the TMF was successful). | ||
2474 | */ | ||
2475 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2476 | response = SAS_TASK_COMPLETE; | ||
2775 | 2477 | ||
2776 | dev_err(&ihost->pdev->dev, | 2478 | /* See if the device has been/is being stopped. Note |
2777 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " | 2479 | * that we ignore the quiesce state, since we are |
2778 | "SAS_PROTOCOL_SMP protocol\n", | 2480 | * concerned about the actual device state. |
2779 | __func__); | 2481 | */ |
2482 | if (!idev) | ||
2483 | status = SAS_DEVICE_UNKNOWN; | ||
2484 | else | ||
2485 | status = SAS_ABORTED_TASK; | ||
2780 | 2486 | ||
2781 | } else | 2487 | complete_to_host = isci_perform_aborted_io_completion; |
2782 | dev_err(&ihost->pdev->dev, | 2488 | /* This was an aborted request. */ |
2783 | "%s: unknown protocol\n", __func__); | ||
2784 | 2489 | ||
2785 | /* use the task status set in the task struct by the | 2490 | spin_unlock(&request->state_lock); |
2786 | * isci_request_process_response_iu call. | ||
2787 | */ | ||
2788 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2789 | response = task->task_status.resp; | ||
2790 | status = task->task_status.stat; | ||
2791 | break; | 2491 | break; |
2792 | 2492 | ||
2793 | case SCI_IO_SUCCESS: | 2493 | case aborting: |
2794 | case SCI_IO_SUCCESS_IO_DONE_EARLY: | 2494 | /* aborting means that the task management function tried and |
2795 | 2495 | * failed to abort the request. We need to note the request | |
2796 | response = SAS_TASK_COMPLETE; | 2496 | * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the |
2797 | status = SAM_STAT_GOOD; | 2497 | * target as down. |
2498 | * | ||
2499 | * Aborting also means an external thread is explicitly managing | ||
2500 | * this request, so that we do not complete it up the stack. | ||
2501 | */ | ||
2798 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2502 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2503 | response = SAS_TASK_UNDELIVERED; | ||
2799 | 2504 | ||
2800 | if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { | 2505 | if (!idev) |
2801 | 2506 | /* The device has been /is being stopped. Note that | |
2802 | /* This was an SSP / STP / SATA transfer. | 2507 | * we ignore the quiesce state, since we are |
2803 | * There is a possibility that less data than | 2508 | * concerned about the actual device state. |
2804 | * the maximum was transferred. | 2509 | */ |
2805 | */ | 2510 | status = SAS_DEVICE_UNKNOWN; |
2806 | u32 transferred_length = sci_req_tx_bytes(request); | 2511 | else |
2807 | 2512 | status = SAS_PHY_DOWN; | |
2808 | task->task_status.residual | ||
2809 | = task->total_xfer_len - transferred_length; | ||
2810 | 2513 | ||
2811 | /* If there were residual bytes, call this an | 2514 | complete_to_host = isci_perform_aborted_io_completion; |
2812 | * underrun. | ||
2813 | */ | ||
2814 | if (task->task_status.residual != 0) | ||
2815 | status = SAS_DATA_UNDERRUN; | ||
2816 | 2515 | ||
2817 | dev_dbg(&ihost->pdev->dev, | 2516 | /* This was an aborted request. */ |
2818 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", | ||
2819 | __func__, status); | ||
2820 | 2517 | ||
2821 | } else | 2518 | spin_unlock(&request->state_lock); |
2822 | dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", | ||
2823 | __func__); | ||
2824 | break; | 2519 | break; |
2825 | 2520 | ||
2826 | case SCI_IO_FAILURE_TERMINATED: | 2521 | case terminating: |
2827 | 2522 | ||
2828 | dev_dbg(&ihost->pdev->dev, | 2523 | /* This was a terminated request. This happens when |
2829 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", | 2524 | * the I/O is being terminated because of an action on |
2830 | __func__, request, task); | 2525 | * the device (reset, tear down, etc.), and the I/O needs |
2831 | 2526 | * to be completed up the stack. | |
2832 | /* The request was terminated explicitly. */ | 2527 | */ |
2833 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2528 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2834 | response = SAS_TASK_UNDELIVERED; | 2529 | response = SAS_TASK_UNDELIVERED; |
2835 | 2530 | ||
2836 | /* See if the device has been/is being stopped. Note | 2531 | /* See if the device has been/is being stopped. Note |
2837 | * that we ignore the quiesce state, since we are | 2532 | * that we ignore the quiesce state, since we are |
2838 | * concerned about the actual device state. | 2533 | * concerned about the actual device state. |
2839 | */ | 2534 | */ |
2840 | if (!idev) | 2535 | if (!idev) |
2841 | status = SAS_DEVICE_UNKNOWN; | 2536 | status = SAS_DEVICE_UNKNOWN; |
2842 | else | 2537 | else |
2843 | status = SAS_ABORTED_TASK; | 2538 | status = SAS_ABORTED_TASK; |
2844 | break; | ||
2845 | 2539 | ||
2846 | case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: | 2540 | complete_to_host = isci_perform_aborted_io_completion; |
2847 | 2541 | ||
2848 | isci_request_handle_controller_specific_errors(idev, request, | 2542 | /* This was a terminated request. */ |
2849 | task, &response, | 2543 | |
2850 | &status); | 2544 | spin_unlock(&request->state_lock); |
2851 | break; | 2545 | break; |
2852 | 2546 | ||
2853 | case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: | 2547 | case dead: |
2854 | /* This is a special case, in that the I/O completion | 2548 | /* This was a terminated request that timed out during the |
2855 | * is telling us that the device needs a reset. | 2549 | * termination process. There is no task to complete to |
2856 | * In order for the device reset condition to be | 2550 | * libsas. |
2857 | * noticed, the I/O has to be handled in the error | 2551 | */ |
2858 | * handler. Set the reset flag and cause the | 2552 | complete_to_host = isci_perform_normal_io_completion; |
2859 | * SCSI error thread to be scheduled. | 2553 | spin_unlock(&request->state_lock); |
2860 | */ | 2554 | break; |
2861 | spin_lock_irqsave(&task->task_state_lock, task_flags); | ||
2862 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | ||
2863 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
2864 | 2555 | ||
2865 | /* Fail the I/O. */ | 2556 | default: |
2866 | response = SAS_TASK_UNDELIVERED; | ||
2867 | status = SAM_STAT_TASK_ABORTED; | ||
2868 | 2557 | ||
2869 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2558 | /* The request is done from an SCU HW perspective. */ |
2870 | break; | 2559 | request->status = completed; |
2871 | 2560 | ||
2872 | case SCI_FAILURE_RETRY_REQUIRED: | 2561 | spin_unlock(&request->state_lock); |
2873 | 2562 | ||
2874 | /* Fail the I/O so it can be retried. */ | 2563 | /* This is an active request being completed from the core. */ |
2875 | response = SAS_TASK_UNDELIVERED; | 2564 | switch (completion_status) { |
2876 | if (!idev) | ||
2877 | status = SAS_DEVICE_UNKNOWN; | ||
2878 | else | ||
2879 | status = SAS_ABORTED_TASK; | ||
2880 | 2565 | ||
2881 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2566 | case SCI_IO_FAILURE_RESPONSE_VALID: |
2882 | break; | 2567 | dev_dbg(&ihost->pdev->dev, |
2568 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", | ||
2569 | __func__, | ||
2570 | request, | ||
2571 | task); | ||
2883 | 2572 | ||
2573 | if (sas_protocol_ata(task->task_proto)) { | ||
2574 | isci_process_stp_response(task, &request->stp.rsp); | ||
2575 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { | ||
2884 | 2576 | ||
2885 | default: | 2577 | /* crack the iu response buffer. */ |
2886 | /* Catch any otherwise unhandled error codes here. */ | 2578 | resp_iu = &request->ssp.rsp; |
2887 | dev_dbg(&ihost->pdev->dev, | 2579 | isci_request_process_response_iu(task, resp_iu, |
2888 | "%s: invalid completion code: 0x%x - " | 2580 | &ihost->pdev->dev); |
2889 | "isci_request = %p\n", | ||
2890 | __func__, completion_status, request); | ||
2891 | 2581 | ||
2892 | response = SAS_TASK_UNDELIVERED; | 2582 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { |
2893 | 2583 | ||
2894 | /* See if the device has been/is being stopped. Note | 2584 | dev_err(&ihost->pdev->dev, |
2895 | * that we ignore the quiesce state, since we are | 2585 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " |
2896 | * concerned about the actual device state. | 2586 | "SAS_PROTOCOL_SMP protocol\n", |
2897 | */ | 2587 | __func__); |
2898 | if (!idev) | 2588 | |
2899 | status = SAS_DEVICE_UNKNOWN; | 2589 | } else |
2900 | else | 2590 | dev_err(&ihost->pdev->dev, |
2901 | status = SAS_ABORTED_TASK; | 2591 | "%s: unknown protocol\n", __func__); |
2592 | |||
2593 | /* use the task status set in the task struct by the | ||
2594 | * isci_request_process_response_iu call. | ||
2595 | */ | ||
2596 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2597 | response = task->task_status.resp; | ||
2598 | status = task->task_status.stat; | ||
2599 | break; | ||
2902 | 2600 | ||
2903 | if (SAS_PROTOCOL_SMP == task->task_proto) | 2601 | case SCI_IO_SUCCESS: |
2602 | case SCI_IO_SUCCESS_IO_DONE_EARLY: | ||
2603 | |||
2604 | response = SAS_TASK_COMPLETE; | ||
2605 | status = SAM_STAT_GOOD; | ||
2904 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2606 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2905 | else | 2607 | |
2608 | if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { | ||
2609 | |||
2610 | /* This was an SSP / STP / SATA transfer. | ||
2611 | * There is a possibility that less data than | ||
2612 | * the maximum was transferred. | ||
2613 | */ | ||
2614 | u32 transferred_length = sci_req_tx_bytes(request); | ||
2615 | |||
2616 | task->task_status.residual | ||
2617 | = task->total_xfer_len - transferred_length; | ||
2618 | |||
2619 | /* If there were residual bytes, call this an | ||
2620 | * underrun. | ||
2621 | */ | ||
2622 | if (task->task_status.residual != 0) | ||
2623 | status = SAS_DATA_UNDERRUN; | ||
2624 | |||
2625 | dev_dbg(&ihost->pdev->dev, | ||
2626 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", | ||
2627 | __func__, | ||
2628 | status); | ||
2629 | |||
2630 | } else | ||
2631 | dev_dbg(&ihost->pdev->dev, | ||
2632 | "%s: SCI_IO_SUCCESS\n", | ||
2633 | __func__); | ||
2634 | |||
2635 | break; | ||
2636 | |||
2637 | case SCI_IO_FAILURE_TERMINATED: | ||
2638 | dev_dbg(&ihost->pdev->dev, | ||
2639 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", | ||
2640 | __func__, | ||
2641 | request, | ||
2642 | task); | ||
2643 | |||
2644 | /* The request was terminated explicitly. No handling | ||
2645 | * is needed in the SCSI error handler path. | ||
2646 | */ | ||
2647 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2648 | response = SAS_TASK_UNDELIVERED; | ||
2649 | |||
2650 | /* See if the device has been/is being stopped. Note | ||
2651 | * that we ignore the quiesce state, since we are | ||
2652 | * concerned about the actual device state. | ||
2653 | */ | ||
2654 | if (!idev) | ||
2655 | status = SAS_DEVICE_UNKNOWN; | ||
2656 | else | ||
2657 | status = SAS_ABORTED_TASK; | ||
2658 | |||
2659 | complete_to_host = isci_perform_normal_io_completion; | ||
2660 | break; | ||
2661 | |||
2662 | case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: | ||
2663 | |||
2664 | isci_request_handle_controller_specific_errors( | ||
2665 | idev, request, task, &response, &status, | ||
2666 | &complete_to_host); | ||
2667 | |||
2668 | break; | ||
2669 | |||
2670 | case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: | ||
2671 | /* This is a special case, in that the I/O completion | ||
2672 | * is telling us that the device needs a reset. | ||
2673 | * In order for the device reset condition to be | ||
2674 | * noticed, the I/O has to be handled in the error | ||
2675 | * handler. Set the reset flag and cause the | ||
2676 | * SCSI error thread to be scheduled. | ||
2677 | */ | ||
2678 | spin_lock_irqsave(&task->task_state_lock, task_flags); | ||
2679 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | ||
2680 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
2681 | |||
2682 | /* Fail the I/O. */ | ||
2683 | response = SAS_TASK_UNDELIVERED; | ||
2684 | status = SAM_STAT_TASK_ABORTED; | ||
2685 | |||
2686 | complete_to_host = isci_perform_error_io_completion; | ||
2906 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2687 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2688 | break; | ||
2689 | |||
2690 | case SCI_FAILURE_RETRY_REQUIRED: | ||
2691 | |||
2692 | /* Fail the I/O so it can be retried. */ | ||
2693 | response = SAS_TASK_UNDELIVERED; | ||
2694 | if (!idev) | ||
2695 | status = SAS_DEVICE_UNKNOWN; | ||
2696 | else | ||
2697 | status = SAS_ABORTED_TASK; | ||
2698 | |||
2699 | complete_to_host = isci_perform_normal_io_completion; | ||
2700 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2701 | break; | ||
2702 | |||
2703 | |||
2704 | default: | ||
2705 | /* Catch any otherwise unhandled error codes here. */ | ||
2706 | dev_dbg(&ihost->pdev->dev, | ||
2707 | "%s: invalid completion code: 0x%x - " | ||
2708 | "isci_request = %p\n", | ||
2709 | __func__, completion_status, request); | ||
2710 | |||
2711 | response = SAS_TASK_UNDELIVERED; | ||
2712 | |||
2713 | /* See if the device has been/is being stopped. Note | ||
2714 | * that we ignore the quiesce state, since we are | ||
2715 | * concerned about the actual device state. | ||
2716 | */ | ||
2717 | if (!idev) | ||
2718 | status = SAS_DEVICE_UNKNOWN; | ||
2719 | else | ||
2720 | status = SAS_ABORTED_TASK; | ||
2721 | |||
2722 | if (SAS_PROTOCOL_SMP == task->task_proto) { | ||
2723 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2724 | complete_to_host = isci_perform_normal_io_completion; | ||
2725 | } else { | ||
2726 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2727 | complete_to_host = isci_perform_error_io_completion; | ||
2728 | } | ||
2729 | break; | ||
2730 | } | ||
2907 | break; | 2731 | break; |
2908 | } | 2732 | } |
2909 | 2733 | ||
@@ -2928,77 +2752,67 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2928 | dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); | 2752 | dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); |
2929 | 2753 | ||
2930 | /* need to swab it back in case the command buffer is re-used */ | 2754 | /* need to swab it back in case the command buffer is re-used */ |
2931 | kaddr = kmap_atomic(sg_page(sg)); | 2755 | kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); |
2932 | smp_req = kaddr + sg->offset; | 2756 | smp_req = kaddr + sg->offset; |
2933 | sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); | 2757 | sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); |
2934 | kunmap_atomic(kaddr); | 2758 | kunmap_atomic(kaddr, KM_IRQ0); |
2935 | break; | 2759 | break; |
2936 | } | 2760 | } |
2937 | default: | 2761 | default: |
2938 | break; | 2762 | break; |
2939 | } | 2763 | } |
2940 | 2764 | ||
2941 | spin_lock_irqsave(&task->task_state_lock, task_flags); | 2765 | /* Put the completed request on the correct list */ |
2942 | 2766 | isci_task_save_for_upper_layer_completion(ihost, request, response, | |
2943 | task->task_status.resp = response; | 2767 | status, complete_to_host |
2944 | task->task_status.stat = status; | 2768 | ); |
2945 | |||
2946 | if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { | ||
2947 | /* Normal notification (task_done) */ | ||
2948 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
2949 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
2950 | SAS_TASK_STATE_PENDING); | ||
2951 | } | ||
2952 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
2953 | 2769 | ||
2954 | /* complete the io request to the core. */ | 2770 | /* complete the io request to the core. */ |
2955 | sci_controller_complete_io(ihost, request->target_device, request); | 2771 | sci_controller_complete_io(ihost, request->target_device, request); |
2772 | isci_put_device(idev); | ||
2956 | 2773 | ||
2957 | /* set terminated handle so it cannot be completed or | 2774 | /* set terminated handle so it cannot be completed or |
2958 | * terminated again, and to cause any calls into abort | 2775 | * terminated again, and to cause any calls into abort |
2959 | * task to recognize the already completed case. | 2776 | * task to recognize the already completed case. |
2960 | */ | 2777 | */ |
2961 | set_bit(IREQ_TERMINATED, &request->flags); | 2778 | set_bit(IREQ_TERMINATED, &request->flags); |
2962 | |||
2963 | ireq_done(ihost, request, task); | ||
2964 | } | 2779 | } |
2965 | 2780 | ||
2966 | static void sci_request_started_state_enter(struct sci_base_state_machine *sm) | 2781 | static void sci_request_started_state_enter(struct sci_base_state_machine *sm) |
2967 | { | 2782 | { |
2968 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2783 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2969 | struct domain_device *dev = ireq->target_device->domain_dev; | 2784 | struct domain_device *dev = ireq->target_device->domain_dev; |
2970 | enum sci_base_request_states state; | ||
2971 | struct sas_task *task; | 2785 | struct sas_task *task; |
2972 | 2786 | ||
2973 | /* XXX as hch said always creating an internal sas_task for tmf | 2787 | /* XXX as hch said always creating an internal sas_task for tmf |
2974 | * requests would simplify the driver | 2788 | * requests would simplify the driver |
2975 | */ | 2789 | */ |
2976 | task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq); | 2790 | task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL; |
2977 | 2791 | ||
2978 | /* all unaccelerated request types (non ssp or ncq) handled with | 2792 | /* all unaccelerated request types (non ssp or ncq) handled with |
2979 | * substates | 2793 | * substates |
2980 | */ | 2794 | */ |
2981 | if (!task && dev->dev_type == SAS_END_DEV) { | 2795 | if (!task && dev->dev_type == SAS_END_DEV) { |
2982 | state = SCI_REQ_TASK_WAIT_TC_COMP; | 2796 | sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP); |
2797 | } else if (!task && | ||
2798 | (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high || | ||
2799 | isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) { | ||
2800 | sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED); | ||
2983 | } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { | 2801 | } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { |
2984 | state = SCI_REQ_SMP_WAIT_RESP; | 2802 | sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP); |
2985 | } else if (task && sas_protocol_ata(task->task_proto) && | 2803 | } else if (task && sas_protocol_ata(task->task_proto) && |
2986 | !task->ata_task.use_ncq) { | 2804 | !task->ata_task.use_ncq) { |
2987 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && | 2805 | u32 state; |
2988 | task->ata_task.fis.command == ATA_CMD_PACKET) { | 2806 | |
2989 | state = SCI_REQ_ATAPI_WAIT_H2D; | 2807 | if (task->data_dir == DMA_NONE) |
2990 | } else if (task->data_dir == DMA_NONE) { | ||
2991 | state = SCI_REQ_STP_NON_DATA_WAIT_H2D; | 2808 | state = SCI_REQ_STP_NON_DATA_WAIT_H2D; |
2992 | } else if (task->ata_task.dma_xfer) { | 2809 | else if (task->ata_task.dma_xfer) |
2993 | state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; | 2810 | state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; |
2994 | } else /* PIO */ { | 2811 | else /* PIO */ |
2995 | state = SCI_REQ_STP_PIO_WAIT_H2D; | 2812 | state = SCI_REQ_STP_PIO_WAIT_H2D; |
2996 | } | 2813 | |
2997 | } else { | 2814 | sci_change_state(sm, state); |
2998 | /* SSP or NCQ are fully accelerated, no substates */ | ||
2999 | return; | ||
3000 | } | 2815 | } |
3001 | sci_change_state(sm, state); | ||
3002 | } | 2816 | } |
3003 | 2817 | ||
3004 | static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) | 2818 | static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) |
@@ -3036,6 +2850,31 @@ static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_ba | |||
3036 | ireq->target_device->working_request = ireq; | 2850 | ireq->target_device->working_request = ireq; |
3037 | } | 2851 | } |
3038 | 2852 | ||
2853 | static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) | ||
2854 | { | ||
2855 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | ||
2856 | |||
2857 | ireq->target_device->working_request = ireq; | ||
2858 | } | ||
2859 | |||
2860 | static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) | ||
2861 | { | ||
2862 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | ||
2863 | struct scu_task_context *tc = ireq->tc; | ||
2864 | struct host_to_dev_fis *h2d_fis; | ||
2865 | enum sci_status status; | ||
2866 | |||
2867 | /* Clear the SRST bit */ | ||
2868 | h2d_fis = &ireq->stp.cmd; | ||
2869 | h2d_fis->control = 0; | ||
2870 | |||
2871 | /* Clear the TC control bit */ | ||
2872 | tc->control_frame = 0; | ||
2873 | |||
2874 | status = sci_controller_continue_io(ireq); | ||
2875 | WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); | ||
2876 | } | ||
2877 | |||
3039 | static const struct sci_base_state sci_request_state_table[] = { | 2878 | static const struct sci_base_state sci_request_state_table[] = { |
3040 | [SCI_REQ_INIT] = { }, | 2879 | [SCI_REQ_INIT] = { }, |
3041 | [SCI_REQ_CONSTRUCTED] = { }, | 2880 | [SCI_REQ_CONSTRUCTED] = { }, |
@@ -3054,14 +2893,17 @@ static const struct sci_base_state sci_request_state_table[] = { | |||
3054 | [SCI_REQ_STP_PIO_DATA_OUT] = { }, | 2893 | [SCI_REQ_STP_PIO_DATA_OUT] = { }, |
3055 | [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, | 2894 | [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, |
3056 | [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, | 2895 | [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, |
2896 | [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { | ||
2897 | .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, | ||
2898 | }, | ||
2899 | [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { | ||
2900 | .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, | ||
2901 | }, | ||
2902 | [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, | ||
3057 | [SCI_REQ_TASK_WAIT_TC_COMP] = { }, | 2903 | [SCI_REQ_TASK_WAIT_TC_COMP] = { }, |
3058 | [SCI_REQ_TASK_WAIT_TC_RESP] = { }, | 2904 | [SCI_REQ_TASK_WAIT_TC_RESP] = { }, |
3059 | [SCI_REQ_SMP_WAIT_RESP] = { }, | 2905 | [SCI_REQ_SMP_WAIT_RESP] = { }, |
3060 | [SCI_REQ_SMP_WAIT_TC_COMP] = { }, | 2906 | [SCI_REQ_SMP_WAIT_TC_COMP] = { }, |
3061 | [SCI_REQ_ATAPI_WAIT_H2D] = { }, | ||
3062 | [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, | ||
3063 | [SCI_REQ_ATAPI_WAIT_D2H] = { }, | ||
3064 | [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, | ||
3065 | [SCI_REQ_COMPLETED] = { | 2907 | [SCI_REQ_COMPLETED] = { |
3066 | .enter_state = sci_request_completed_state_enter, | 2908 | .enter_state = sci_request_completed_state_enter, |
3067 | }, | 2909 | }, |
@@ -3079,7 +2921,7 @@ sci_general_request_construct(struct isci_host *ihost, | |||
3079 | sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); | 2921 | sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); |
3080 | 2922 | ||
3081 | ireq->target_device = idev; | 2923 | ireq->target_device = idev; |
3082 | ireq->protocol = SAS_PROTOCOL_NONE; | 2924 | ireq->protocol = SCIC_NO_PROTOCOL; |
3083 | ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; | 2925 | ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; |
3084 | 2926 | ||
3085 | ireq->sci_status = SCI_SUCCESS; | 2927 | ireq->sci_status = SCI_SUCCESS; |
@@ -3103,7 +2945,7 @@ sci_io_request_construct(struct isci_host *ihost, | |||
3103 | 2945 | ||
3104 | if (dev->dev_type == SAS_END_DEV) | 2946 | if (dev->dev_type == SAS_END_DEV) |
3105 | /* pass */; | 2947 | /* pass */; |
3106 | else if (dev_is_sata(dev)) | 2948 | else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) |
3107 | memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); | 2949 | memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); |
3108 | else if (dev_is_expander(dev)) | 2950 | else if (dev_is_expander(dev)) |
3109 | /* pass */; | 2951 | /* pass */; |
@@ -3125,15 +2967,10 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost, | |||
3125 | /* Build the common part of the request */ | 2967 | /* Build the common part of the request */ |
3126 | sci_general_request_construct(ihost, idev, ireq); | 2968 | sci_general_request_construct(ihost, idev, ireq); |
3127 | 2969 | ||
3128 | if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { | 2970 | if (dev->dev_type == SAS_END_DEV || |
2971 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | ||
3129 | set_bit(IREQ_TMF, &ireq->flags); | 2972 | set_bit(IREQ_TMF, &ireq->flags); |
3130 | memset(ireq->tc, 0, sizeof(struct scu_task_context)); | 2973 | memset(ireq->tc, 0, sizeof(struct scu_task_context)); |
3131 | |||
3132 | /* Set the protocol indicator. */ | ||
3133 | if (dev_is_sata(dev)) | ||
3134 | ireq->protocol = SAS_PROTOCOL_STP; | ||
3135 | else | ||
3136 | ireq->protocol = SAS_PROTOCOL_SSP; | ||
3137 | } else | 2974 | } else |
3138 | status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 2975 | status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3139 | 2976 | ||
@@ -3195,7 +3032,7 @@ sci_io_request_construct_smp(struct device *dev, | |||
3195 | u8 req_len; | 3032 | u8 req_len; |
3196 | u32 cmd; | 3033 | u32 cmd; |
3197 | 3034 | ||
3198 | kaddr = kmap_atomic(sg_page(sg)); | 3035 | kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); |
3199 | smp_req = kaddr + sg->offset; | 3036 | smp_req = kaddr + sg->offset; |
3200 | /* | 3037 | /* |
3201 | * Look at the SMP requests' header fields; for certain SAS 1.x SMP | 3038 | * Look at the SMP requests' header fields; for certain SAS 1.x SMP |
@@ -3221,12 +3058,12 @@ sci_io_request_construct_smp(struct device *dev, | |||
3221 | req_len = smp_req->req_len; | 3058 | req_len = smp_req->req_len; |
3222 | sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); | 3059 | sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); |
3223 | cmd = *(u32 *) smp_req; | 3060 | cmd = *(u32 *) smp_req; |
3224 | kunmap_atomic(kaddr); | 3061 | kunmap_atomic(kaddr, KM_IRQ0); |
3225 | 3062 | ||
3226 | if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) | 3063 | if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) |
3227 | return SCI_FAILURE; | 3064 | return SCI_FAILURE; |
3228 | 3065 | ||
3229 | ireq->protocol = SAS_PROTOCOL_SMP; | 3066 | ireq->protocol = SCIC_SMP_PROTOCOL; |
3230 | 3067 | ||
3231 | /* byte swap the smp request. */ | 3068 | /* byte swap the smp request. */ |
3232 | 3069 | ||
@@ -3411,6 +3248,9 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t | |||
3411 | ireq->io_request_completion = NULL; | 3248 | ireq->io_request_completion = NULL; |
3412 | ireq->flags = 0; | 3249 | ireq->flags = 0; |
3413 | ireq->num_sg_entries = 0; | 3250 | ireq->num_sg_entries = 0; |
3251 | INIT_LIST_HEAD(&ireq->completed_node); | ||
3252 | INIT_LIST_HEAD(&ireq->dev_node); | ||
3253 | isci_request_change_state(ireq, allocated); | ||
3414 | 3254 | ||
3415 | return ireq; | 3255 | return ireq; |
3416 | } | 3256 | } |
@@ -3423,7 +3263,7 @@ static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, | |||
3423 | 3263 | ||
3424 | ireq = isci_request_from_tag(ihost, tag); | 3264 | ireq = isci_request_from_tag(ihost, tag); |
3425 | ireq->ttype_ptr.io_task_ptr = task; | 3265 | ireq->ttype_ptr.io_task_ptr = task; |
3426 | clear_bit(IREQ_TMF, &ireq->flags); | 3266 | ireq->ttype = io_task; |
3427 | task->lldd_task = ireq; | 3267 | task->lldd_task = ireq; |
3428 | 3268 | ||
3429 | return ireq; | 3269 | return ireq; |
@@ -3437,7 +3277,7 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, | |||
3437 | 3277 | ||
3438 | ireq = isci_request_from_tag(ihost, tag); | 3278 | ireq = isci_request_from_tag(ihost, tag); |
3439 | ireq->ttype_ptr.tmf_task_ptr = isci_tmf; | 3279 | ireq->ttype_ptr.tmf_task_ptr = isci_tmf; |
3440 | set_bit(IREQ_TMF, &ireq->flags); | 3280 | ireq->ttype = tmf_task; |
3441 | 3281 | ||
3442 | return ireq; | 3282 | return ireq; |
3443 | } | 3283 | } |
@@ -3494,15 +3334,26 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3494 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 3334 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
3495 | return status; | 3335 | return status; |
3496 | } | 3336 | } |
3337 | |||
3497 | /* Either I/O started OK, or the core has signaled that | 3338 | /* Either I/O started OK, or the core has signaled that |
3498 | * the device needs a target reset. | 3339 | * the device needs a target reset. |
3340 | * | ||
3341 | * In either case, hold onto the I/O for later. | ||
3342 | * | ||
3343 | * Update its status and add it to the list in the | ||
3344 | * remote device object. | ||
3499 | */ | 3345 | */ |
3500 | if (status != SCI_SUCCESS) { | 3346 | list_add(&ireq->dev_node, &idev->reqs_in_process); |
3347 | |||
3348 | if (status == SCI_SUCCESS) { | ||
3349 | isci_request_change_state(ireq, started); | ||
3350 | } else { | ||
3501 | /* The request did not really start in the | 3351 | /* The request did not really start in the |
3502 | * hardware, so clear the request handle | 3352 | * hardware, so clear the request handle |
3503 | * here so no terminations will be done. | 3353 | * here so no terminations will be done. |
3504 | */ | 3354 | */ |
3505 | set_bit(IREQ_TERMINATED, &ireq->flags); | 3355 | set_bit(IREQ_TERMINATED, &ireq->flags); |
3356 | isci_request_change_state(ireq, completed); | ||
3506 | } | 3357 | } |
3507 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 3358 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
3508 | 3359 | ||
@@ -3519,7 +3370,8 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3519 | /* Cause this task to be scheduled in the SCSI error | 3370 | /* Cause this task to be scheduled in the SCSI error |
3520 | * handler thread. | 3371 | * handler thread. |
3521 | */ | 3372 | */ |
3522 | sas_task_abort(task); | 3373 | isci_execpath_callback(ihost, task, |
3374 | sas_task_abort); | ||
3523 | 3375 | ||
3524 | /* Change the status, since we are holding | 3376 | /* Change the status, since we are holding |
3525 | * the I/O until it is managed by the SCSI | 3377 | * the I/O until it is managed by the SCSI |
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index aff95317fcf..58d70b6606e 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h | |||
@@ -61,12 +61,42 @@ | |||
61 | #include "scu_task_context.h" | 61 | #include "scu_task_context.h" |
62 | 62 | ||
63 | /** | 63 | /** |
64 | * enum isci_request_status - This enum defines the possible states of an I/O | ||
65 | * request. | ||
66 | * | ||
67 | * | ||
68 | */ | ||
69 | enum isci_request_status { | ||
70 | unallocated = 0x00, | ||
71 | allocated = 0x01, | ||
72 | started = 0x02, | ||
73 | completed = 0x03, | ||
74 | aborting = 0x04, | ||
75 | aborted = 0x05, | ||
76 | terminating = 0x06, | ||
77 | dead = 0x07 | ||
78 | }; | ||
79 | |||
80 | enum task_type { | ||
81 | io_task = 0, | ||
82 | tmf_task = 1 | ||
83 | }; | ||
84 | |||
85 | enum sci_request_protocol { | ||
86 | SCIC_NO_PROTOCOL, | ||
87 | SCIC_SMP_PROTOCOL, | ||
88 | SCIC_SSP_PROTOCOL, | ||
89 | SCIC_STP_PROTOCOL | ||
90 | }; /* XXX remove me, use sas_task.{dev|task_proto} instead */ | ||
91 | |||
92 | /** | ||
64 | * isci_stp_request - extra request infrastructure to handle pio/atapi protocol | 93 | * isci_stp_request - extra request infrastructure to handle pio/atapi protocol |
65 | * @pio_len - number of bytes requested at PIO setup | 94 | * @pio_len - number of bytes requested at PIO setup |
66 | * @status - pio setup ending status value to tell us if we need | 95 | * @status - pio setup ending status value to tell us if we need |
67 | * to wait for another fis or if the transfer is complete. Upon | 96 | * to wait for another fis or if the transfer is complete. Upon |
68 | * receipt of a d2h fis this will be the status field of that fis. | 97 | * receipt of a d2h fis this will be the status field of that fis. |
69 | * @sgl - track pio transfer progress as we iterate through the sgl | 98 | * @sgl - track pio transfer progress as we iterate through the sgl |
99 | * @device_cdb_len - atapi device advertises its transfer constraints at setup | ||
70 | */ | 100 | */ |
71 | struct isci_stp_request { | 101 | struct isci_stp_request { |
72 | u32 pio_len; | 102 | u32 pio_len; |
@@ -77,24 +107,28 @@ struct isci_stp_request { | |||
77 | u8 set; | 107 | u8 set; |
78 | u32 offset; | 108 | u32 offset; |
79 | } sgl; | 109 | } sgl; |
110 | u32 device_cdb_len; | ||
80 | }; | 111 | }; |
81 | 112 | ||
82 | struct isci_request { | 113 | struct isci_request { |
114 | enum isci_request_status status; | ||
83 | #define IREQ_COMPLETE_IN_TARGET 0 | 115 | #define IREQ_COMPLETE_IN_TARGET 0 |
84 | #define IREQ_TERMINATED 1 | 116 | #define IREQ_TERMINATED 1 |
85 | #define IREQ_TMF 2 | 117 | #define IREQ_TMF 2 |
86 | #define IREQ_ACTIVE 3 | 118 | #define IREQ_ACTIVE 3 |
87 | #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */ | ||
88 | #define IREQ_TC_ABORT_POSTED 5 | ||
89 | #define IREQ_ABORT_PATH_ACTIVE 6 | ||
90 | #define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */ | ||
91 | unsigned long flags; | 119 | unsigned long flags; |
92 | /* XXX kill ttype and ttype_ptr, allocate full sas_task */ | 120 | /* XXX kill ttype and ttype_ptr, allocate full sas_task */ |
121 | enum task_type ttype; | ||
93 | union ttype_ptr_union { | 122 | union ttype_ptr_union { |
94 | struct sas_task *io_task_ptr; /* When ttype==io_task */ | 123 | struct sas_task *io_task_ptr; /* When ttype==io_task */ |
95 | struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ | 124 | struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ |
96 | } ttype_ptr; | 125 | } ttype_ptr; |
97 | struct isci_host *isci_host; | 126 | struct isci_host *isci_host; |
127 | /* For use in the requests_to_{complete|abort} lists: */ | ||
128 | struct list_head completed_node; | ||
129 | /* For use in the reqs_in_process list: */ | ||
130 | struct list_head dev_node; | ||
131 | spinlock_t state_lock; | ||
98 | dma_addr_t request_daddr; | 132 | dma_addr_t request_daddr; |
99 | dma_addr_t zero_scatter_daddr; | 133 | dma_addr_t zero_scatter_daddr; |
100 | unsigned int num_sg_entries; | 134 | unsigned int num_sg_entries; |
@@ -114,7 +148,7 @@ struct isci_request { | |||
114 | struct isci_host *owning_controller; | 148 | struct isci_host *owning_controller; |
115 | struct isci_remote_device *target_device; | 149 | struct isci_remote_device *target_device; |
116 | u16 io_tag; | 150 | u16 io_tag; |
117 | enum sas_protocol protocol; | 151 | enum sci_request_protocol protocol; |
118 | u32 scu_status; /* hardware result */ | 152 | u32 scu_status; /* hardware result */ |
119 | u32 sci_status; /* upper layer disposition */ | 153 | u32 sci_status; /* upper layer disposition */ |
120 | u32 post_context; | 154 | u32 post_context; |
@@ -156,103 +190,112 @@ static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req) | |||
156 | } | 190 | } |
157 | 191 | ||
158 | /** | 192 | /** |
159 | * enum sci_base_request_states - request state machine states | 193 | * enum sci_base_request_states - This enumeration depicts all the states for |
160 | * | 194 | * the common request state machine. |
161 | * @SCI_REQ_INIT: Simply the initial state for the base request state machine. | ||
162 | * | ||
163 | * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been | ||
164 | * constructed. This state is entered from the INITIAL state. | ||
165 | * | ||
166 | * @SCI_REQ_STARTED: This state indicates that the request has been started. | ||
167 | * This state is entered from the CONSTRUCTED state. | ||
168 | * | ||
169 | * @SCI_REQ_STP_UDMA_WAIT_TC_COMP: | ||
170 | * @SCI_REQ_STP_UDMA_WAIT_D2H: | ||
171 | * @SCI_REQ_STP_NON_DATA_WAIT_H2D: | ||
172 | * @SCI_REQ_STP_NON_DATA_WAIT_D2H: | ||
173 | * | ||
174 | * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is | ||
175 | * waiting for the TC completion notification for the H2D Register FIS | ||
176 | * | ||
177 | * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is | ||
178 | * waiting for either a PIO Setup FIS or a D2H register FIS. The type of frame | ||
179 | * received is based on the result of the prior frame and line conditions. | ||
180 | * | ||
181 | * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is | ||
182 | * waiting for a DATA frame from the device. | ||
183 | * | ||
184 | * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is | ||
185 | * waiting to transmit the next data frame to the device. | ||
186 | * | ||
187 | * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is | ||
188 | * waiting for the TC completion notification for the H2D Register FIS | ||
189 | * | ||
190 | * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is | ||
191 | * waiting for either a PIO Setup. | ||
192 | * | ||
193 | * @SCI_REQ_ATAPI_WAIT_D2H: The non-data IO transit to this state in this state | ||
194 | * after receiving TC completion. While in this state IO request object is | ||
195 | * waiting for D2H status frame as UF. | ||
196 | * | ||
197 | * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports | ||
198 | * task context completion after every frame submission, so in the | ||
199 | * non-accelerated case we need to expect the completion for the "cdb" frame. | ||
200 | * | ||
201 | * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that | ||
202 | * the started raw task management request is waiting for the transmission of | ||
203 | * the initial frame (i.e. command, task, etc.). | ||
204 | * | ||
205 | * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task | ||
206 | * management request is waiting for the reception of an unsolicited frame | ||
207 | * (i.e. response IU). | ||
208 | * | ||
209 | * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started SMP | ||
210 | * request is waiting for the reception of an unsolicited frame | ||
211 | * (i.e. response IU). | ||
212 | * | 195 | * |
213 | * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that | ||
214 | * the started SMP request is waiting for the transmission of the initial frame | ||
215 | * (i.e. command, task, etc.). | ||
216 | * | 196 | * |
217 | * @SCI_REQ_COMPLETED: This state indicates that the request has completed. | ||
218 | * This state is entered from the STARTED state. This state is entered from the | ||
219 | * ABORTING state. | ||
220 | * | ||
221 | * @SCI_REQ_ABORTING: This state indicates that the request is in the process | ||
222 | * of being terminated/aborted. This state is entered from the CONSTRUCTED | ||
223 | * state. This state is entered from the STARTED state. | ||
224 | * | ||
225 | * @SCI_REQ_FINAL: Simply the final state for the base request state machine. | ||
226 | */ | 197 | */ |
227 | #define REQUEST_STATES {\ | 198 | enum sci_base_request_states { |
228 | C(REQ_INIT),\ | 199 | /* |
229 | C(REQ_CONSTRUCTED),\ | 200 | * Simply the initial state for the base request state machine. |
230 | C(REQ_STARTED),\ | 201 | */ |
231 | C(REQ_STP_UDMA_WAIT_TC_COMP),\ | 202 | SCI_REQ_INIT, |
232 | C(REQ_STP_UDMA_WAIT_D2H),\ | 203 | |
233 | C(REQ_STP_NON_DATA_WAIT_H2D),\ | 204 | /* |
234 | C(REQ_STP_NON_DATA_WAIT_D2H),\ | 205 | * This state indicates that the request has been constructed. |
235 | C(REQ_STP_PIO_WAIT_H2D),\ | 206 | * This state is entered from the INITIAL state. |
236 | C(REQ_STP_PIO_WAIT_FRAME),\ | 207 | */ |
237 | C(REQ_STP_PIO_DATA_IN),\ | 208 | SCI_REQ_CONSTRUCTED, |
238 | C(REQ_STP_PIO_DATA_OUT),\ | 209 | |
239 | C(REQ_ATAPI_WAIT_H2D),\ | 210 | /* |
240 | C(REQ_ATAPI_WAIT_PIO_SETUP),\ | 211 | * This state indicates that the request has been started. This state |
241 | C(REQ_ATAPI_WAIT_D2H),\ | 212 | * is entered from the CONSTRUCTED state. |
242 | C(REQ_ATAPI_WAIT_TC_COMP),\ | 213 | */ |
243 | C(REQ_TASK_WAIT_TC_COMP),\ | 214 | SCI_REQ_STARTED, |
244 | C(REQ_TASK_WAIT_TC_RESP),\ | 215 | |
245 | C(REQ_SMP_WAIT_RESP),\ | 216 | SCI_REQ_STP_UDMA_WAIT_TC_COMP, |
246 | C(REQ_SMP_WAIT_TC_COMP),\ | 217 | SCI_REQ_STP_UDMA_WAIT_D2H, |
247 | C(REQ_COMPLETED),\ | 218 | |
248 | C(REQ_ABORTING),\ | 219 | SCI_REQ_STP_NON_DATA_WAIT_H2D, |
249 | C(REQ_FINAL),\ | 220 | SCI_REQ_STP_NON_DATA_WAIT_D2H, |
250 | } | 221 | |
251 | #undef C | 222 | SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED, |
252 | #define C(a) SCI_##a | 223 | SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG, |
253 | enum sci_base_request_states REQUEST_STATES; | 224 | SCI_REQ_STP_SOFT_RESET_WAIT_D2H, |
254 | #undef C | 225 | |
255 | const char *req_state_name(enum sci_base_request_states state); | 226 | /* |
227 | * While in this state the IO request object is waiting for the TC | ||
228 | * completion notification for the H2D Register FIS | ||
229 | */ | ||
230 | SCI_REQ_STP_PIO_WAIT_H2D, | ||
231 | |||
232 | /* | ||
233 | * While in this state the IO request object is waiting for either a | ||
234 | * PIO Setup FIS or a D2H register FIS. The type of frame received is | ||
235 | * based on the result of the prior frame and line conditions. | ||
236 | */ | ||
237 | SCI_REQ_STP_PIO_WAIT_FRAME, | ||
238 | |||
239 | /* | ||
240 | * While in this state the IO request object is waiting for a DATA | ||
241 | * frame from the device. | ||
242 | */ | ||
243 | SCI_REQ_STP_PIO_DATA_IN, | ||
244 | |||
245 | /* | ||
246 | * While in this state the IO request object is waiting to transmit | ||
247 | * the next data frame to the device. | ||
248 | */ | ||
249 | SCI_REQ_STP_PIO_DATA_OUT, | ||
250 | |||
251 | /* | ||
252 | * The AWAIT_TC_COMPLETION sub-state indicates that the started raw | ||
253 | * task management request is waiting for the transmission of the | ||
254 | * initial frame (i.e. command, task, etc.). | ||
255 | */ | ||
256 | SCI_REQ_TASK_WAIT_TC_COMP, | ||
257 | |||
258 | /* | ||
259 | * This sub-state indicates that the started task management request | ||
260 | * is waiting for the reception of an unsolicited frame | ||
261 | * (i.e. response IU). | ||
262 | */ | ||
263 | SCI_REQ_TASK_WAIT_TC_RESP, | ||
264 | |||
265 | /* | ||
266 | * This sub-state indicates that the started task management request | ||
267 | * is waiting for the reception of an unsolicited frame | ||
268 | * (i.e. response IU). | ||
269 | */ | ||
270 | SCI_REQ_SMP_WAIT_RESP, | ||
271 | |||
272 | /* | ||
273 | * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP | ||
274 | * request is waiting for the transmission of the initial frame | ||
275 | * (i.e. command, task, etc.). | ||
276 | */ | ||
277 | SCI_REQ_SMP_WAIT_TC_COMP, | ||
278 | |||
279 | /* | ||
280 | * This state indicates that the request has completed. | ||
281 | * This state is entered from the STARTED state. This state is entered | ||
282 | * from the ABORTING state. | ||
283 | */ | ||
284 | SCI_REQ_COMPLETED, | ||
285 | |||
286 | /* | ||
287 | * This state indicates that the request is in the process of being | ||
288 | * terminated/aborted. | ||
289 | * This state is entered from the CONSTRUCTED state. | ||
290 | * This state is entered from the STARTED state. | ||
291 | */ | ||
292 | SCI_REQ_ABORTING, | ||
293 | |||
294 | /* | ||
295 | * Simply the final state for the base request state machine. | ||
296 | */ | ||
297 | SCI_REQ_FINAL, | ||
298 | }; | ||
256 | 299 | ||
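The left column replaces the hand-written enum with an X-macro: REQUEST_STATES is expanded once with C(a) defined as SCI_##a to produce the enum, and presumably expanded again elsewhere with C(a) defined as #a to produce the name table behind req_state_name(). A compilable sketch of the same trick, with the hypothetical STATES/state_name() standing in for REQUEST_STATES/req_state_name() and a shortened state list:

#include <stdio.h>

#define STATES {\
	C(REQ_INIT),\
	C(REQ_CONSTRUCTED),\
	C(REQ_STARTED),\
	C(REQ_COMPLETED),\
	C(REQ_FINAL),\
}
#undef C
#define C(a) SCI_##a
enum base_request_states STATES;	/* SCI_REQ_INIT, SCI_REQ_CONSTRUCTED, ... */
#undef C
#define C(a) #a
static const char *state_names[] = STATES;	/* "REQ_INIT", "REQ_CONSTRUCTED", ... */
#undef C

static const char *state_name(enum base_request_states state)
{
	return state_names[state];
}

int main(void)
{
	printf("%s\n", state_name(SCI_REQ_STARTED));	/* prints REQ_STARTED */
	return 0;
}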
257 | enum sci_status sci_request_start(struct isci_request *ireq); | 300 | enum sci_status sci_request_start(struct isci_request *ireq); |
258 | enum sci_status sci_io_request_terminate(struct isci_request *ireq); | 301 | enum sci_status sci_io_request_terminate(struct isci_request *ireq); |
@@ -283,6 +326,92 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) | |||
283 | return ireq->request_daddr + (requested_addr - base_addr); | 326 | return ireq->request_daddr + (requested_addr - base_addr); |
284 | } | 327 | } |
285 | 328 | ||
329 | /** | ||
330 | * isci_request_change_state() - This function sets the status of the request | ||
331 | * object. | ||
332 | * @isci_request: This parameter points to the isci_request object | ||
333 | * @status: This parameter is the new status of the object | ||
334 | * Return: the request state previous to the change. | ||
335 | */ | ||
336 | static inline enum isci_request_status | ||
337 | isci_request_change_state(struct isci_request *isci_request, | ||
338 | enum isci_request_status status) | ||
339 | { | ||
340 | enum isci_request_status old_state; | ||
341 | unsigned long flags; | ||
342 | |||
343 | BUG_ON(isci_request == NULL); | ||
344 | |||
345 | dev_dbg(&isci_request->isci_host->pdev->dev, | ||
346 | "%s: isci_request = %p, state = 0x%x\n", | ||
347 | __func__, | ||
348 | isci_request, | ||
349 | status); | ||
350 | |||
351 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
352 | old_state = isci_request->status; | ||
353 | isci_request->status = status; | ||
354 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
355 | |||
356 | return old_state; | ||
357 | } | ||
358 | |||
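isci_request_change_state() above is a lock-protected exchange: publish the new status and hand back the old one so callers can branch on the prior state. A userspace analogue of the same shape, using a C11 atomic exchange in place of the spinlock (the names and the trimmed status enum are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

enum req_status { unallocated, allocated, started, completed };

static _Atomic enum req_status req_state = unallocated;

/* Swap in the new status, returning whatever state it replaced. */
static enum req_status change_state(enum req_status new_state)
{
	return atomic_exchange(&req_state, new_state);
}

int main(void)
{
	change_state(allocated);
	printf("old = %d\n", change_state(started));	/* old = 1 (allocated) */
	return 0;
}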
359 | /** | ||
360 | * isci_request_change_started_to_newstate() - This function sets the status | ||
361 | * of the request object, but only if it is currently "started" or "aborting". | ||
362 | * @isci_request: This parameter points to the isci_request object | ||
363 | * @completion_ptr: This completion is signalled when the request completes. | ||
364 | * @newstate: This parameter is the new status of the object | ||
365 | * Return: the request state previous to any change. | ||
366 | */ | ||
367 | static inline enum isci_request_status | ||
368 | isci_request_change_started_to_newstate(struct isci_request *isci_request, | ||
369 | struct completion *completion_ptr, | ||
370 | enum isci_request_status newstate) | ||
371 | { | ||
372 | enum isci_request_status old_state; | ||
373 | unsigned long flags; | ||
374 | |||
375 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
376 | |||
377 | old_state = isci_request->status; | ||
378 | |||
379 | if (old_state == started || old_state == aborting) { | ||
380 | BUG_ON(isci_request->io_request_completion != NULL); | ||
381 | |||
382 | isci_request->io_request_completion = completion_ptr; | ||
383 | isci_request->status = newstate; | ||
384 | } | ||
385 | |||
386 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
387 | |||
388 | dev_dbg(&isci_request->isci_host->pdev->dev, | ||
389 | "%s: isci_request = %p, old_state = 0x%x\n", | ||
390 | __func__, | ||
391 | isci_request, | ||
392 | old_state); | ||
393 | |||
394 | return old_state; | ||
395 | } | ||
396 | |||
397 | /** | ||
398 | * isci_request_change_started_to_aborted() - This function sets the status of | ||
399 | * the request object. | ||
400 | * @isci_request: This parameter points to the isci_request object | ||
401 | * @completion_ptr: This parameter is saved as the kernel completion structure | ||
402 | * signalled when the old request completes. | ||
403 | * | ||
404 | * Return: the request state previous to any change. | ||
405 | */ | ||
406 | static inline enum isci_request_status | ||
407 | isci_request_change_started_to_aborted(struct isci_request *isci_request, | ||
408 | struct completion *completion_ptr) | ||
409 | { | ||
410 | return isci_request_change_started_to_newstate(isci_request, | ||
411 | completion_ptr, | ||
412 | aborted); | ||
413 | } | ||
414 | |||
286 | #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) | 415 | #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) |
287 | 416 | ||
288 | #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) | 417 | #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) |
@@ -292,12 +421,17 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, | |||
292 | u16 tag); | 421 | u16 tag); |
293 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, | 422 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, |
294 | struct sas_task *task, u16 tag); | 423 | struct sas_task *task, u16 tag); |
424 | void isci_terminate_pending_requests(struct isci_host *ihost, | ||
425 | struct isci_remote_device *idev); | ||
295 | enum sci_status | 426 | enum sci_status |
296 | sci_task_request_construct(struct isci_host *ihost, | 427 | sci_task_request_construct(struct isci_host *ihost, |
297 | struct isci_remote_device *idev, | 428 | struct isci_remote_device *idev, |
298 | u16 io_tag, | 429 | u16 io_tag, |
299 | struct isci_request *ireq); | 430 | struct isci_request *ireq); |
300 | enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq); | 431 | enum sci_status |
432 | sci_task_request_construct_ssp(struct isci_request *ireq); | ||
433 | enum sci_status | ||
434 | sci_task_request_construct_sata(struct isci_request *ireq); | ||
301 | void sci_smp_request_copy_response(struct isci_request *ireq); | 435 | void sci_smp_request_copy_response(struct isci_request *ireq); |
302 | 436 | ||
303 | static inline int isci_task_is_ncq_recovery(struct sas_task *task) | 437 | static inline int isci_task_is_ncq_recovery(struct sas_task *task) |
@@ -307,4 +441,5 @@ static inline int isci_task_is_ncq_recovery(struct sas_task *task) | |||
307 | task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); | 441 | task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); |
308 | 442 | ||
309 | } | 443 | } |
444 | |||
310 | #endif /* !defined(_ISCI_REQUEST_H_) */ | 445 | #endif /* !defined(_ISCI_REQUEST_H_) */ |
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h index 071cb74a211..c8b329c695f 100644 --- a/drivers/scsi/isci/scu_completion_codes.h +++ b/drivers/scsi/isci/scu_completion_codes.h | |||
@@ -224,7 +224,6 @@ | |||
224 | * 32-bit value like we want, each immediate value must be cast to a u32. | 224 | * 32-bit value like we want, each immediate value must be cast to a u32. |
225 | */ | 225 | */ |
226 | #define SCU_TASK_DONE_GOOD ((u32)0x00) | 226 | #define SCU_TASK_DONE_GOOD ((u32)0x00) |
227 | #define SCU_TASK_DONE_TX_RAW_CMD_ERR ((u32)0x08) | ||
228 | #define SCU_TASK_DONE_CRC_ERR ((u32)0x14) | 227 | #define SCU_TASK_DONE_CRC_ERR ((u32)0x14) |
229 | #define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14) | 228 | #define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14) |
230 | #define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15) | 229 | #define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15) |
@@ -238,7 +237,6 @@ | |||
238 | #define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A) | 237 | #define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A) |
239 | #define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A) | 238 | #define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A) |
240 | #define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B) | 239 | #define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B) |
241 | #define SCU_TASK_DONE_BREAK_RCVD ((u32)0x1B) | ||
242 | #define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B) | 240 | #define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B) |
243 | #define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C) | 241 | #define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C) |
244 | #define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C) | 242 | #define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C) |
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h index 869a979eb5b..7df87d92328 100644 --- a/drivers/scsi/isci/scu_task_context.h +++ b/drivers/scsi/isci/scu_task_context.h | |||
@@ -866,9 +866,9 @@ struct scu_task_context { | |||
866 | struct transport_snapshot snapshot; /* read only set to 0 */ | 866 | struct transport_snapshot snapshot; /* read only set to 0 */ |
867 | 867 | ||
868 | /* OFFSET 0x5C */ | 868 | /* OFFSET 0x5C */ |
869 | u32 blk_prot_en:1; | 869 | u32 block_protection_enable:1; |
870 | u32 blk_sz:2; | 870 | u32 block_size:2; |
871 | u32 blk_prot_func:2; | 871 | u32 block_protection_function:2; |
872 | u32 reserved_5C_0:9; | 872 | u32 reserved_5C_0:9; |
873 | u32 active_sgl_element:2; /* read only set to 0 */ | 873 | u32 active_sgl_element:2; /* read only set to 0 */ |
874 | u32 sgl_exhausted:1; /* read only set to 0 */ | 874 | u32 sgl_exhausted:1; /* read only set to 0 */ |
@@ -896,56 +896,33 @@ struct scu_task_context { | |||
896 | u32 reserved_C4_CC[3]; | 896 | u32 reserved_C4_CC[3]; |
897 | 897 | ||
898 | /* OFFSET 0xD0 */ | 898 | /* OFFSET 0xD0 */ |
899 | u32 interm_crc_val:16; | 899 | u32 intermediate_crc_value:16; |
900 | u32 init_crc_seed:16; | 900 | u32 initial_crc_seed:16; |
901 | 901 | ||
902 | /* OFFSET 0xD4 */ | 902 | /* OFFSET 0xD4 */ |
903 | u32 app_tag_verify:16; | 903 | u32 application_tag_for_verify:16; |
904 | u32 app_tag_gen:16; | 904 | u32 application_tag_for_generate:16; |
905 | 905 | ||
906 | /* OFFSET 0xD8 */ | 906 | /* OFFSET 0xD8 */ |
907 | u32 ref_tag_seed_verify; | 907 | u32 reference_tag_seed_for_verify_function; |
908 | 908 | ||
909 | /* OFFSET 0xDC */ | 909 | /* OFFSET 0xDC */ |
910 | u32 UD_bytes_immed_val:13; | 910 | u32 reserved_DC; |
911 | u32 reserved_DC_0:3; | ||
912 | u32 DIF_bytes_immed_val:4; | ||
913 | u32 reserved_DC_1:12; | ||
914 | 911 | ||
915 | /* OFFSET 0xE0 */ | 912 | /* OFFSET 0xE0 */ |
916 | u32 bgc_blk_sz:13; | 913 | u32 reserved_E0_0:16; |
917 | u32 reserved_E0_0:3; | 914 | u32 application_tag_mask_for_generate:16; |
918 | u32 app_tag_gen_mask:16; | ||
919 | 915 | ||
920 | /* OFFSET 0xE4 */ | 916 | /* OFFSET 0xE4 */ |
921 | union { | 917 | u32 block_protection_control:16; |
922 | u16 bgctl; | 918 | u32 application_tag_mask_for_verify:16; |
923 | struct { | ||
924 | u16 crc_verify:1; | ||
925 | u16 app_tag_chk:1; | ||
926 | u16 ref_tag_chk:1; | ||
927 | u16 op:2; | ||
928 | u16 legacy:1; | ||
929 | u16 invert_crc_seed:1; | ||
930 | u16 ref_tag_gen:1; | ||
931 | u16 fixed_ref_tag:1; | ||
932 | u16 invert_crc:1; | ||
933 | u16 app_ref_f_detect:1; | ||
934 | u16 uninit_dif_check_err:1; | ||
935 | u16 uninit_dif_bypass:1; | ||
936 | u16 app_f_detect:1; | ||
937 | u16 reserved_0:2; | ||
938 | } bgctl_f; | ||
939 | }; | ||
940 | |||
941 | u16 app_tag_verify_mask; | ||
942 | 919 | ||
943 | /* OFFSET 0xE8 */ | 920 | /* OFFSET 0xE8 */ |
944 | u32 blk_guard_err:8; | 921 | u32 block_protection_error:8; |
945 | u32 reserved_E8_0:24; | 922 | u32 reserved_E8_0:24; |
946 | 923 | ||
947 | /* OFFSET 0xEC */ | 924 | /* OFFSET 0xEC */ |
948 | u32 ref_tag_seed_gen; | 925 | u32 reference_tag_seed_for_verify; |
949 | 926 | ||
950 | /* OFFSET 0xF0 */ | 927 | /* OFFSET 0xF0 */ |
951 | u32 intermediate_crc_valid_snapshot:16; | 928 | u32 intermediate_crc_valid_snapshot:16; |
@@ -960,6 +937,6 @@ struct scu_task_context { | |||
960 | /* OFFSET 0xFC */ | 937 | /* OFFSET 0xFC */ |
961 | u32 reference_tag_seed_for_generate_function_snapshot; | 938 | u32 reference_tag_seed_for_generate_function_snapshot; |
962 | 939 | ||
963 | } __packed; | 940 | }; |
964 | 941 | ||
965 | #endif /* _SCU_TASK_CONTEXT_H_ */ | 942 | #endif /* _SCU_TASK_CONTEXT_H_ */ |
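One detail worth noting in this hunk: the left column declares struct scu_task_context __packed, which matters because the structure mirrors a hardware layout at fixed byte offsets. A toy demonstration of what the attribute changes; the structs here are illustrative stand-ins, not the task context, and __attribute__((packed)) assumes GCC/Clang:

#include <stdio.h>
#include <stdint.h>

struct loose { uint8_t a; uint32_t b; };			/* compiler may pad */
struct tight { uint8_t a; uint32_t b; } __attribute__((packed));

int main(void)
{
	/* Typically prints "8 5": alignment padding vs. exact byte layout. */
	printf("%zu %zu\n", sizeof(struct loose), sizeof(struct tight));
	return 0;
}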
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index b6f19a1db78..d6bcdd013dc 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
@@ -78,25 +78,56 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, | |||
78 | enum exec_status status) | 78 | enum exec_status status) |
79 | 79 | ||
80 | { | 80 | { |
81 | unsigned long flags; | 81 | enum isci_completion_selection disposition; |
82 | 82 | ||
83 | /* Normal notification (task_done) */ | 83 | disposition = isci_perform_normal_io_completion; |
84 | dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", | 84 | disposition = isci_task_set_completion_status(task, response, status, |
85 | __func__, task, response, status); | 85 | disposition); |
86 | 86 | ||
87 | spin_lock_irqsave(&task->task_state_lock, flags); | 87 | /* Tasks aborted specifically by a call to the lldd_abort_task |
88 | * function should not be completed to the host in the regular path. | ||
89 | */ | ||
90 | switch (disposition) { | ||
91 | case isci_perform_normal_io_completion: | ||
92 | /* Normal notification (task_done) */ | ||
93 | dev_dbg(&ihost->pdev->dev, | ||
94 | "%s: Normal - task = %p, response=%d, " | ||
95 | "status=%d\n", | ||
96 | __func__, task, response, status); | ||
88 | 97 | ||
89 | task->task_status.resp = response; | 98 | task->lldd_task = NULL; |
90 | task->task_status.stat = status; | ||
91 | 99 | ||
92 | /* Normal notification (task_done) */ | 100 | isci_execpath_callback(ihost, task, task->task_done); |
93 | task->task_state_flags |= SAS_TASK_STATE_DONE; | 101 | break; |
94 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | 102 | |
95 | SAS_TASK_STATE_PENDING); | 103 | case isci_perform_aborted_io_completion: |
96 | task->lldd_task = NULL; | 104 | /* |
97 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 105 | * No notification because this request is already in the |
106 | * abort path. | ||
107 | */ | ||
108 | dev_dbg(&ihost->pdev->dev, | ||
109 | "%s: Aborted - task = %p, response=%d, " | ||
110 | "status=%d\n", | ||
111 | __func__, task, response, status); | ||
112 | break; | ||
113 | |||
114 | case isci_perform_error_io_completion: | ||
115 | /* Use sas_task_abort */ | ||
116 | dev_dbg(&ihost->pdev->dev, | ||
117 | "%s: Error - task = %p, response=%d, " | ||
118 | "status=%d\n", | ||
119 | __func__, task, response, status); | ||
120 | |||
121 | isci_execpath_callback(ihost, task, sas_task_abort); | ||
122 | break; | ||
98 | 123 | ||
99 | task->task_done(task); | 124 | default: |
125 | dev_dbg(&ihost->pdev->dev, | ||
126 | "%s: isci task notification default case!", | ||
127 | __func__); | ||
128 | sas_task_abort(task); | ||
129 | break; | ||
130 | } | ||
100 | } | 131 | } |
101 | 132 | ||
102 | #define for_each_sas_task(num, task) \ | 133 | #define for_each_sas_task(num, task) \ |
@@ -181,27 +212,16 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) | |||
181 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | 212 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; |
182 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 213 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
183 | 214 | ||
184 | if (test_bit(IDEV_GONE, &idev->flags)) { | 215 | /* Indicate QUEUE_FULL so that the scsi |
185 | 216 | * midlayer retries. If the request |
186 | /* Indicate that the device | 217 | * failed for remote device reasons, |
187 | * is gone. | 218 | * it gets returned as |
188 | */ | 219 | * SAS_TASK_UNDELIVERED next time |
189 | isci_task_refuse(ihost, task, | 220 | * through. |
190 | SAS_TASK_UNDELIVERED, | 221 | */ |
191 | SAS_DEVICE_UNKNOWN); | 222 | isci_task_refuse(ihost, task, |
192 | } else { | 223 | SAS_TASK_COMPLETE, |
193 | /* Indicate QUEUE_FULL so that | 224 | SAS_QUEUE_FULL); |
194 | * the scsi midlayer retries. | ||
195 | * If the request failed for | ||
196 | * remote device reasons, it | ||
197 | * gets returned as | ||
198 | * SAS_TASK_UNDELIVERED next | ||
199 | * time through. | ||
200 | */ | ||
201 | isci_task_refuse(ihost, task, | ||
202 | SAS_TASK_COMPLETE, | ||
203 | SAS_QUEUE_FULL); | ||
204 | } | ||
205 | } | 225 | } |
206 | } | 226 | } |
207 | } | 227 | } |
@@ -218,6 +238,46 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) | |||
218 | return 0; | 238 | return 0; |
219 | } | 239 | } |
220 | 240 | ||
241 | static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq) | ||
242 | { | ||
243 | struct isci_tmf *isci_tmf; | ||
244 | enum sci_status status; | ||
245 | |||
246 | if (tmf_task != ireq->ttype) | ||
247 | return SCI_FAILURE; | ||
248 | |||
249 | isci_tmf = isci_request_access_tmf(ireq); | ||
250 | |||
251 | switch (isci_tmf->tmf_code) { | ||
252 | |||
253 | case isci_tmf_sata_srst_high: | ||
254 | case isci_tmf_sata_srst_low: { | ||
255 | struct host_to_dev_fis *fis = &ireq->stp.cmd; | ||
256 | |||
257 | memset(fis, 0, sizeof(*fis)); | ||
258 | |||
259 | fis->fis_type = 0x27; | ||
260 | fis->flags &= ~0x80; | ||
261 | fis->flags &= 0xF0; | ||
262 | if (isci_tmf->tmf_code == isci_tmf_sata_srst_high) | ||
263 | fis->control |= ATA_SRST; | ||
264 | else | ||
265 | fis->control &= ~ATA_SRST; | ||
266 | break; | ||
267 | } | ||
268 | /* other management commands go here... */ | ||
269 | default: | ||
270 | return SCI_FAILURE; | ||
271 | } | ||
272 | |||
273 | /* core builds the protocol specific request | ||
274 | * based on the h2d fis. | ||
275 | */ | ||
276 | status = sci_task_request_construct_sata(ireq); | ||
277 | |||
278 | return status; | ||
279 | } | ||
280 | |||
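The srst_high/srst_low pair above implements the two halves of a SATA software reset: one H2D Register FIS with SRST asserted in the device-control field, then one with it cleared. A pared-down sketch of the FIS setup follows; the struct layout is an illustrative subset with simplified field offsets (the driver's struct host_to_dev_fis carries the full FIS), and build_srst_fis is a hypothetical helper:

#include <stdint.h>

#define FIS_TYPE_REG_H2D	0x27		/* Register FIS, host to device */
#define ATA_SRST		(1 << 2)	/* SRST bit of the device control reg */

struct h2d_fis {				/* illustrative subset only */
	uint8_t fis_type;
	uint8_t flags;				/* bit 7 set => command update */
	uint8_t command;
	uint8_t control;
};

/* Build the control FIS for one half of the SRST handshake. */
static void build_srst_fis(struct h2d_fis *fis, int assert)
{
	fis->fis_type = FIS_TYPE_REG_H2D;
	fis->flags &= ~0x80;			/* control update, not a command */
	if (assert)
		fis->control |= ATA_SRST;	/* srst_high */
	else
		fis->control &= ~ATA_SRST;	/* srst_low */
}

int main(void)
{
	struct h2d_fis fis = { 0 };

	build_srst_fis(&fis, 1);	/* assert, as for isci_tmf_sata_srst_high */
	return fis.control == ATA_SRST ? 0 : 1;
}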
221 | static struct isci_request *isci_task_request_build(struct isci_host *ihost, | 281 | static struct isci_request *isci_task_request_build(struct isci_host *ihost, |
222 | struct isci_remote_device *idev, | 282 | struct isci_remote_device *idev, |
223 | u16 tag, struct isci_tmf *isci_tmf) | 283 | u16 tag, struct isci_tmf *isci_tmf) |
@@ -257,6 +317,13 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
257 | return NULL; | 317 | return NULL; |
258 | } | 318 | } |
259 | 319 | ||
320 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | ||
321 | isci_tmf->proto = SAS_PROTOCOL_SATA; | ||
322 | status = isci_sata_management_task_request_build(ireq); | ||
323 | |||
324 | if (status != SCI_SUCCESS) | ||
325 | return NULL; | ||
326 | } | ||
260 | return ireq; | 327 | return ireq; |
261 | } | 328 | } |
262 | 329 | ||
@@ -297,7 +364,6 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
297 | 364 | ||
298 | /* Assign the pointer to the TMF's completion kernel wait structure. */ | 365 | /* Assign the pointer to the TMF's completion kernel wait structure. */ |
299 | tmf->complete = &completion; | 366 | tmf->complete = &completion; |
300 | tmf->status = SCI_FAILURE_TIMEOUT; | ||
301 | 367 | ||
302 | ireq = isci_task_request_build(ihost, idev, tag, tmf); | 368 | ireq = isci_task_request_build(ihost, idev, tag, tmf); |
303 | if (!ireq) | 369 | if (!ireq) |
@@ -317,23 +383,37 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
317 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 383 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
318 | goto err_tci; | 384 | goto err_tci; |
319 | } | 385 | } |
320 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
321 | 386 | ||
322 | /* The RNC must be unsuspended before the TMF can get a response. */ | 387 | if (tmf->cb_state_func != NULL) |
323 | isci_remote_device_resume_from_abort(ihost, idev); | 388 | tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data); |
389 | |||
390 | isci_request_change_state(ireq, started); | ||
391 | |||
392 | /* add the request to the remote device request list. */ | ||
393 | list_add(&ireq->dev_node, &idev->reqs_in_process); | ||
394 | |||
395 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
324 | 396 | ||
325 | /* Wait for the TMF to complete, or a timeout. */ | 397 | /* Wait for the TMF to complete, or a timeout. */ |
326 | timeleft = wait_for_completion_timeout(&completion, | 398 | timeleft = wait_for_completion_timeout(&completion, |
327 | msecs_to_jiffies(timeout_ms)); | 399 | msecs_to_jiffies(timeout_ms)); |
328 | 400 | ||
329 | if (timeleft == 0) { | 401 | if (timeleft == 0) { |
330 | /* The TMF did not complete - this could be because | 402 | spin_lock_irqsave(&ihost->scic_lock, flags); |
331 | * of an unplug. Terminate the TMF request now. | 403 | |
332 | */ | 404 | if (tmf->cb_state_func != NULL) |
333 | isci_remote_device_suspend_terminate(ihost, idev, ireq); | 405 | tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); |
406 | |||
407 | sci_controller_terminate_request(ihost, | ||
408 | idev, | ||
409 | ireq); | ||
410 | |||
411 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
412 | |||
413 | wait_for_completion(tmf->complete); | ||
334 | } | 414 | } |
335 | 415 | ||
336 | isci_print_tmf(ihost, tmf); | 416 | isci_print_tmf(tmf); |
337 | 417 | ||
338 | if (tmf->status == SCI_SUCCESS) | 418 | if (tmf->status == SCI_SUCCESS) |
339 | ret = TMF_RESP_FUNC_COMPLETE; | 419 | ret = TMF_RESP_FUNC_COMPLETE; |
@@ -362,21 +442,366 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
362 | } | 442 | } |
363 | 443 | ||
364 | static void isci_task_build_tmf(struct isci_tmf *tmf, | 444 | static void isci_task_build_tmf(struct isci_tmf *tmf, |
365 | enum isci_tmf_function_codes code) | 445 | enum isci_tmf_function_codes code, |
446 | void (*tmf_sent_cb)(enum isci_tmf_cb_state, | ||
447 | struct isci_tmf *, | ||
448 | void *), | ||
449 | void *cb_data) | ||
366 | { | 450 | { |
367 | memset(tmf, 0, sizeof(*tmf)); | 451 | memset(tmf, 0, sizeof(*tmf)); |
368 | tmf->tmf_code = code; | 452 | |
453 | tmf->tmf_code = code; | ||
454 | tmf->cb_state_func = tmf_sent_cb; | ||
455 | tmf->cb_data = cb_data; | ||
369 | } | 456 | } |
370 | 457 | ||
371 | static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, | 458 | static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, |
372 | enum isci_tmf_function_codes code, | 459 | enum isci_tmf_function_codes code, |
460 | void (*tmf_sent_cb)(enum isci_tmf_cb_state, | ||
461 | struct isci_tmf *, | ||
462 | void *), | ||
373 | struct isci_request *old_request) | 463 | struct isci_request *old_request) |
374 | { | 464 | { |
375 | isci_task_build_tmf(tmf, code); | 465 | isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request); |
376 | tmf->io_tag = old_request->io_tag; | 466 | tmf->io_tag = old_request->io_tag; |
377 | } | 467 | } |
378 | 468 | ||
379 | /** | 469 | /** |
470 | * isci_task_validate_request_to_abort() - This function checks the given I/O | ||
471 | * against the "started" state. If the request is still "started", it's | ||
472 | * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD | ||
473 | * BEFORE CALLING THIS FUNCTION. | ||
474 | * @isci_request: This parameter specifies the request object to control. | ||
475 | * @isci_host: This parameter specifies the ISCI host object | ||
476 | * @isci_device: This is the device to which the request is pending. | ||
477 | * @aborted_io_completion: This is a completion structure that will be added to | ||
478 | * the request in case it is changed to aborting; this completion is | ||
479 | * triggered when the request is fully completed. | ||
480 | * | ||
481 | * Return: either "started" on successful change of the task status to | ||
482 | * "aborted", or "unallocated" if the task cannot be controlled. | ||
483 | */ | ||
484 | static enum isci_request_status isci_task_validate_request_to_abort( | ||
485 | struct isci_request *isci_request, | ||
486 | struct isci_host *isci_host, | ||
487 | struct isci_remote_device *isci_device, | ||
488 | struct completion *aborted_io_completion) | ||
489 | { | ||
490 | enum isci_request_status old_state = unallocated; | ||
491 | |||
492 | /* Only abort the task if it's in the | ||
493 | * device's reqs_in_process list | ||
494 | */ | ||
495 | if (isci_request && !list_empty(&isci_request->dev_node)) { | ||
496 | old_state = isci_request_change_started_to_aborted( | ||
497 | isci_request, aborted_io_completion); | ||
498 | |||
499 | } | ||
500 | |||
501 | return old_state; | ||
502 | } | ||
503 | |||
504 | /** | ||
505 | * isci_request_cleanup_completed_loiterer() - This function will take care of | ||
506 | * the final cleanup on any request which has been explicitly terminated. | ||
507 | * @isci_host: This parameter specifies the ISCI host object | ||
508 | * @isci_device: This is the device to which the request is pending. | ||
509 | * @isci_request: This parameter specifies the terminated request object. | ||
510 | * @task: This parameter is the libsas I/O request. | ||
511 | */ | ||
512 | static void isci_request_cleanup_completed_loiterer( | ||
513 | struct isci_host *isci_host, | ||
514 | struct isci_remote_device *isci_device, | ||
515 | struct isci_request *isci_request, | ||
516 | struct sas_task *task) | ||
517 | { | ||
518 | unsigned long flags; | ||
519 | |||
520 | dev_dbg(&isci_host->pdev->dev, | ||
521 | "%s: isci_device=%p, request=%p, task=%p\n", | ||
522 | __func__, isci_device, isci_request, task); | ||
523 | |||
524 | if (task != NULL) { | ||
525 | |||
526 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
527 | task->lldd_task = NULL; | ||
528 | |||
529 | task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET; | ||
530 | |||
531 | isci_set_task_doneflags(task); | ||
532 | |||
533 | /* If this task is not in the abort path, call task_done. */ | ||
534 | if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
535 | |||
536 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
537 | task->task_done(task); | ||
538 | } else | ||
539 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
540 | } | ||
541 | |||
542 | if (isci_request != NULL) { | ||
543 | spin_lock_irqsave(&isci_host->scic_lock, flags); | ||
544 | list_del_init(&isci_request->dev_node); | ||
545 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | ||
546 | } | ||
547 | } | ||
548 | |||
549 | /** | ||
550 | * isci_terminate_request_core() - This function will terminate the given | ||
551 | * request, and wait for it to complete. This function must only be called | ||
552 | * from a thread that can wait. Note that the request is terminated and | ||
553 | * completed (back to the host, if started there). | ||
554 | * @ihost: This SCU. | ||
555 | * @idev: The target. | ||
556 | * @isci_request: The I/O request to be terminated. | ||
557 | * | ||
558 | */ | ||
559 | static void isci_terminate_request_core(struct isci_host *ihost, | ||
560 | struct isci_remote_device *idev, | ||
561 | struct isci_request *isci_request) | ||
562 | { | ||
563 | enum sci_status status = SCI_SUCCESS; | ||
564 | bool was_terminated = false; | ||
565 | bool needs_cleanup_handling = false; | ||
566 | enum isci_request_status request_status; | ||
567 | unsigned long flags; | ||
568 | unsigned long termination_completed = 1; | ||
569 | struct completion *io_request_completion; | ||
570 | struct sas_task *task; | ||
571 | |||
572 | dev_dbg(&ihost->pdev->dev, | ||
573 | "%s: device = %p; request = %p\n", | ||
574 | __func__, idev, isci_request); | ||
575 | |||
576 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
577 | |||
578 | io_request_completion = isci_request->io_request_completion; | ||
579 | |||
580 | task = (isci_request->ttype == io_task) | ||
581 | ? isci_request_access_task(isci_request) | ||
582 | : NULL; | ||
583 | |||
584 | /* Note that we are not going to control | ||
585 | * the target to abort the request. | ||
586 | */ | ||
587 | set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags); | ||
588 | |||
589 | /* Make sure the request wasn't just sitting around signalling | ||
590 | * device condition (if the request handle is NULL, then the | ||
591 | * request completed but needed additional handling here). | ||
592 | */ | ||
593 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { | ||
594 | was_terminated = true; | ||
595 | needs_cleanup_handling = true; | ||
596 | status = sci_controller_terminate_request(ihost, | ||
597 | idev, | ||
598 | isci_request); | ||
599 | } | ||
600 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
601 | |||
602 | /* | ||
603 | * The only time the request to terminate will | ||
604 | * fail is when the io request is completed and | ||
605 | * being aborted. | ||
606 | */ | ||
607 | if (status != SCI_SUCCESS) { | ||
608 | dev_dbg(&ihost->pdev->dev, | ||
609 | "%s: sci_controller_terminate_request" | ||
610 | " returned = 0x%x\n", | ||
611 | __func__, status); | ||
612 | |||
613 | isci_request->io_request_completion = NULL; | ||
614 | |||
615 | } else { | ||
616 | if (was_terminated) { | ||
617 | dev_dbg(&ihost->pdev->dev, | ||
618 | "%s: before completion wait (%p/%p)\n", | ||
619 | __func__, isci_request, io_request_completion); | ||
620 | |||
621 | /* Wait here for the request to complete. */ | ||
622 | #define TERMINATION_TIMEOUT_MSEC 500 | ||
623 | termination_completed | ||
624 | = wait_for_completion_timeout( | ||
625 | io_request_completion, | ||
626 | msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC)); | ||
627 | |||
628 | if (!termination_completed) { | ||
629 | |||
630 | /* The request to terminate has timed out. */ | ||
631 | spin_lock_irqsave(&ihost->scic_lock, | ||
632 | flags); | ||
633 | |||
634 | /* Check for state changes. */ | ||
635 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { | ||
636 | |||
637 | /* The best we can do is to have the | ||
638 | * request die a silent death if it | ||
639 | * ever really completes. | ||
640 | * | ||
641 | * Set the request state to "dead", | ||
642 | * and clear the task pointer so that | ||
643 | * an actual completion event callback | ||
644 | * doesn't do anything. | ||
645 | */ | ||
646 | isci_request->status = dead; | ||
647 | isci_request->io_request_completion | ||
648 | = NULL; | ||
649 | |||
650 | if (isci_request->ttype == io_task) { | ||
651 | |||
652 | /* Break links with the | ||
653 | * sas_task. | ||
654 | */ | ||
655 | isci_request->ttype_ptr.io_task_ptr | ||
656 | = NULL; | ||
657 | } | ||
658 | } else | ||
659 | termination_completed = 1; | ||
660 | |||
661 | spin_unlock_irqrestore(&ihost->scic_lock, | ||
662 | flags); | ||
663 | |||
664 | if (!termination_completed) { | ||
665 | |||
666 | dev_dbg(&ihost->pdev->dev, | ||
667 | "%s: *** Timeout waiting for " | ||
668 | "termination(%p/%p)\n", | ||
669 | __func__, io_request_completion, | ||
670 | isci_request); | ||
671 | |||
672 | /* The request can no longer be referenced | ||
673 | * safely since it may go away if the | ||
674 | * termination ever really does complete. | ||
675 | */ | ||
676 | isci_request = NULL; | ||
677 | } | ||
678 | } | ||
679 | if (termination_completed) | ||
680 | dev_dbg(&ihost->pdev->dev, | ||
681 | "%s: after completion wait (%p/%p)\n", | ||
682 | __func__, isci_request, io_request_completion); | ||
683 | } | ||
684 | |||
685 | if (termination_completed) { | ||
686 | |||
687 | isci_request->io_request_completion = NULL; | ||
688 | |||
689 | /* Peek at the status of the request. This will tell | ||
690 | * us if there was special handling on the request such that it | ||
691 | * needs to be detached and freed here. | ||
692 | */ | ||
693 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
694 | request_status = isci_request->status; | ||
695 | |||
696 | if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ | ||
697 | && ((request_status == aborted) | ||
698 | || (request_status == aborting) | ||
699 | || (request_status == terminating) | ||
700 | || (request_status == completed) | ||
701 | || (request_status == dead) | ||
702 | ) | ||
703 | ) { | ||
704 | |||
705 | /* The completion routine won't free a request in | ||
706 | * the aborted/aborting/etc. states, so we do | ||
707 | * it here. | ||
708 | */ | ||
709 | needs_cleanup_handling = true; | ||
710 | } | ||
711 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
712 | |||
713 | } | ||
714 | if (needs_cleanup_handling) | ||
715 | isci_request_cleanup_completed_loiterer( | ||
716 | ihost, idev, isci_request, task); | ||
717 | } | ||
718 | } | ||
719 | |||
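The timeout handling above is a bounded wait-then-orphan pattern: wait a fixed interval for the terminated I/O to signal its completion struct; on timeout, re-check the terminated flag under the lock, and if the request is still live, mark it dead and detach the completion pointer so a late completion becomes a no-op. A minimal userspace sketch of the same pattern, using POSIX primitives and hypothetical names:

	#include <pthread.h>
	#include <stdbool.h>
	#include <time.h>

	struct req {
		pthread_mutex_t lock;
		pthread_cond_t done_cv;
		bool terminated;                    /* plays the role of IREQ_TERMINATED */
		bool dead;                          /* plays the role of status == dead */
		void (*on_complete)(struct req *);  /* plays the role of io_request_completion */
	};

	/* Returns true if the termination completed within timeout_ms. */
	static bool wait_for_terminate(struct req *r, int timeout_ms)
	{
		struct timespec dl;

		clock_gettime(CLOCK_REALTIME, &dl);
		dl.tv_sec += timeout_ms / 1000;
		dl.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
		if (dl.tv_nsec >= 1000000000L) {
			dl.tv_sec++;
			dl.tv_nsec -= 1000000000L;
		}

		pthread_mutex_lock(&r->lock);
		while (!r->terminated) {
			if (pthread_cond_timedwait(&r->done_cv, &r->lock, &dl)) {
				/* Timed out: re-check state under the lock, just
				 * as the driver re-checks IREQ_TERMINATED. */
				if (r->terminated)
					break;
				/* Orphan the request so a late completion is a no-op. */
				r->dead = true;
				r->on_complete = NULL;
				pthread_mutex_unlock(&r->lock);
				return false;
			}
		}
		pthread_mutex_unlock(&r->lock);
		return true;
	}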
720 | /** | ||
721 | * isci_terminate_pending_requests() - This function will change the state of | ||
722 | * all requests on the given device to "aborting", will terminate the | ||
723 | * requests, and wait for them to complete. This function must only be | ||
724 | * called from a thread that can wait. Note that the requests are all | ||
725 | * terminated and completed (back to the host, if started there). | ||
726 | * @isci_host: This parameter specifies the SCU. | ||
727 | * @idev: This parameter specifies the target. | ||
728 | * | ||
729 | */ | ||
730 | void isci_terminate_pending_requests(struct isci_host *ihost, | ||
731 | struct isci_remote_device *idev) | ||
732 | { | ||
733 | struct completion request_completion; | ||
734 | enum isci_request_status old_state; | ||
735 | unsigned long flags; | ||
736 | LIST_HEAD(list); | ||
737 | |||
738 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
739 | list_splice_init(&idev->reqs_in_process, &list); | ||
740 | |||
741 | /* assumes that isci_terminate_request_core deletes from the list */ | ||
742 | while (!list_empty(&list)) { | ||
743 | struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node); | ||
744 | |||
745 | /* Change state to "terminating" if it is currently | ||
746 | * "started". | ||
747 | */ | ||
748 | old_state = isci_request_change_started_to_newstate(ireq, | ||
749 | &request_completion, | ||
750 | terminating); | ||
751 | switch (old_state) { | ||
752 | case started: | ||
753 | case completed: | ||
754 | case aborting: | ||
755 | break; | ||
756 | default: | ||
757 | /* termination in progress, or otherwise dispositioned. | ||
758 | * We know the request was on 'list' so should be safe | ||
759 | * to move it back to reqs_in_process | ||
760 | */ | ||
761 | list_move(&ireq->dev_node, &idev->reqs_in_process); | ||
762 | ireq = NULL; | ||
763 | break; | ||
764 | } | ||
765 | |||
766 | if (!ireq) | ||
767 | continue; | ||
768 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
769 | |||
770 | init_completion(&request_completion); | ||
771 | |||
772 | dev_dbg(&ihost->pdev->dev, | ||
773 | "%s: idev=%p request=%p; task=%p old_state=%d\n", | ||
774 | __func__, idev, ireq, | ||
775 | ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL, | ||
776 | old_state); | ||
777 | |||
778 | /* If the old_state is started: | ||
779 | * This request was not already being aborted. If it had been, | ||
780 | * then the aborting I/O (ie. the TMF request) would not be in | ||
781 | * the aborting state, and thus would be terminated here. Note | ||
782 | * that since the TMF completion's call to the kernel function | ||
783 | * "complete()" does not happen until the pending I/O request | ||
784 | * terminate fully completes, we do not have to implement a | ||
785 | * special wait here for already aborting requests - the | ||
786 | * termination of the TMF request will force the request | ||
787 | * to finish its already-started terminate. | ||
788 | * | ||
789 | * If old_state == completed: | ||
790 | * This request completed from the SCU hardware perspective | ||
791 | * and now just needs cleaning up in terms of freeing the | ||
792 | * request and potentially calling up to libsas. | ||
793 | * | ||
794 | * If old_state == aborting: | ||
795 | * This request has already gone through a TMF timeout, but may | ||
796 | * not have been terminated; needs cleaning up at least. | ||
797 | */ | ||
798 | isci_terminate_request_core(ihost, idev, ireq); | ||
799 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
800 | } | ||
801 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
802 | } | ||
803 | |||
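isci_terminate_pending_requests() above relies on a splice-and-drain idiom: take the lock once to move the whole reqs_in_process list onto a private list head, then drain the private list, dropping the lock around each terminate because the core routine blocks. A small userspace sketch of that idiom (names hypothetical; the real function also moves ineligible entries back, which is omitted here):

	#include <pthread.h>
	#include <stdio.h>

	struct node {
		struct node *next;
		int id;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *pending;            /* stands in for idev->reqs_in_process */

	static void terminate_one(struct node *n)
	{
		/* Stands in for isci_terminate_request_core(); may sleep. */
		printf("terminating request %d\n", n->id);
	}

	static void terminate_pending(void)
	{
		struct node *work;

		pthread_mutex_lock(&list_lock);
		work = pending;                 /* splice the list to a private head */
		pending = NULL;
		while (work) {
			struct node *n = work;

			work = n->next;
			pthread_mutex_unlock(&list_lock);   /* never block while locked */
			terminate_one(n);
			pthread_mutex_lock(&list_lock);
		}
		pthread_mutex_unlock(&list_lock);
	}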
804 | /** | ||
380 | * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain | 805 | * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain |
381 | * Template functions. | 806 | * Template functions. |
382 | * @lun: This parameter specifies the lun to be reset. | 807 | * @lun: This parameter specifies the lun to be reset. |
@@ -399,7 +824,7 @@ static int isci_task_send_lu_reset_sas( | |||
399 | * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or | 824 | * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or |
400 | * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). | 825 | * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). |
401 | */ | 826 | */ |
402 | isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset); | 827 | isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL); |
403 | 828 | ||
404 | #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ | 829 | #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ |
405 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); | 830 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); |
@@ -416,46 +841,85 @@ static int isci_task_send_lu_reset_sas( | |||
416 | return ret; | 841 | return ret; |
417 | } | 842 | } |
418 | 843 | ||
419 | int isci_task_lu_reset(struct domain_device *dev, u8 *lun) | 844 | static int isci_task_send_lu_reset_sata(struct isci_host *ihost, |
845 | struct isci_remote_device *idev, u8 *lun) | ||
420 | { | 846 | { |
421 | struct isci_host *ihost = dev_to_ihost(dev); | 847 | int ret = TMF_RESP_FUNC_FAILED; |
422 | struct isci_remote_device *idev; | 848 | struct isci_tmf tmf; |
849 | |||
850 | /* Send the soft reset to the target */ | ||
851 | #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */ | ||
852 | isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL); | ||
853 | |||
854 | ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS); | ||
855 | |||
856 | if (ret != TMF_RESP_FUNC_COMPLETE) { | ||
857 | dev_dbg(&ihost->pdev->dev, | ||
858 | "%s: Assert SRST failed (%p) = %x", | ||
859 | __func__, idev, ret); | ||
860 | |||
861 | /* Return the failure so that the LUN reset is escalated | ||
862 | * to a target reset. | ||
863 | */ | ||
864 | } | ||
865 | return ret; | ||
866 | } | ||
867 | |||
868 | /** | ||
869 | * isci_task_lu_reset() - This function is one of the SAS Domain Template | ||
870 | * functions. This is one of the Task Management functions called by libsas, | ||
871 | * to reset the given lun. Note the assumption that while this call is | ||
872 | * executing, no I/O will be sent by the host to the device. | ||
873 | * @lun: This parameter specifies the lun to be reset. | ||
874 | * | ||
875 | * status, zero indicates success. | ||
876 | */ | ||
877 | int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun) | ||
878 | { | ||
879 | struct isci_host *isci_host = dev_to_ihost(domain_device); | ||
880 | struct isci_remote_device *isci_device; | ||
423 | unsigned long flags; | 881 | unsigned long flags; |
424 | int ret = TMF_RESP_FUNC_COMPLETE; | 882 | int ret; |
425 | 883 | ||
426 | spin_lock_irqsave(&ihost->scic_lock, flags); | 884 | spin_lock_irqsave(&isci_host->scic_lock, flags); |
427 | idev = isci_get_device(dev->lldd_dev); | 885 | isci_device = isci_lookup_device(domain_device); |
428 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 886 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); |
429 | 887 | ||
430 | dev_dbg(&ihost->pdev->dev, | 888 | dev_dbg(&isci_host->pdev->dev, |
431 | "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", | 889 | "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", |
432 | __func__, dev, ihost, idev); | 890 | __func__, domain_device, isci_host, isci_device); |
433 | 891 | ||
434 | if (!idev) { | 892 | if (isci_device) |
435 | /* If the device is gone, escalate to I_T_Nexus_Reset. */ | 893 | set_bit(IDEV_EH, &isci_device->flags); |
436 | dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__); | ||
437 | 894 | ||
895 | /* If there is a device reset pending on any request in the | ||
896 | * device's list, fail this LUN reset request in order to | ||
897 | * escalate to the device reset. | ||
898 | */ | ||
899 | if (!isci_device || | ||
900 | isci_device_is_reset_pending(isci_host, isci_device)) { | ||
901 | dev_dbg(&isci_host->pdev->dev, | ||
902 | "%s: No dev (%p), or " | ||
903 | "RESET PENDING: domain_device=%p\n", | ||
904 | __func__, isci_device, domain_device); | ||
438 | ret = TMF_RESP_FUNC_FAILED; | 905 | ret = TMF_RESP_FUNC_FAILED; |
439 | goto out; | 906 | goto out; |
440 | } | 907 | } |
441 | 908 | ||
442 | /* Suspend the RNC, kill all TCs */ | 909 | /* Send the task management part of the reset. */ |
443 | if (isci_remote_device_suspend_terminate(ihost, idev, NULL) | 910 | if (sas_protocol_ata(domain_device->tproto)) { |
444 | != SCI_SUCCESS) { | 911 | ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun); |
445 | /* The suspend/terminate only fails if isci_get_device fails */ | 912 | } else |
446 | ret = TMF_RESP_FUNC_FAILED; | 913 | ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun); |
447 | goto out; | 914 | |
448 | } | 915 | /* If the LUN reset worked, all the I/O can now be terminated. */ |
449 | /* All pending I/Os have been terminated and cleaned up. */ | 916 | if (ret == TMF_RESP_FUNC_COMPLETE) |
450 | if (!test_bit(IDEV_GONE, &idev->flags)) { | 917 | /* Terminate all I/O now. */ |
451 | if (dev_is_sata(dev)) | 918 | isci_terminate_pending_requests(isci_host, |
452 | sas_ata_schedule_reset(dev); | 919 | isci_device); |
453 | else | 920 | |
454 | /* Send the task management part of the reset. */ | ||
455 | ret = isci_task_send_lu_reset_sas(ihost, idev, lun); | ||
456 | } | ||
457 | out: | 921 | out: |
458 | isci_put_device(idev); | 922 | isci_put_device(isci_device); |
459 | return ret; | 923 | return ret; |
460 | } | 924 | } |
461 | 925 | ||
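Condensing the LUN reset path above: the TMF return code is what drives libsas escalation, so a missing device or a pending device reset is reported as TMF_RESP_FUNC_FAILED, and pending I/O is only terminated once the reset succeeded and the LUN is known to be quiesced. A sketch of that decision flow, with stubbed-out senders and return-code values that should be treated as assumptions rather than the libsas definitions:

	#include <stdbool.h>

	#define TMF_RESP_FUNC_COMPLETE 0x00     /* assumed values */
	#define TMF_RESP_FUNC_FAILED   0x05

	static int send_sata_srst(void)        { return TMF_RESP_FUNC_COMPLETE; }
	static int send_ssp_lun_reset(void)    { return TMF_RESP_FUNC_COMPLETE; }
	static void terminate_pending_io(void) { /* drain, as sketched above */ }

	static int lu_reset(bool dev_present, bool reset_pending, bool is_sata)
	{
		int ret;

		/* Fail fast so libsas escalates to an I_T nexus (target) reset. */
		if (!dev_present || reset_pending)
			return TMF_RESP_FUNC_FAILED;

		ret = is_sata ? send_sata_srst() : send_ssp_lun_reset();

		/* Only terminate outstanding I/O once the LUN is quiesced. */
		if (ret == TMF_RESP_FUNC_COMPLETE)
			terminate_pending_io();

		return ret;
	}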
@@ -476,6 +940,63 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) | |||
476 | /* Task Management Functions. Must be called from process context. */ | 940 | /* Task Management Functions. Must be called from process context. */ |
477 | 941 | ||
478 | /** | 942 | /** |
943 | * isci_abort_task_process_cb() - This is a helper function for the abort task | ||
944 | * TMF command. It manages the request state with respect to the successful | ||
945 | * transmission / completion of the abort task request. | ||
946 | * @cb_state: This parameter specifies when this function was called - after | ||
947 | * the TMF request has been started and after it has timed-out. | ||
948 | * @tmf: This parameter specifies the TMF in progress. | ||
949 | * | ||
950 | * | ||
951 | */ | ||
952 | static void isci_abort_task_process_cb( | ||
953 | enum isci_tmf_cb_state cb_state, | ||
954 | struct isci_tmf *tmf, | ||
955 | void *cb_data) | ||
956 | { | ||
957 | struct isci_request *old_request; | ||
958 | |||
959 | old_request = (struct isci_request *)cb_data; | ||
960 | |||
961 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
962 | "%s: tmf=%p, old_request=%p\n", | ||
963 | __func__, tmf, old_request); | ||
964 | |||
965 | switch (cb_state) { | ||
966 | |||
967 | case isci_tmf_started: | ||
968 | /* The TMF has been started. Nothing to do here, since the | ||
969 | * request state was already set to "aborted" by the abort | ||
970 | * task function. | ||
971 | */ | ||
972 | if ((old_request->status != aborted) | ||
973 | && (old_request->status != completed)) | ||
974 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
975 | "%s: Bad request status (%d): tmf=%p, old_request=%p\n", | ||
976 | __func__, old_request->status, tmf, old_request); | ||
977 | break; | ||
978 | |||
979 | case isci_tmf_timed_out: | ||
980 | |||
981 | /* Set the task's state to "aborting", since the abort task | ||
982 | * function thread set it to "aborted" (above) in anticipation | ||
983 | * of the task management request working correctly. Since the | ||
984 | * timeout has now fired, the TMF request failed. We set the | ||
985 | * state such that the request completion will indicate the | ||
986 | * device is no longer present. | ||
987 | */ | ||
988 | isci_request_change_state(old_request, aborting); | ||
989 | break; | ||
990 | |||
991 | default: | ||
992 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
993 | "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n", | ||
994 | __func__, cb_state, tmf, old_request); | ||
995 | break; | ||
996 | } | ||
997 | } | ||
998 | |||
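The callback above is invoked at exactly two points in the TMF lifecycle, so its whole job is a two-state transition table: at start it only sanity-checks the status that the submitter already set, and at timeout it rewinds the request to "aborting" so the eventual completion reports the device as gone. Reduced to a sketch with hypothetical names:

	#include <stdio.h>

	enum tmf_cb_state { TMF_CB_STARTED, TMF_CB_TIMED_OUT };
	enum req_status { REQ_STARTED, REQ_ABORTED, REQ_ABORTING, REQ_COMPLETED };

	static void abort_task_cb(enum tmf_cb_state cb_state, enum req_status *status)
	{
		switch (cb_state) {
		case TMF_CB_STARTED:
			/* Submitter already set "aborted"; anything else is suspect. */
			if (*status != REQ_ABORTED && *status != REQ_COMPLETED)
				fprintf(stderr, "bad request status %d at TMF start\n",
					*status);
			break;
		case TMF_CB_TIMED_OUT:
			/* The TMF failed; rewind to "aborting" for the error path. */
			*status = REQ_ABORTING;
			break;
		}
	}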
999 | /** | ||
479 | * isci_task_abort_task() - This function is one of the SAS Domain Template | 1000 | * isci_task_abort_task() - This function is one of the SAS Domain Template |
480 | * functions. This function is called by libsas to abort a specified task. | 1001 | * functions. This function is called by libsas to abort a specified task. |
481 | * @task: This parameter specifies the SAS task to abort. | 1002 | * @task: This parameter specifies the SAS task to abort. |
@@ -484,20 +1005,22 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) | |||
484 | */ | 1005 | */ |
485 | int isci_task_abort_task(struct sas_task *task) | 1006 | int isci_task_abort_task(struct sas_task *task) |
486 | { | 1007 | { |
487 | struct isci_host *ihost = dev_to_ihost(task->dev); | 1008 | struct isci_host *isci_host = dev_to_ihost(task->dev); |
488 | DECLARE_COMPLETION_ONSTACK(aborted_io_completion); | 1009 | DECLARE_COMPLETION_ONSTACK(aborted_io_completion); |
489 | struct isci_request *old_request = NULL; | 1010 | struct isci_request *old_request = NULL; |
490 | struct isci_remote_device *idev = NULL; | 1011 | enum isci_request_status old_state; |
1012 | struct isci_remote_device *isci_device = NULL; | ||
491 | struct isci_tmf tmf; | 1013 | struct isci_tmf tmf; |
492 | int ret = TMF_RESP_FUNC_FAILED; | 1014 | int ret = TMF_RESP_FUNC_FAILED; |
493 | unsigned long flags; | 1015 | unsigned long flags; |
1016 | bool any_dev_reset = false; | ||
494 | 1017 | ||
495 | /* Get the isci_request reference from the task. Note that | 1018 | /* Get the isci_request reference from the task. Note that |
496 | * this check does not depend on the pending request list | 1019 | * this check does not depend on the pending request list |
497 | * in the device, because tasks driving resets may land here | 1020 | * in the device, because tasks driving resets may land here |
498 | * after completion in the core. | 1021 | * after completion in the core. |
499 | */ | 1022 | */ |
500 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1023 | spin_lock_irqsave(&isci_host->scic_lock, flags); |
501 | spin_lock(&task->task_state_lock); | 1024 | spin_lock(&task->task_state_lock); |
502 | 1025 | ||
503 | old_request = task->lldd_task; | 1026 | old_request = task->lldd_task; |
@@ -506,108 +1029,171 @@ int isci_task_abort_task(struct sas_task *task) | |||
506 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && | 1029 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && |
507 | (task->task_state_flags & SAS_TASK_AT_INITIATOR) && | 1030 | (task->task_state_flags & SAS_TASK_AT_INITIATOR) && |
508 | old_request) | 1031 | old_request) |
509 | idev = isci_get_device(task->dev->lldd_dev); | 1032 | isci_device = isci_lookup_device(task->dev); |
510 | 1033 | ||
511 | spin_unlock(&task->task_state_lock); | 1034 | spin_unlock(&task->task_state_lock); |
512 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1035 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); |
513 | 1036 | ||
514 | dev_warn(&ihost->pdev->dev, | 1037 | dev_dbg(&isci_host->pdev->dev, |
515 | "%s: dev = %p (%s%s), task = %p, old_request == %p\n", | 1038 | "%s: task = %p\n", __func__, task); |
516 | __func__, idev, | ||
517 | (dev_is_sata(task->dev) ? "STP/SATA" | ||
518 | : ((dev_is_expander(task->dev)) | ||
519 | ? "SMP" | ||
520 | : "SSP")), | ||
521 | ((idev) ? ((test_bit(IDEV_GONE, &idev->flags)) | ||
522 | ? " IDEV_GONE" | ||
523 | : "") | ||
524 | : " <NULL>"), | ||
525 | task, old_request); | ||
526 | |||
527 | /* Device reset conditions signalled in task_state_flags are the | ||
528 | * responsibility of libsas to observe at the start of the error | ||
529 | * handler thread. | ||
530 | */ | ||
531 | if (!idev || !old_request) { | ||
532 | /* The request has already completed and there | ||
533 | * is nothing to do here other than to set the task | ||
534 | * done bit, and indicate that the task abort function | ||
535 | * was successful. | ||
536 | */ | ||
537 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
538 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
539 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
540 | SAS_TASK_STATE_PENDING); | ||
541 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
542 | 1039 | ||
543 | ret = TMF_RESP_FUNC_COMPLETE; | 1040 | if (!isci_device || !old_request) |
1041 | goto out; | ||
544 | 1042 | ||
545 | dev_warn(&ihost->pdev->dev, | 1043 | set_bit(IDEV_EH, &isci_device->flags); |
546 | "%s: abort task not needed for %p\n", | 1044 | |
547 | __func__, task); | 1045 | /* This version of the driver will fail abort requests for |
1046 | * SATA/STP. Failing the abort request this way will cause the | ||
1047 | * SCSI error handler thread to escalate to LUN reset | ||
1048 | */ | ||
1049 | if (sas_protocol_ata(task->task_proto)) { | ||
1050 | dev_dbg(&isci_host->pdev->dev, | ||
1051 | " task %p is for a STP/SATA device;" | ||
1052 | " returning TMF_RESP_FUNC_FAILED\n" | ||
1053 | " to cause a LUN reset...\n", task); | ||
548 | goto out; | 1054 | goto out; |
549 | } | 1055 | } |
550 | /* Suspend the RNC, kill the TC */ | 1056 | |
551 | if (isci_remote_device_suspend_terminate(ihost, idev, old_request) | 1057 | dev_dbg(&isci_host->pdev->dev, |
552 | != SCI_SUCCESS) { | 1058 | "%s: old_request == %p\n", __func__, old_request); |
553 | dev_warn(&ihost->pdev->dev, | 1059 | |
554 | "%s: isci_remote_device_reset_terminate(dev=%p, " | 1060 | any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device); |
555 | "req=%p, task=%p) failed\n", | 1061 | |
556 | __func__, idev, old_request, task); | 1062 | spin_lock_irqsave(&task->task_state_lock, flags); |
557 | ret = TMF_RESP_FUNC_FAILED; | 1063 | |
1064 | any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET); | ||
1065 | |||
1066 | /* If the extraction of the request reference from the task | ||
1067 | * failed, then the request has been completed (or if there is a | ||
1068 | * pending reset then this abort request function must be failed | ||
1069 | * in order to escalate to the target reset). | ||
1070 | */ | ||
1071 | if ((old_request == NULL) || any_dev_reset) { | ||
1072 | |||
1073 | /* If the device reset task flag is set, fail the task | ||
1074 | * management request. Otherwise, the original request | ||
1075 | * has completed. | ||
1076 | */ | ||
1077 | if (any_dev_reset) { | ||
1078 | |||
1079 | /* Turn off the task's DONE to make sure this | ||
1080 | * task is escalated to a target reset. | ||
1081 | */ | ||
1082 | task->task_state_flags &= ~SAS_TASK_STATE_DONE; | ||
1083 | |||
1084 | /* Make the reset happen as soon as possible. */ | ||
1085 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | ||
1086 | |||
1087 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1088 | |||
1089 | /* Fail the task management request in order to | ||
1090 | * escalate to the target reset. | ||
1091 | */ | ||
1092 | ret = TMF_RESP_FUNC_FAILED; | ||
1093 | |||
1094 | dev_dbg(&isci_host->pdev->dev, | ||
1095 | "%s: Failing task abort in order to " | ||
1096 | "escalate to target reset because\n" | ||
1097 | "SAS_TASK_NEED_DEV_RESET is set for " | ||
1098 | "task %p on dev %p\n", | ||
1099 | __func__, task, isci_device); | ||
1100 | |||
1101 | |||
1102 | } else { | ||
1103 | /* The request has already completed and there | ||
1104 | * is nothing to do here other than to set the task | ||
1105 | * done bit, and indicate that the task abort function | ||
1106 | * was sucessful. | ||
1107 | * was successful. | ||
1108 | isci_set_task_doneflags(task); | ||
1109 | |||
1110 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1111 | |||
1112 | ret = TMF_RESP_FUNC_COMPLETE; | ||
1113 | |||
1114 | dev_dbg(&isci_host->pdev->dev, | ||
1115 | "%s: abort task not needed for %p\n", | ||
1116 | __func__, task); | ||
1117 | } | ||
558 | goto out; | 1118 | goto out; |
1119 | } else { | ||
1120 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
559 | } | 1121 | } |
560 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
561 | 1122 | ||
562 | if (task->task_proto == SAS_PROTOCOL_SMP || | 1123 | spin_lock_irqsave(&isci_host->scic_lock, flags); |
563 | sas_protocol_ata(task->task_proto) || | ||
564 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) || | ||
565 | test_bit(IDEV_GONE, &idev->flags)) { | ||
566 | 1124 | ||
567 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1125 | /* Check the request status and change to "aborted" if currently |
1126 | * "starting"; if true then set the I/O kernel completion | ||
1127 | * struct that will be triggered when the request completes. | ||
1128 | */ | ||
1129 | old_state = isci_task_validate_request_to_abort( | ||
1130 | old_request, isci_host, isci_device, | ||
1131 | &aborted_io_completion); | ||
1132 | if ((old_state != started) && | ||
1133 | (old_state != completed) && | ||
1134 | (old_state != aborting)) { | ||
568 | 1135 | ||
569 | /* No task to send, so explicitly resume the device here */ | 1136 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); |
570 | isci_remote_device_resume_from_abort(ihost, idev); | ||
571 | 1137 | ||
572 | dev_warn(&ihost->pdev->dev, | 1138 | /* The request was already being handled by someone else (because |
573 | "%s: %s request" | 1139 | * they got to set the state away from started). |
574 | " or complete_in_target (%d), " | 1140 | */ |
575 | "or IDEV_GONE (%d), thus no TMF\n", | 1141 | dev_dbg(&isci_host->pdev->dev, |
576 | __func__, | 1142 | "%s: device = %p; old_request %p already being aborted\n", |
577 | ((task->task_proto == SAS_PROTOCOL_SMP) | 1143 | __func__, |
578 | ? "SMP" | 1144 | isci_device, old_request); |
579 | : (sas_protocol_ata(task->task_proto) | 1145 | ret = TMF_RESP_FUNC_COMPLETE; |
580 | ? "SATA/STP" | 1146 | goto out; |
581 | : "<other>") | 1147 | } |
582 | ), | 1148 | if (task->task_proto == SAS_PROTOCOL_SMP || |
583 | test_bit(IREQ_COMPLETE_IN_TARGET, | 1149 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { |
584 | &old_request->flags), | ||
585 | test_bit(IDEV_GONE, &idev->flags)); | ||
586 | 1150 | ||
587 | spin_lock_irqsave(&task->task_state_lock, flags); | 1151 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); |
588 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | 1152 | |
589 | SAS_TASK_STATE_PENDING); | 1153 | dev_dbg(&isci_host->pdev->dev, |
590 | task->task_state_flags |= SAS_TASK_STATE_DONE; | 1154 | "%s: SMP request (%d)" |
591 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 1155 | " or complete_in_target (%d), thus no TMF\n", |
1156 | __func__, (task->task_proto == SAS_PROTOCOL_SMP), | ||
1157 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)); | ||
1158 | |||
1159 | /* Set the state on the task. */ | ||
1160 | isci_task_all_done(task); | ||
592 | 1161 | ||
593 | ret = TMF_RESP_FUNC_COMPLETE; | 1162 | ret = TMF_RESP_FUNC_COMPLETE; |
1163 | |||
1164 | /* Stopping and SMP devices are not sent a TMF, and are not | ||
1165 | * reset, but the outstanding I/O request is terminated below. | ||
1166 | */ | ||
594 | } else { | 1167 | } else { |
595 | /* Fill in the tmf stucture */ | 1168 | /* Fill in the tmf stucture */ |
596 | isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, | 1169 | isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, |
1170 | isci_abort_task_process_cb, | ||
597 | old_request); | 1171 | old_request); |
598 | 1172 | ||
599 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1173 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); |
600 | 1174 | ||
601 | /* Send the task management request. */ | 1175 | #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */ |
602 | #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ | 1176 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, |
603 | ret = isci_task_execute_tmf(ihost, idev, &tmf, | ||
604 | ISCI_ABORT_TASK_TIMEOUT_MS); | 1177 | ISCI_ABORT_TASK_TIMEOUT_MS); |
1178 | |||
1179 | if (ret != TMF_RESP_FUNC_COMPLETE) | ||
1180 | dev_dbg(&isci_host->pdev->dev, | ||
1181 | "%s: isci_task_send_tmf failed\n", | ||
1182 | __func__); | ||
605 | } | 1183 | } |
606 | out: | 1184 | if (ret == TMF_RESP_FUNC_COMPLETE) { |
607 | dev_warn(&ihost->pdev->dev, | 1185 | set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); |
608 | "%s: Done; dev = %p, task = %p , old_request == %p\n", | 1186 | |
609 | __func__, idev, task, old_request); | 1187 | /* Clean up the request on our side, and wait for the aborted |
610 | isci_put_device(idev); | 1188 | * I/O to complete. |
1189 | */ | ||
1190 | isci_terminate_request_core(isci_host, isci_device, old_request); | ||
1191 | } | ||
1192 | |||
1193 | /* Make sure we do not leave a reference to aborted_io_completion */ | ||
1194 | old_request->io_request_completion = NULL; | ||
1195 | out: | ||
1196 | isci_put_device(isci_device); | ||
611 | return ret; | 1197 | return ret; |
612 | } | 1198 | } |
613 | 1199 | ||
@@ -702,84 +1288,345 @@ isci_task_request_complete(struct isci_host *ihost, | |||
702 | enum sci_task_status completion_status) | 1288 | enum sci_task_status completion_status) |
703 | { | 1289 | { |
704 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | 1290 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); |
705 | struct completion *tmf_complete = NULL; | 1291 | struct completion *tmf_complete; |
706 | 1292 | ||
707 | dev_dbg(&ihost->pdev->dev, | 1293 | dev_dbg(&ihost->pdev->dev, |
708 | "%s: request = %p, status=%d\n", | 1294 | "%s: request = %p, status=%d\n", |
709 | __func__, ireq, completion_status); | 1295 | __func__, ireq, completion_status); |
710 | 1296 | ||
1297 | isci_request_change_state(ireq, completed); | ||
1298 | |||
1299 | tmf->status = completion_status; | ||
711 | set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); | 1300 | set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); |
712 | 1301 | ||
713 | if (tmf) { | 1302 | if (tmf->proto == SAS_PROTOCOL_SSP) { |
714 | tmf->status = completion_status; | 1303 | memcpy(&tmf->resp.resp_iu, |
715 | 1304 | &ireq->ssp.rsp, | |
716 | if (tmf->proto == SAS_PROTOCOL_SSP) { | 1305 | SSP_RESP_IU_MAX_SIZE); |
717 | memcpy(&tmf->resp.resp_iu, | 1306 | } else if (tmf->proto == SAS_PROTOCOL_SATA) { |
718 | &ireq->ssp.rsp, | 1307 | memcpy(&tmf->resp.d2h_fis, |
719 | SSP_RESP_IU_MAX_SIZE); | 1308 | &ireq->stp.rsp, |
720 | } else if (tmf->proto == SAS_PROTOCOL_SATA) { | 1309 | sizeof(struct dev_to_host_fis)); |
721 | memcpy(&tmf->resp.d2h_fis, | ||
722 | &ireq->stp.rsp, | ||
723 | sizeof(struct dev_to_host_fis)); | ||
724 | } | ||
725 | /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ | ||
726 | tmf_complete = tmf->complete; | ||
727 | } | 1310 | } |
1311 | |||
1312 | /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ | ||
1313 | tmf_complete = tmf->complete; | ||
1314 | |||
728 | sci_controller_complete_io(ihost, ireq->target_device, ireq); | 1315 | sci_controller_complete_io(ihost, ireq->target_device, ireq); |
729 | /* set the 'terminated' flag handle to make sure it cannot be terminated | 1316 | /* set the 'terminated' flag handle to make sure it cannot be terminated |
730 | * or completed again. | 1317 | * or completed again. |
731 | */ | 1318 | */ |
732 | set_bit(IREQ_TERMINATED, &ireq->flags); | 1319 | set_bit(IREQ_TERMINATED, &ireq->flags); |
733 | 1320 | ||
734 | if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) | 1321 | isci_request_change_state(ireq, unallocated); |
735 | wake_up_all(&ihost->eventq); | 1322 | list_del_init(&ireq->dev_node); |
736 | |||
737 | if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) | ||
738 | isci_free_tag(ihost, ireq->io_tag); | ||
739 | 1323 | ||
740 | /* The task management part completes last. */ | 1324 | /* The task management part completes last. */ |
741 | if (tmf_complete) | 1325 | complete(tmf_complete); |
742 | complete(tmf_complete); | 1326 | } |
1327 | |||
1328 | static void isci_smp_task_timedout(unsigned long _task) | ||
1329 | { | ||
1330 | struct sas_task *task = (void *) _task; | ||
1331 | unsigned long flags; | ||
1332 | |||
1333 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1334 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) | ||
1335 | task->task_state_flags |= SAS_TASK_STATE_ABORTED; | ||
1336 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1337 | |||
1338 | complete(&task->completion); | ||
1339 | } | ||
1340 | |||
1341 | static void isci_smp_task_done(struct sas_task *task) | ||
1342 | { | ||
1343 | if (!del_timer(&task->timer)) | ||
1344 | return; | ||
1345 | complete(&task->completion); | ||
1346 | } | ||
1347 | |||
1348 | static struct sas_task *isci_alloc_task(void) | ||
1349 | { | ||
1350 | struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL); | ||
1351 | |||
1352 | if (task) { | ||
1353 | INIT_LIST_HEAD(&task->list); | ||
1354 | spin_lock_init(&task->task_state_lock); | ||
1355 | task->task_state_flags = SAS_TASK_STATE_PENDING; | ||
1356 | init_timer(&task->timer); | ||
1357 | init_completion(&task->completion); | ||
1358 | } | ||
1359 | |||
1360 | return task; | ||
1361 | } | ||
1362 | |||
1363 | static void isci_free_task(struct isci_host *ihost, struct sas_task *task) | ||
1364 | { | ||
1365 | if (task) { | ||
1366 | BUG_ON(!list_empty(&task->list)); | ||
1367 | kfree(task); | ||
1368 | } | ||
1369 | } | ||
1370 | |||
1371 | static int isci_smp_execute_task(struct isci_host *ihost, | ||
1372 | struct domain_device *dev, void *req, | ||
1373 | int req_size, void *resp, int resp_size) | ||
1374 | { | ||
1375 | int res, retry; | ||
1376 | struct sas_task *task = NULL; | ||
1377 | |||
1378 | for (retry = 0; retry < 3; retry++) { | ||
1379 | task = isci_alloc_task(); | ||
1380 | if (!task) | ||
1381 | return -ENOMEM; | ||
1382 | |||
1383 | task->dev = dev; | ||
1384 | task->task_proto = dev->tproto; | ||
1385 | sg_init_one(&task->smp_task.smp_req, req, req_size); | ||
1386 | sg_init_one(&task->smp_task.smp_resp, resp, resp_size); | ||
1387 | |||
1388 | task->task_done = isci_smp_task_done; | ||
1389 | |||
1390 | task->timer.data = (unsigned long) task; | ||
1391 | task->timer.function = isci_smp_task_timedout; | ||
1392 | task->timer.expires = jiffies + 10*HZ; | ||
1393 | add_timer(&task->timer); | ||
1394 | |||
1395 | res = isci_task_execute_task(task, 1, GFP_KERNEL); | ||
1396 | |||
1397 | if (res) { | ||
1398 | del_timer(&task->timer); | ||
1399 | dev_dbg(&ihost->pdev->dev, | ||
1400 | "%s: executing SMP task failed:%d\n", | ||
1401 | __func__, res); | ||
1402 | goto ex_err; | ||
1403 | } | ||
1404 | |||
1405 | wait_for_completion(&task->completion); | ||
1406 | res = -ECOMM; | ||
1407 | if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
1408 | dev_dbg(&ihost->pdev->dev, | ||
1409 | "%s: smp task timed out or aborted\n", | ||
1410 | __func__); | ||
1411 | isci_task_abort_task(task); | ||
1412 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { | ||
1413 | dev_dbg(&ihost->pdev->dev, | ||
1414 | "%s: SMP task aborted and not done\n", | ||
1415 | __func__); | ||
1416 | goto ex_err; | ||
1417 | } | ||
1418 | } | ||
1419 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
1420 | task->task_status.stat == SAM_STAT_GOOD) { | ||
1421 | res = 0; | ||
1422 | break; | ||
1423 | } | ||
1424 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
1425 | task->task_status.stat == SAS_DATA_UNDERRUN) { | ||
1426 | /* no error, but return the number of bytes of | ||
1427 | * underrun */ | ||
1428 | res = task->task_status.residual; | ||
1429 | break; | ||
1430 | } | ||
1431 | if (task->task_status.resp == SAS_TASK_COMPLETE && | ||
1432 | task->task_status.stat == SAS_DATA_OVERRUN) { | ||
1433 | res = -EMSGSIZE; | ||
1434 | break; | ||
1435 | } else { | ||
1436 | dev_dbg(&ihost->pdev->dev, | ||
1437 | "%s: task to dev %016llx response: 0x%x " | ||
1438 | "status 0x%x\n", __func__, | ||
1439 | SAS_ADDR(dev->sas_addr), | ||
1440 | task->task_status.resp, | ||
1441 | task->task_status.stat); | ||
1442 | isci_free_task(ihost, task); | ||
1443 | task = NULL; | ||
1444 | } | ||
1445 | } | ||
1446 | ex_err: | ||
1447 | BUG_ON(retry == 3 && task != NULL); | ||
1448 | isci_free_task(ihost, task); | ||
1449 | return res; | ||
1450 | } | ||
1451 | |||
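The loop above encodes a small status-mapping contract worth calling out: SAM_STAT_GOOD yields 0, a data underrun is not an error and returns the residual byte count, an overrun maps to -EMSGSIZE, and anything else burns one of three retries, with -ECOMM as the final fallback. A compilable sketch of just that mapping (names hypothetical):

	#include <errno.h>

	enum smp_resp_kind { RESP_COMPLETE, RESP_OTHER };
	enum smp_stat { STAT_GOOD, STAT_UNDERRUN, STAT_OVERRUN, STAT_ERROR };

	struct smp_result {
		enum smp_resp_kind resp;
		enum smp_stat stat;
		int residual;
	};

	/* Stands in for one alloc/execute/wait cycle of the function above. */
	static struct smp_result run_smp_once(void)
	{
		struct smp_result r = { RESP_COMPLETE, STAT_GOOD, 0 };
		return r;
	}

	static int smp_execute(void)
	{
		int retry;

		for (retry = 0; retry < 3; retry++) {
			struct smp_result r = run_smp_once();

			if (r.resp == RESP_COMPLETE && r.stat == STAT_GOOD)
				return 0;               /* success */
			if (r.resp == RESP_COMPLETE && r.stat == STAT_UNDERRUN)
				return r.residual;      /* not an error: underrun bytes */
			if (r.resp == RESP_COMPLETE && r.stat == STAT_OVERRUN)
				return -EMSGSIZE;       /* response bigger than buffer */
			/* otherwise: log, free the task, and retry */
		}
		return -ECOMM;                          /* all three tries failed */
	}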
1452 | #define DISCOVER_REQ_SIZE 16 | ||
1453 | #define DISCOVER_RESP_SIZE 56 | ||
1454 | |||
1455 | int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost, | ||
1456 | struct domain_device *dev, | ||
1457 | int phy_id, int *adt) | ||
1458 | { | ||
1459 | struct smp_resp *disc_resp; | ||
1460 | u8 *disc_req; | ||
1461 | int res; | ||
1462 | |||
1463 | disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL); | ||
1464 | if (!disc_resp) | ||
1465 | return -ENOMEM; | ||
1466 | |||
1467 | disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL); | ||
1468 | if (disc_req) { | ||
1469 | disc_req[0] = SMP_REQUEST; | ||
1470 | disc_req[1] = SMP_DISCOVER; | ||
1471 | disc_req[9] = phy_id; | ||
1472 | } else { | ||
1473 | kfree(disc_resp); | ||
1474 | return -ENOMEM; | ||
1475 | } | ||
1476 | res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE, | ||
1477 | disc_resp, DISCOVER_RESP_SIZE); | ||
1478 | if (!res) { | ||
1479 | if (disc_resp->result != SMP_RESP_FUNC_ACC) | ||
1480 | res = disc_resp->result; | ||
1481 | else | ||
1482 | *adt = disc_resp->disc.attached_dev_type; | ||
1483 | } | ||
1484 | kfree(disc_req); | ||
1485 | kfree(disc_resp); | ||
1486 | |||
1487 | return res; | ||
1488 | } | ||
1489 | |||
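The request framing above is all fixed offsets: byte 0 carries the SMP frame type, byte 1 the DISCOVER function, and byte 9 the phy of interest, in a 16-byte request paired with a 56-byte response. A standalone sketch that builds and dumps such a frame (the 0x40/0x10 opcode values are taken from the SAS spec and should be treated as assumptions here):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define DISCOVER_REQ_SIZE  16
	#define DISCOVER_RESP_SIZE 56

	static void build_discover(uint8_t *req, int phy_id)
	{
		memset(req, 0, DISCOVER_REQ_SIZE);
		req[0] = 0x40;                  /* SMP_REQUEST frame type (assumed) */
		req[1] = 0x10;                  /* SMP_DISCOVER function (assumed) */
		req[9] = (uint8_t)phy_id;       /* phy identifier */
	}

	int main(void)
	{
		uint8_t req[DISCOVER_REQ_SIZE];
		int i;

		build_discover(req, 3);
		for (i = 0; i < DISCOVER_REQ_SIZE; i++)
			printf("%02x%c", req[i],
			       i == DISCOVER_REQ_SIZE - 1 ? '\n' : ' ');
		return 0;
	}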
1490 | static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num) | ||
1491 | { | ||
1492 | struct domain_device *dev = idev->domain_dev; | ||
1493 | struct isci_port *iport = idev->isci_port; | ||
1494 | struct isci_host *ihost = iport->isci_host; | ||
1495 | int res, iteration = 0, attached_device_type; | ||
1496 | #define STP_WAIT_MSECS 25000 | ||
1497 | unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS); | ||
1498 | unsigned long deadline = jiffies + tmo; | ||
1499 | enum { | ||
1500 | SMP_PHYWAIT_PHYDOWN, | ||
1501 | SMP_PHYWAIT_PHYUP, | ||
1502 | SMP_PHYWAIT_DONE | ||
1503 | } phy_state = SMP_PHYWAIT_PHYDOWN; | ||
1504 | |||
1505 | /* While there is time, wait for the phy to go away and come back */ | ||
1506 | while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) { | ||
1507 | int event = atomic_read(&iport->event); | ||
1508 | |||
1509 | ++iteration; | ||
1510 | |||
1511 | tmo = wait_event_timeout(ihost->eventq, | ||
1512 | event != atomic_read(&iport->event) || | ||
1513 | !test_bit(IPORT_BCN_BLOCKED, &iport->flags), | ||
1514 | tmo); | ||
1515 | /* link down, stop polling */ | ||
1516 | if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags)) | ||
1517 | break; | ||
1518 | |||
1519 | dev_dbg(&ihost->pdev->dev, | ||
1520 | "%s: iport %p, iteration %d," | ||
1521 | " phase %d: time_remaining %lu, bcns = %d\n", | ||
1522 | __func__, iport, iteration, phy_state, | ||
1523 | tmo, test_bit(IPORT_BCN_PENDING, &iport->flags)); | ||
1524 | |||
1525 | res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num, | ||
1526 | &attached_device_type); | ||
1527 | tmo = deadline - jiffies; | ||
1528 | |||
1529 | if (res) { | ||
1530 | dev_dbg(&ihost->pdev->dev, | ||
1531 | "%s: iteration %d, phase %d:" | ||
1532 | " SMP error=%d, time_remaining=%lu\n", | ||
1533 | __func__, iteration, phy_state, res, tmo); | ||
1534 | break; | ||
1535 | } | ||
1536 | dev_dbg(&ihost->pdev->dev, | ||
1537 | "%s: iport %p, iteration %d," | ||
1538 | " phase %d: time_remaining %lu, bcns = %d, " | ||
1539 | "attdevtype = %x\n", | ||
1540 | __func__, iport, iteration, phy_state, | ||
1541 | tmo, test_bit(IPORT_BCN_PENDING, &iport->flags), | ||
1542 | attached_device_type); | ||
1543 | |||
1544 | switch (phy_state) { | ||
1545 | case SMP_PHYWAIT_PHYDOWN: | ||
1546 | /* Has the device gone away? */ | ||
1547 | if (!attached_device_type) | ||
1548 | phy_state = SMP_PHYWAIT_PHYUP; | ||
1549 | |||
1550 | break; | ||
1551 | |||
1552 | case SMP_PHYWAIT_PHYUP: | ||
1553 | /* Has the device come back? */ | ||
1554 | if (attached_device_type) | ||
1555 | phy_state = SMP_PHYWAIT_DONE; | ||
1556 | break; | ||
1557 | |||
1558 | case SMP_PHYWAIT_DONE: | ||
1559 | break; | ||
1560 | } | ||
1561 | |||
1562 | } | ||
1563 | dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__); | ||
743 | } | 1564 | } |
744 | 1565 | ||
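The wait above is a three-state poll under a deadline: first wait for the attached device to disappear (phy down), then for it to reappear (phy up), and only then let discovery proceed. A deadline-driven userspace sketch of the same state machine, with a toy probe standing in for the SMP DISCOVER query:

	#include <stdbool.h>
	#include <time.h>

	enum phywait { WAIT_PHYDOWN, WAIT_PHYUP, WAIT_DONE };

	/* Toy stand-in for the attached-device query; a real caller would issue
	 * SMP DISCOVER as in isci_smp_get_phy_attached_dev_type(). */
	static bool probe_attached(int phy)
	{
		static int calls;
		(void)phy;
		return ++calls > 2;     /* absent twice, then back */
	}

	static enum phywait wait_for_phy_reset(int phy, int timeout_s)
	{
		time_t deadline = time(NULL) + timeout_s;
		enum phywait state = WAIT_PHYDOWN;

		while (time(NULL) < deadline && state != WAIT_DONE) {
			bool attached = probe_attached(phy);

			switch (state) {
			case WAIT_PHYDOWN:              /* has the device gone away? */
				if (!attached)
					state = WAIT_PHYUP;
				break;
			case WAIT_PHYUP:                /* has it come back? */
				if (attached)
					state = WAIT_DONE;
				break;
			case WAIT_DONE:
				break;
			}
			/* the driver sleeps on ihost->eventq here instead of spinning */
		}
		return state;
	}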
745 | static int isci_reset_device(struct isci_host *ihost, | 1566 | static int isci_reset_device(struct isci_host *ihost, |
746 | struct domain_device *dev, | ||
747 | struct isci_remote_device *idev) | 1567 | struct isci_remote_device *idev) |
748 | { | 1568 | { |
749 | int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1; | 1569 | struct sas_phy *phy = sas_find_local_phy(idev->domain_dev); |
750 | struct sas_phy *phy = sas_get_local_phy(dev); | 1570 | struct isci_port *iport = idev->isci_port; |
751 | struct isci_port *iport = dev->port->lldd_port; | 1571 | enum sci_status status; |
1572 | unsigned long flags; | ||
1573 | int rc; | ||
752 | 1574 | ||
753 | dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); | 1575 | dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); |
754 | 1576 | ||
755 | /* Suspend the RNC, terminate all outstanding TCs. */ | 1577 | spin_lock_irqsave(&ihost->scic_lock, flags); |
756 | if (isci_remote_device_suspend_terminate(ihost, idev, NULL) | 1578 | status = sci_remote_device_reset(idev); |
757 | != SCI_SUCCESS) { | 1579 | if (status != SCI_SUCCESS) { |
758 | rc = TMF_RESP_FUNC_FAILED; | 1580 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
759 | goto out; | 1581 | |
1582 | dev_dbg(&ihost->pdev->dev, | ||
1583 | "%s: sci_remote_device_reset(%p) returned %d!\n", | ||
1584 | __func__, idev, status); | ||
1585 | |||
1586 | return TMF_RESP_FUNC_FAILED; | ||
760 | } | 1587 | } |
761 | /* Note that since the termination for outstanding requests succeeded, | 1588 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
762 | * this function will return success. This is because the resets will | ||
763 | * only fail if the device has been removed (ie. hotplug), and the | ||
764 | * primary duty of this function is to cleanup tasks, so that is the | ||
765 | * relevant status. | ||
766 | */ | ||
767 | if (!test_bit(IDEV_GONE, &idev->flags)) { | ||
768 | if (scsi_is_sas_phy_local(phy)) { | ||
769 | struct isci_phy *iphy = &ihost->phys[phy->number]; | ||
770 | 1589 | ||
771 | reset_stat = isci_port_perform_hard_reset(ihost, iport, | 1590 | /* Make sure all pending requests are able to be fully terminated. */ |
772 | iphy); | 1591 | isci_device_clear_reset_pending(ihost, idev); |
773 | } else | 1592 | |
774 | reset_stat = sas_phy_reset(phy, !dev_is_sata(dev)); | 1593 | /* If this is a device on an expander, disable BCN processing. */ |
1594 | if (!scsi_is_sas_phy_local(phy)) | ||
1595 | set_bit(IPORT_BCN_BLOCKED, &iport->flags); | ||
1596 | |||
1597 | rc = sas_phy_reset(phy, true); | ||
1598 | |||
1599 | /* Terminate in-progress I/O now. */ | ||
1600 | isci_remote_device_nuke_requests(ihost, idev); | ||
1601 | |||
1602 | /* Since all pending TCs have been cleaned, resume the RNC. */ | ||
1603 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1604 | status = sci_remote_device_reset_complete(idev); | ||
1605 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1606 | |||
1607 | /* If this is a device on an expander, bring the phy back up. */ | ||
1608 | if (!scsi_is_sas_phy_local(phy)) { | ||
1609 | /* A phy reset will cause the device to go away then reappear. | ||
1610 | * Since libsas will take action on incoming BCNs (eg. remove | ||
1611 | * a device going through an SMP phy-control driven reset), | ||
1612 | * we need to wait until the phy comes back up before letting | ||
1613 | * discovery proceed in libsas. | ||
1614 | */ | ||
1615 | isci_wait_for_smp_phy_reset(idev, phy->number); | ||
1616 | |||
1617 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1618 | isci_port_bcn_enable(ihost, idev->isci_port); | ||
1619 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
775 | } | 1620 | } |
776 | /* Explicitly resume the RNC here, since there was no task sent. */ | ||
777 | isci_remote_device_resume_from_abort(ihost, idev); | ||
778 | 1621 | ||
779 | dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n", | 1622 | if (status != SCI_SUCCESS) { |
780 | __func__, idev, reset_stat); | 1623 | dev_dbg(&ihost->pdev->dev, |
781 | out: | 1624 | "%s: sci_remote_device_reset_complete(%p) " |
782 | sas_put_local_phy(phy); | 1625 | "returned %d!\n", __func__, idev, status); |
1626 | } | ||
1627 | |||
1628 | dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev); | ||
1629 | |||
783 | return rc; | 1630 | return rc; |
784 | } | 1631 | } |
785 | 1632 | ||
@@ -791,18 +1638,38 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev) | |||
791 | int ret; | 1638 | int ret; |
792 | 1639 | ||
793 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1640 | spin_lock_irqsave(&ihost->scic_lock, flags); |
794 | idev = isci_get_device(dev->lldd_dev); | 1641 | idev = isci_lookup_device(dev); |
1642 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1643 | |||
1644 | if (!idev || !test_bit(IDEV_EH, &idev->flags)) { | ||
1645 | ret = TMF_RESP_FUNC_COMPLETE; | ||
1646 | goto out; | ||
1647 | } | ||
1648 | |||
1649 | ret = isci_reset_device(ihost, idev); | ||
1650 | out: | ||
1651 | isci_put_device(idev); | ||
1652 | return ret; | ||
1653 | } | ||
1654 | |||
1655 | int isci_bus_reset_handler(struct scsi_cmnd *cmd) | ||
1656 | { | ||
1657 | struct domain_device *dev = sdev_to_domain_dev(cmd->device); | ||
1658 | struct isci_host *ihost = dev_to_ihost(dev); | ||
1659 | struct isci_remote_device *idev; | ||
1660 | unsigned long flags; | ||
1661 | int ret; | ||
1662 | |||
1663 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1664 | idev = isci_lookup_device(dev); | ||
795 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1665 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
796 | 1666 | ||
797 | if (!idev) { | 1667 | if (!idev) { |
798 | /* XXX: need to cleanup any ireqs targeting this | ||
799 | * domain_device | ||
800 | */ | ||
801 | ret = TMF_RESP_FUNC_COMPLETE; | 1668 | ret = TMF_RESP_FUNC_COMPLETE; |
802 | goto out; | 1669 | goto out; |
803 | } | 1670 | } |
804 | 1671 | ||
805 | ret = isci_reset_device(ihost, dev, idev); | 1672 | ret = isci_reset_device(ihost, idev); |
806 | out: | 1673 | out: |
807 | isci_put_device(idev); | 1674 | isci_put_device(idev); |
808 | return ret; | 1675 | return ret; |
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index 9c06cbad1d2..4a7fa90287e 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h | |||
@@ -58,11 +58,22 @@ | |||
58 | #include <scsi/sas_ata.h> | 58 | #include <scsi/sas_ata.h> |
59 | #include "host.h" | 59 | #include "host.h" |
60 | 60 | ||
61 | #define ISCI_TERMINATION_TIMEOUT_MSEC 500 | ||
62 | |||
63 | struct isci_request; | 61 | struct isci_request; |
64 | 62 | ||
65 | /** | 63 | /** |
64 | * enum isci_tmf_cb_state - This enum defines the possible states in which the | ||
65 | * TMF callback function is invoked during the TMF execution process. | ||
66 | * | ||
67 | * | ||
68 | */ | ||
69 | enum isci_tmf_cb_state { | ||
70 | |||
71 | isci_tmf_init_state = 0, | ||
72 | isci_tmf_started, | ||
73 | isci_tmf_timed_out | ||
74 | }; | ||
75 | |||
76 | /** | ||
66 | * enum isci_tmf_function_codes - This enum defines the possible preparations | 77 | * enum isci_tmf_function_codes - This enum defines the possible preparations |
67 | * of task management requests. | 78 | * of task management requests. |
68 | * | 79 | * |
@@ -73,8 +84,9 @@ enum isci_tmf_function_codes { | |||
73 | isci_tmf_func_none = 0, | 84 | isci_tmf_func_none = 0, |
74 | isci_tmf_ssp_task_abort = TMF_ABORT_TASK, | 85 | isci_tmf_ssp_task_abort = TMF_ABORT_TASK, |
75 | isci_tmf_ssp_lun_reset = TMF_LU_RESET, | 86 | isci_tmf_ssp_lun_reset = TMF_LU_RESET, |
87 | isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */ | ||
88 | isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */ | ||
76 | }; | 89 | }; |
77 | |||
78 | /** | 90 | /** |
79 | * struct isci_tmf - This class represents the task management object which | 91 | * struct isci_tmf - This class represents the task management object which |
80 | * acts as an interface to libsas for processing task management requests | 92 | * acts as an interface to libsas for processing task management requests |
@@ -92,14 +104,24 @@ struct isci_tmf { | |||
92 | } resp; | 104 | } resp; |
93 | unsigned char lun[8]; | 105 | unsigned char lun[8]; |
94 | u16 io_tag; | 106 | u16 io_tag; |
107 | struct isci_remote_device *device; | ||
95 | enum isci_tmf_function_codes tmf_code; | 108 | enum isci_tmf_function_codes tmf_code; |
96 | int status; | 109 | int status; |
110 | |||
111 | /* The optional callback function allows the user process to | ||
112 | * track the TMF transmit / timeout conditions. | ||
113 | */ | ||
114 | void (*cb_state_func)( | ||
115 | enum isci_tmf_cb_state, | ||
116 | struct isci_tmf *, void *); | ||
117 | void *cb_data; | ||
118 | |||
97 | }; | 119 | }; |
98 | 120 | ||
99 | static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) | 121 | static inline void isci_print_tmf(struct isci_tmf *tmf) |
100 | { | 122 | { |
101 | if (SAS_PROTOCOL_SATA == tmf->proto) | 123 | if (SAS_PROTOCOL_SATA == tmf->proto) |
102 | dev_dbg(&ihost->pdev->dev, | 124 | dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, |
103 | "%s: status = %x\n" | 125 | "%s: status = %x\n" |
104 | "tmf->resp.d2h_fis.status = %x\n" | 126 | "tmf->resp.d2h_fis.status = %x\n" |
105 | "tmf->resp.d2h_fis.error = %x\n", | 127 | "tmf->resp.d2h_fis.error = %x\n", |
@@ -108,7 +130,7 @@ static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) | |||
108 | tmf->resp.d2h_fis.status, | 130 | tmf->resp.d2h_fis.status, |
109 | tmf->resp.d2h_fis.error); | 131 | tmf->resp.d2h_fis.error); |
110 | else | 132 | else |
111 | dev_dbg(&ihost->pdev->dev, | 133 | dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, |
112 | "%s: status = %x\n" | 134 | "%s: status = %x\n" |
113 | "tmf->resp.resp_iu.data_present = %x\n" | 135 | "tmf->resp.resp_iu.data_present = %x\n" |
114 | "tmf->resp.resp_iu.status = %x\n" | 136 | "tmf->resp.resp_iu.status = %x\n" |
@@ -187,4 +209,159 @@ int isci_queuecommand( | |||
187 | struct scsi_cmnd *scsi_cmd, | 209 | struct scsi_cmnd *scsi_cmd, |
188 | void (*donefunc)(struct scsi_cmnd *)); | 210 | void (*donefunc)(struct scsi_cmnd *)); |
189 | 211 | ||
212 | int isci_bus_reset_handler(struct scsi_cmnd *cmd); | ||
213 | |||
214 | /** | ||
215 | * enum isci_completion_selection - This enum defines the possible actions to | ||
216 | * take with respect to a given request's notification back to libsas. | ||
217 | * | ||
218 | * | ||
219 | */ | ||
220 | enum isci_completion_selection { | ||
221 | |||
222 | isci_perform_normal_io_completion, /* Normal notify (task_done) */ | ||
223 | isci_perform_aborted_io_completion, /* No notification. */ | ||
224 | isci_perform_error_io_completion /* Use sas_task_abort */ | ||
225 | }; | ||
226 | |||
227 | static inline void isci_set_task_doneflags( | ||
228 | struct sas_task *task) | ||
229 | { | ||
230 | /* Since no further action will be taken on this task, | ||
231 | * make sure to mark it complete from the lldd perspective. | ||
232 | */ | ||
233 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
234 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | ||
235 | task->task_state_flags &= ~SAS_TASK_STATE_PENDING; | ||
236 | } | ||
237 | /** | ||
238 | * isci_task_all_done() - This function clears the task bits to indicate the | ||
239 | * LLDD is done with the task. | ||
240 | * | ||
241 | * | ||
242 | */ | ||
243 | static inline void isci_task_all_done( | ||
244 | struct sas_task *task) | ||
245 | { | ||
246 | unsigned long flags; | ||
247 | |||
248 | /* Since no further action will be taken on this task, | ||
249 | * make sure to mark it complete from the lldd perspective. | ||
250 | */ | ||
251 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
252 | isci_set_task_doneflags(task); | ||
253 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
254 | } | ||
255 | |||
256 | /** | ||
257 | * isci_task_set_completion_status() - This function sets the completion status | ||
258 | * for the request. | ||
259 | * @task: This parameter is the completed request. | ||
260 | * @response: This parameter is the response code for the completed task. | ||
261 | * @status: This parameter is the status code for the completed task. | ||
262 | * | ||
263 | * @return The new notification mode for the request. | ||
264 | */ | ||
265 | static inline enum isci_completion_selection | ||
266 | isci_task_set_completion_status( | ||
267 | struct sas_task *task, | ||
268 | enum service_response response, | ||
269 | enum exec_status status, | ||
270 | enum isci_completion_selection task_notification_selection) | ||
271 | { | ||
272 | unsigned long flags; | ||
273 | |||
274 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
275 | |||
276 | /* If a device reset is being indicated, make sure the I/O | ||
277 | * is in the error path. | ||
278 | */ | ||
279 | if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { | ||
280 | /* Fail the I/O to make sure it goes into the error path. */ | ||
281 | response = SAS_TASK_UNDELIVERED; | ||
282 | status = SAM_STAT_TASK_ABORTED; | ||
283 | |||
284 | task_notification_selection = isci_perform_error_io_completion; | ||
285 | } | ||
286 | task->task_status.resp = response; | ||
287 | task->task_status.stat = status; | ||
288 | |||
289 | switch (task_notification_selection) { | ||
290 | |||
291 | case isci_perform_error_io_completion: | ||
292 | |||
293 | if (task->task_proto == SAS_PROTOCOL_SMP) { | ||
294 | /* There is no error escalation in the SMP case. | ||
295 | * Convert to a normal completion to avoid the | ||
296 | * timeout in the discovery path and to let the | ||
297 | * next action take place quickly. | ||
298 | */ | ||
299 | task_notification_selection | ||
300 | = isci_perform_normal_io_completion; | ||
301 | |||
302 | /* Fall through to the normal case... */ | ||
303 | } else { | ||
304 | /* Use sas_task_abort */ | ||
305 | /* Leave SAS_TASK_STATE_DONE clear | ||
306 | * Leave SAS_TASK_AT_INITIATOR set. | ||
307 | */ | ||
308 | break; | ||
309 | } | ||
310 | |||
311 | case isci_perform_aborted_io_completion: | ||
312 | /* This path can occur with task-managed requests as well as | ||
313 | * requests terminated because of LUN or device resets. | ||
314 | */ | ||
315 | /* Fall through to the normal case... */ | ||
316 | case isci_perform_normal_io_completion: | ||
317 | /* Normal notification (task_done) */ | ||
318 | isci_set_task_doneflags(task); | ||
319 | break; | ||
320 | default: | ||
321 | WARN_ONCE(1, "unknown task_notification_selection: %d\n", | ||
322 | task_notification_selection); | ||
323 | break; | ||
324 | } | ||
325 | |||
326 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
327 | |||
328 | return task_notification_selection; | ||
329 | |||
330 | } | ||
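Two rules in the helper above are easy to miss in the switch: a pending device reset unconditionally reroutes the completion to the error path, and SMP requests are then demoted from the error path back to a normal completion because SMP has no error escalation. The decision table, reduced to a sketch with hypothetical names:

	#include <stdbool.h>

	enum completion_sel {
		SEL_NORMAL,     /* notify via task_done */
		SEL_ABORTED,    /* silent: no notification */
		SEL_ERROR,      /* notify via sas_task_abort */
	};

	static enum completion_sel pick_completion(bool need_dev_reset, bool is_smp,
						   enum completion_sel requested)
	{
		if (need_dev_reset)
			requested = SEL_ERROR;  /* force the I/O into the error path */
		if (requested == SEL_ERROR && is_smp)
			requested = SEL_NORMAL; /* no error escalation for SMP */
		return requested;
	}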
331 | /** | ||
332 | * isci_execpath_callback() - This function is called from the task | ||
333 | * execute path when the task needs to callback libsas about the submit-time | ||
334 | * task failure. The callback occurs either through the task's done function | ||
335 | * or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O | ||
336 | * requests, libsas takes the host lock before calling execute task. Therefore | ||
337 | * in this situation the host lock must be managed before calling the func. | ||
338 | * | ||
339 | * @ihost: This parameter is the controller to which the I/O request was sent. | ||
340 | * @task: This parameter is the I/O request. | ||
341 | * @func: This parameter is the function to call in the correct context. | ||
342 | * @status: This parameter is the status code for the completed task. | ||
343 | * | ||
344 | */ | ||
345 | static inline void isci_execpath_callback(struct isci_host *ihost, | ||
346 | struct sas_task *task, | ||
347 | void (*func)(struct sas_task *)) | ||
348 | { | ||
349 | struct domain_device *dev = task->dev; | ||
350 | |||
351 | if (dev_is_sata(dev) && task->uldd_task) { | ||
352 | unsigned long flags; | ||
353 | |||
354 | /* Since we are still in the submit path, and since | ||
355 | * libsas takes the host lock on behalf of SATA | ||
356 | * devices before I/O starts (in the non-discovery case), | ||
357 | * we need to unlock before we can call the callback function. | ||
358 | */ | ||
359 | raw_local_irq_save(flags); | ||
360 | spin_unlock(dev->sata_dev.ap->lock); | ||
361 | func(task); | ||
362 | spin_lock(dev->sata_dev.ap->lock); | ||
363 | raw_local_irq_restore(flags); | ||
364 | } else | ||
365 | func(task); | ||
366 | } | ||
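The SATA branch above is the classic unlock-around-callback maneuver: the caller (libsas) entered holding the ATA port lock, and the callback may need it, so the helper drops it for the duration of the call and retakes it before returning. A generic userspace analogue of the pattern (a sketch, not the driver code):

	#include <pthread.h>

	/* Call func(arg) with 'held' temporarily released, restoring the
	 * caller's locking state afterwards. */
	static void call_unlocked(pthread_mutex_t *held,
				  void (*func)(void *), void *arg)
	{
		pthread_mutex_unlock(held);
		func(arg);                      /* may block or reacquire 'held' */
		pthread_mutex_lock(held);
	}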
190 | #endif /* !defined(_SCI_TASK_H_) */ | 367 | #endif /* !defined(_SCI_TASK_H_) */ |
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index 04a6d0d59a2..16f88ab939c 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c | |||
@@ -57,19 +57,31 @@ | |||
57 | #include "unsolicited_frame_control.h" | 57 | #include "unsolicited_frame_control.h" |
58 | #include "registers.h" | 58 | #include "registers.h" |
59 | 59 | ||
60 | void sci_unsolicited_frame_control_construct(struct isci_host *ihost) | 60 | int sci_unsolicited_frame_control_construct(struct isci_host *ihost) |
61 | { | 61 | { |
62 | struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; | 62 | struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; |
63 | struct sci_unsolicited_frame *uf; | 63 | struct sci_unsolicited_frame *uf; |
64 | dma_addr_t dma = ihost->ufi_dma; | 64 | u32 buf_len, header_len, i; |
65 | void *virt = ihost->ufi_buf; | 65 | dma_addr_t dma; |
66 | int i; | 66 | size_t size; |
67 | void *virt; | ||
68 | |||
69 | /* | ||
70 | * Prepare all of the memory sizes for the UF headers, UF address | ||
71 | * table, and UF buffers themselves. | ||
72 | */ | ||
73 | buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; | ||
74 | header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); | ||
75 | size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]); | ||
67 | 76 | ||
68 | /* | 77 | /* |
69 | * The Unsolicited Frame buffers are set at the start of the UF | 78 | * The Unsolicited Frame buffers are set at the start of the UF |
70 | * memory descriptor entry. The headers and address table will be | 79 | * memory descriptor entry. The headers and address table will be |
71 | * placed after the buffers. | 80 | * placed after the buffers. |
72 | */ | 81 | */ |
82 | virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL); | ||
83 | if (!virt) | ||
84 | return -ENOMEM; | ||
73 | 85 | ||
74 | /* | 86 | /* |
75 | * Program the location of the UF header table into the SCU. | 87 | * Program the location of the UF header table into the SCU. |
@@ -81,8 +93,8 @@ void sci_unsolicited_frame_control_construct(struct isci_host *ihost) | |||
81 | * headers, since we program the UF address table pointers to | 93 | * headers, since we program the UF address table pointers to |
82 | * NULL. | 94 | * NULL. |
83 | */ | 95 | */ |
84 | uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE; | 96 | uf_control->headers.physical_address = dma + buf_len; |
85 | uf_control->headers.array = virt + SCI_UFI_BUF_SIZE; | 97 | uf_control->headers.array = virt + buf_len; |
86 | 98 | ||
87 | /* | 99 | /* |
88 | * Program the location of the UF address table into the SCU. | 100 | * Program the location of the UF address table into the SCU. |
@@ -91,8 +103,8 @@ void sci_unsolicited_frame_control_construct(struct isci_host *ihost) | |||
91 | * byte boundary already due to above programming headers being on a | 103 | * byte boundary already due to above programming headers being on a |
92 | * 64-bit boundary and headers are on a 64-bytes in size. | 104 | * 64-bit boundary and headers are on a 64-bytes in size. |
93 | */ | 105 | */ |
94 | uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; | 106 | uf_control->address_table.physical_address = dma + buf_len + header_len; |
95 | uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; | 107 | uf_control->address_table.array = virt + buf_len + header_len; |
96 | uf_control->get = 0; | 108 | uf_control->get = 0; |
97 | 109 | ||
98 | /* | 110 | /* |
@@ -123,6 +135,8 @@ void sci_unsolicited_frame_control_construct(struct isci_host *ihost) | |||
123 | virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; | 135 | virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; |
124 | dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; | 136 | dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; |
125 | } | 137 | } |
138 | |||
139 | return 0; | ||
126 | } | 140 | } |
127 | 141 | ||
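The sizing arithmetic at the top of the function lays out one coherent DMA allocation as three back-to-back regions: frame buffers at offset 0, then the headers, then the address table. A standalone sketch of the offset math (the frame count, buffer size, and header size below are assumptions; the real values come from the SCU headers):

	#include <stdio.h>

	#define MAX_UF      64          /* assumed SCU_MAX_UNSOLICITED_FRAMES */
	#define UF_BUF_SIZE 1024        /* assumed SCU_UNSOLICITED_FRAME_BUFFER_SIZE */
	#define UF_HDR_SIZE 64          /* assumed header struct size */

	int main(void)
	{
		size_t buf_len    = (size_t)MAX_UF * UF_BUF_SIZE;
		size_t header_len = (size_t)MAX_UF * UF_HDR_SIZE;
		size_t table_len  = MAX_UF * sizeof(unsigned long long); /* u64 slots */
		size_t total      = buf_len + header_len + table_len;

		printf("buffers:       offset 0, %zu bytes\n", buf_len);
		printf("headers:       offset %zu, %zu bytes\n", buf_len, header_len);
		printf("address table: offset %zu, %zu bytes\n",
		       buf_len + header_len, table_len);
		printf("one coherent allocation of %zu bytes covers all three\n",
		       total);
		return 0;
	}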
128 | enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, | 142 | enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, |
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 1bc551ec611..75d896686f5 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h | |||
@@ -257,13 +257,9 @@ struct sci_unsolicited_frame_control { | |||
257 | 257 | ||
258 | }; | 258 | }; |
259 | 259 | ||
260 | #define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE) | ||
261 | #define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header)) | ||
262 | #define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64)) | ||
263 | |||
264 | struct isci_host; | 260 | struct isci_host; |
265 | 261 | ||
266 | void sci_unsolicited_frame_control_construct(struct isci_host *ihost); | 262 | int sci_unsolicited_frame_control_construct(struct isci_host *ihost); |
267 | 263 | ||
268 | enum sci_status sci_unsolicited_frame_control_get_header( | 264 | enum sci_status sci_unsolicited_frame_control_get_header( |
269 | struct sci_unsolicited_frame_control *uf_control, | 265 | struct sci_unsolicited_frame_control *uf_control, |