author    James Bottomley <JBottomley@Parallels.com>  2012-05-21 07:17:30 -0400
committer James Bottomley <JBottomley@Parallels.com>  2012-05-21 07:17:30 -0400
commit    e34693336564f02b3e2cc09d8b872aef22a154e9 (patch)
tree      09f51f10f9406042f9176e39b4dc8de850ba712e /drivers/scsi
parent    76b311fdbdd2e16e5d39cd496a67aa1a1b948914 (diff)
parent    de2eb4d5c5c25e8fb75d1e19092f24b83cb7d8d5 (diff)
Merge tag 'isci-for-3.5' into misc
isci update for 3.5
1/ Rework remote-node-context (RNC) handling for proper management of
the silicon state machine in error handling and hot-plug conditions.
Further details below; suffice it to say that if the RNC is mismanaged,
the silicon state machines may lock up.
2/ Refactor the initialization code so it can be reused for suspend/resume
support.
3/ Miscellaneous bug fixes to address discovery issues and hardware
compatibility.
RNC rework details from Jeff Skirvin:
In the controller, devices as they appear on a SAS domain (or
direct-attached SATA devices) are represented by memory structures known
as "Remote Node Contexts" (RNCs). These structures are transferred from
main memory to the controller using a set of register commands; these
commands include setting up the context ("posting"), removing the
context ("invalidating"), and commands to control the scheduling of
commands and connections to that remote device ("suspensions" and
"resumptions"). There is a similar path to control RNC scheduling from
the protocol engine, which interprets the results of command and data
transmission and reception.
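As a rough illustration of the register-command interface described above,
here is a minimal sketch; the command encoding, the enum values, and the
rnc_post_command() helper are invented stand-ins, not the driver's actual
interface.

/* Hypothetical sketch of the RNC register-command set described above;
 * names and the command/index encoding are illustrative only.
 */
#include <linux/io.h>
#include <linux/types.h>

enum rnc_command {
	RNC_POST,          /* transfer the context to the controller */
	RNC_INVALIDATE,    /* remove the context from the controller */
	RNC_SUSPEND_TX,    /* stop scheduling outgoing command/data frames */
	RNC_SUSPEND_TX_RX, /* additionally reject incoming connections */
	RNC_RESUME,        /* make the context schedulable again */
};

static void rnc_post_command(void __iomem *post_reg, u32 rnc_index,
			     enum rnc_command cmd)
{
	/* a single register write names both the command and the target
	 * remote node context
	 */
	writel((cmd << 16) | rnc_index, post_reg);
}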
In general, the controller chooses among non-suspended RNCs to find one
that has work requiring scheduling the transmission of command and data
frames to a target. Likewise, when a target tries to return data back
to the initiator, the state of the RNC is used by the controller to
determine how to treat the incoming request. As an example, if the RNC
is in the state "TX/RX Suspended", incoming SSP connection requests from
the target will be rejected by the controller hardware. When an RNC is
"TX Suspended", it will not be selected by the controller hardware to
start outgoing command or data operations (with certain priority-based
exceptions).
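The selection rule described above reduces to two small predicates. The
following is a sketch with invented state names, covering only the two
suspension flavors mentioned here.

#include <linux/types.h>

enum rnc_state { RNC_READY, RNC_TX_SUSPENDED, RNC_TX_RX_SUSPENDED };

/* eligible to be selected for outgoing command/data frames
 * (ignoring the priority-based exceptions noted above)
 */
static bool rnc_may_start_tx(enum rnc_state state)
{
	return state == RNC_READY;
}

/* a TX/RX-suspended context also rejects incoming SSP connection
 * requests from the target
 */
static bool rnc_accepts_target_connection(enum rnc_state state)
{
	return state != RNC_TX_RX_SUSPENDED;
}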
As mentioned above, there are two sources for management of the RNC
states: commands from driver software, and the result of transmission
and reception conditions of commands and data signaled by the controller
hardware. As an example of the latter, if an outgoing SSP command ends
with an OPEN_REJECT(BAD_DESTINATION) status, the RNC transitions to the
"TX Suspended" state; the controller hardware signals this both in the
completion status of the pending command and in a controller hardware
event. Examples of the former are included in the patch changelogs.
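As a sketch of the hardware-signaled source, the transition in the
OPEN_REJECT example above might look like this; the status constant and
the struct are hypothetical, not taken from the driver.

#include <linux/types.h>

#define EX_OPEN_REJECT_BAD_DESTINATION 0x01 /* illustrative encoding */

struct example_rnc {
	bool tx_suspended;
};

/* mirror the hardware's RNC transition when an outgoing SSP command
 * completes with OPEN_REJECT(BAD_DESTINATION)
 */
static void example_tx_completion(struct example_rnc *rnc, u32 status)
{
	if (status == EX_OPEN_REJECT_BAD_DESTINATION)
		rnc->tx_suspended = true;
}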
Driver software is required to put the RNC into a "TX/RX Suspended"
condition before any outstanding commands can be terminated; failure to
guarantee this can lead to a complete hardware hang. Earlier versions of
the driver did not guarantee that an RNC was correctly managed before
I/O termination, and so operated in an unsafe way.
Further, the driver performed unnecessary contortions to preserve the
remote device command state and so was more complicated than it needed
to be. A simplifying assumption is that once an I/O has entered the
error-handler path without having completed in the target, all driver
use of the sas_task must end. Beyond that, recovery depends on libsas
and other components to reset, rediscover and reconfigure the device
before normal operation can restart. In the driver, this assumption
meant that RNC management could be reduced to entering the suspended
state, terminating the targeted I/O request, and resuming the RNC as
needed for device-specific management such as an SSP Abort Task or LUN
Reset request.
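Taken together, the requirement and the simplifying assumption yield the
ordering sketched below: suspend, terminate, then resume only for
device-level recovery. The types and helpers are placeholders, not the
driver's actual functions.

struct example_dev;
struct example_req;

int example_rnc_suspend_tx_rx(struct example_dev *dev);
int example_terminate_request(struct example_dev *dev, struct example_req *req);
int example_rnc_resume(struct example_dev *dev);

/* conceptual ordering only; all names are placeholders */
static int example_safe_terminate(struct example_dev *dev,
				  struct example_req *req)
{
	int err;

	/* 1) the RNC must reach "TX/RX Suspended" before any outstanding
	 *    command is terminated, or the hardware can hang
	 */
	err = example_rnc_suspend_tx_rx(dev);
	if (err)
		return err;

	/* 2) terminate the targeted I/O; once it has entered the error
	 *    handler, no further use of its sas_task is allowed
	 */
	err = example_terminate_request(dev, req);
	if (err)
		return err;

	/* 3) resume only as needed for device-specific management such
	 *    as SSP Abort Task or LUN Reset; rediscovery is libsas's job
	 */
	return example_rnc_resume(dev);
}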
Diffstat (limited to 'drivers/scsi')
42 files changed, 2066 insertions, 2386 deletions
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 351dc0b86fab..a3a056a9db67 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -218,6 +218,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, | |||
218 | 218 | ||
219 | if (!shost->shost_gendev.parent) | 219 | if (!shost->shost_gendev.parent) |
220 | shost->shost_gendev.parent = dev ? dev : &platform_bus; | 220 | shost->shost_gendev.parent = dev ? dev : &platform_bus; |
221 | if (!dma_dev) | ||
222 | dma_dev = shost->shost_gendev.parent; | ||
223 | |||
221 | shost->dma_dev = dma_dev; | 224 | shost->dma_dev = dma_dev; |
222 | 225 | ||
223 | error = device_add(&shost->shost_gendev); | 226 | error = device_add(&shost->shost_gendev); |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e002cd466e9a..467dc38246f9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4549,8 +4549,12 @@ static int ipr_ata_slave_alloc(struct scsi_device *sdev) | |||
4549 | ENTER; | 4549 | ENTER; |
4550 | if (sdev->sdev_target) | 4550 | if (sdev->sdev_target) |
4551 | sata_port = sdev->sdev_target->hostdata; | 4551 | sata_port = sdev->sdev_target->hostdata; |
4552 | if (sata_port) | 4552 | if (sata_port) { |
4553 | rc = ata_sas_port_init(sata_port->ap); | 4553 | rc = ata_sas_port_init(sata_port->ap); |
4554 | if (rc == 0) | ||
4555 | rc = ata_sas_sync_probe(sata_port->ap); | ||
4556 | } | ||
4557 | |||
4554 | if (rc) | 4558 | if (rc) |
4555 | ipr_slave_destroy(sdev); | 4559 | ipr_slave_destroy(sdev); |
4556 | 4560 | ||
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d4bf9c12ecd4..45385f531649 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -192,22 +192,27 @@ static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) | |||
192 | 192 | ||
193 | static bool sci_controller_isr(struct isci_host *ihost) | 193 | static bool sci_controller_isr(struct isci_host *ihost) |
194 | { | 194 | { |
195 | if (sci_controller_completion_queue_has_entries(ihost)) { | 195 | if (sci_controller_completion_queue_has_entries(ihost)) |
196 | return true; | 196 | return true; |
197 | } else { | ||
198 | /* | ||
199 | * we have a spurious interrupt it could be that we have already | ||
200 | * emptied the completion queue from a previous interrupt */ | ||
201 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); | ||
202 | 197 | ||
203 | /* | 198 | /* we have a spurious interrupt it could be that we have already |
204 | * There is a race in the hardware that could cause us not to be notified | 199 | * emptied the completion queue from a previous interrupt |
205 | * of an interrupt completion if we do not take this step. We will mask | 200 | * FIXME: really!? |
206 | * then unmask the interrupts so if there is another interrupt pending | 201 | */ |
207 | * the clearing of the interrupt source we get the next interrupt message. */ | 202 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); |
203 | |||
204 | /* There is a race in the hardware that could cause us not to be | ||
205 | * notified of an interrupt completion if we do not take this | ||
206 | * step. We will mask then unmask the interrupts so if there is | ||
207 | * another interrupt pending the clearing of the interrupt | ||
208 | * source we get the next interrupt message. | ||
209 | */ | ||
210 | spin_lock(&ihost->scic_lock); | ||
211 | if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) { | ||
208 | writel(0xFF000000, &ihost->smu_registers->interrupt_mask); | 212 | writel(0xFF000000, &ihost->smu_registers->interrupt_mask); |
209 | writel(0, &ihost->smu_registers->interrupt_mask); | 213 | writel(0, &ihost->smu_registers->interrupt_mask); |
210 | } | 214 | } |
215 | spin_unlock(&ihost->scic_lock); | ||
211 | 216 | ||
212 | return false; | 217 | return false; |
213 | } | 218 | } |
@@ -642,7 +647,6 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co | |||
642 | if (completion_status != SCI_SUCCESS) | 647 | if (completion_status != SCI_SUCCESS) |
643 | dev_info(&ihost->pdev->dev, | 648 | dev_info(&ihost->pdev->dev, |
644 | "controller start timed out, continuing...\n"); | 649 | "controller start timed out, continuing...\n"); |
645 | isci_host_change_state(ihost, isci_ready); | ||
646 | clear_bit(IHOST_START_PENDING, &ihost->flags); | 650 | clear_bit(IHOST_START_PENDING, &ihost->flags); |
647 | wake_up(&ihost->eventq); | 651 | wake_up(&ihost->eventq); |
648 | } | 652 | } |
@@ -657,12 +661,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) | |||
657 | 661 | ||
658 | sas_drain_work(ha); | 662 | sas_drain_work(ha); |
659 | 663 | ||
660 | dev_dbg(&ihost->pdev->dev, | ||
661 | "%s: ihost->status = %d, time = %ld\n", | ||
662 | __func__, isci_host_get_state(ihost), time); | ||
663 | |||
664 | return 1; | 664 | return 1; |
665 | |||
666 | } | 665 | } |
667 | 666 | ||
668 | /** | 667 | /** |
@@ -704,14 +703,15 @@ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) | |||
704 | 703 | ||
705 | static void sci_controller_enable_interrupts(struct isci_host *ihost) | 704 | static void sci_controller_enable_interrupts(struct isci_host *ihost) |
706 | { | 705 | { |
707 | BUG_ON(ihost->smu_registers == NULL); | 706 | set_bit(IHOST_IRQ_ENABLED, &ihost->flags); |
708 | writel(0, &ihost->smu_registers->interrupt_mask); | 707 | writel(0, &ihost->smu_registers->interrupt_mask); |
709 | } | 708 | } |
710 | 709 | ||
711 | void sci_controller_disable_interrupts(struct isci_host *ihost) | 710 | void sci_controller_disable_interrupts(struct isci_host *ihost) |
712 | { | 711 | { |
713 | BUG_ON(ihost->smu_registers == NULL); | 712 | clear_bit(IHOST_IRQ_ENABLED, &ihost->flags); |
714 | writel(0xffffffff, &ihost->smu_registers->interrupt_mask); | 713 | writel(0xffffffff, &ihost->smu_registers->interrupt_mask); |
714 | readl(&ihost->smu_registers->interrupt_mask); /* flush */ | ||
715 | } | 715 | } |
716 | 716 | ||
717 | static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) | 717 | static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) |
@@ -822,7 +822,7 @@ static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host * | |||
822 | &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); | 822 | &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); |
823 | } | 823 | } |
824 | 824 | ||
825 | static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) | 825 | void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) |
826 | { | 826 | { |
827 | if (ihost->sm.current_state_id == SCIC_STARTING) { | 827 | if (ihost->sm.current_state_id == SCIC_STARTING) { |
828 | /* | 828 | /* |
@@ -849,6 +849,7 @@ static bool is_phy_starting(struct isci_phy *iphy) | |||
849 | case SCI_PHY_SUB_AWAIT_SATA_POWER: | 849 | case SCI_PHY_SUB_AWAIT_SATA_POWER: |
850 | case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: | 850 | case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: |
851 | case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: | 851 | case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: |
852 | case SCI_PHY_SUB_AWAIT_OSSP_EN: | ||
852 | case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: | 853 | case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: |
853 | case SCI_PHY_SUB_FINAL: | 854 | case SCI_PHY_SUB_FINAL: |
854 | return true; | 855 | return true; |
@@ -857,6 +858,39 @@ static bool is_phy_starting(struct isci_phy *iphy) | |||
857 | } | 858 | } |
858 | } | 859 | } |
859 | 860 | ||
861 | bool is_controller_start_complete(struct isci_host *ihost) | ||
862 | { | ||
863 | int i; | ||
864 | |||
865 | for (i = 0; i < SCI_MAX_PHYS; i++) { | ||
866 | struct isci_phy *iphy = &ihost->phys[i]; | ||
867 | u32 state = iphy->sm.current_state_id; | ||
868 | |||
869 | /* in apc mode we need to check every phy, in | ||
870 | * mpc mode we only need to check phys that have | ||
871 | * been configured into a port | ||
872 | */ | ||
873 | if (is_port_config_apc(ihost)) | ||
874 | /* pass */; | ||
875 | else if (!phy_get_non_dummy_port(iphy)) | ||
876 | continue; | ||
877 | |||
878 | /* The controller start operation is complete iff: | ||
879 | * - all links have been given an opportunity to start | ||
880 | * - have no indication of a connected device | ||
881 | * - have an indication of a connected device and it has | ||
882 | * finished the link training process. | ||
883 | */ | ||
884 | if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || | ||
885 | (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || | ||
886 | (iphy->is_in_link_training == true && is_phy_starting(iphy)) || | ||
887 | (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) | ||
888 | return false; | ||
889 | } | ||
890 | |||
891 | return true; | ||
892 | } | ||
893 | |||
860 | /** | 894 | /** |
861 | * sci_controller_start_next_phy - start phy | 895 | * sci_controller_start_next_phy - start phy |
862 | * @scic: controller | 896 | * @scic: controller |
@@ -877,36 +911,7 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) | |||
877 | return status; | 911 | return status; |
878 | 912 | ||
879 | if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { | 913 | if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { |
880 | bool is_controller_start_complete = true; | 914 | if (is_controller_start_complete(ihost)) { |
881 | u32 state; | ||
882 | u8 index; | ||
883 | |||
884 | for (index = 0; index < SCI_MAX_PHYS; index++) { | ||
885 | iphy = &ihost->phys[index]; | ||
886 | state = iphy->sm.current_state_id; | ||
887 | |||
888 | if (!phy_get_non_dummy_port(iphy)) | ||
889 | continue; | ||
890 | |||
891 | /* The controller start operation is complete iff: | ||
892 | * - all links have been given an opportunity to start | ||
893 | * - have no indication of a connected device | ||
894 | * - have an indication of a connected device and it has | ||
895 | * finished the link training process. | ||
896 | */ | ||
897 | if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || | ||
898 | (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || | ||
899 | (iphy->is_in_link_training == true && is_phy_starting(iphy)) || | ||
900 | (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) { | ||
901 | is_controller_start_complete = false; | ||
902 | break; | ||
903 | } | ||
904 | } | ||
905 | |||
906 | /* | ||
907 | * The controller has successfully finished the start process. | ||
908 | * Inform the SCI Core user and transition to the READY state. */ | ||
909 | if (is_controller_start_complete == true) { | ||
910 | sci_controller_transition_to_ready(ihost, SCI_SUCCESS); | 915 | sci_controller_transition_to_ready(ihost, SCI_SUCCESS); |
911 | sci_del_timer(&ihost->phy_timer); | 916 | sci_del_timer(&ihost->phy_timer); |
912 | ihost->phy_startup_timer_pending = false; | 917 | ihost->phy_startup_timer_pending = false; |
@@ -987,9 +992,8 @@ static enum sci_status sci_controller_start(struct isci_host *ihost, | |||
987 | u16 index; | 992 | u16 index; |
988 | 993 | ||
989 | if (ihost->sm.current_state_id != SCIC_INITIALIZED) { | 994 | if (ihost->sm.current_state_id != SCIC_INITIALIZED) { |
990 | dev_warn(&ihost->pdev->dev, | 995 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
991 | "SCIC Controller start operation requested in " | 996 | __func__, ihost->sm.current_state_id); |
992 | "invalid state\n"); | ||
993 | return SCI_FAILURE_INVALID_STATE; | 997 | return SCI_FAILURE_INVALID_STATE; |
994 | } | 998 | } |
995 | 999 | ||
@@ -1053,9 +1057,8 @@ void isci_host_scan_start(struct Scsi_Host *shost) | |||
1053 | spin_unlock_irq(&ihost->scic_lock); | 1057 | spin_unlock_irq(&ihost->scic_lock); |
1054 | } | 1058 | } |
1055 | 1059 | ||
1056 | static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) | 1060 | static void isci_host_stop_complete(struct isci_host *ihost) |
1057 | { | 1061 | { |
1058 | isci_host_change_state(ihost, isci_stopped); | ||
1059 | sci_controller_disable_interrupts(ihost); | 1062 | sci_controller_disable_interrupts(ihost); |
1060 | clear_bit(IHOST_STOP_PENDING, &ihost->flags); | 1063 | clear_bit(IHOST_STOP_PENDING, &ihost->flags); |
1061 | wake_up(&ihost->eventq); | 1064 | wake_up(&ihost->eventq); |
@@ -1074,6 +1077,32 @@ static void sci_controller_completion_handler(struct isci_host *ihost) | |||
1074 | writel(0, &ihost->smu_registers->interrupt_mask); | 1077 | writel(0, &ihost->smu_registers->interrupt_mask); |
1075 | } | 1078 | } |
1076 | 1079 | ||
1080 | void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task) | ||
1081 | { | ||
1082 | task->lldd_task = NULL; | ||
1083 | if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) && | ||
1084 | !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
1085 | if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) { | ||
1086 | /* Normal notification (task_done) */ | ||
1087 | dev_dbg(&ihost->pdev->dev, | ||
1088 | "%s: Normal - ireq/task = %p/%p\n", | ||
1089 | __func__, ireq, task); | ||
1090 | |||
1091 | task->task_done(task); | ||
1092 | } else { | ||
1093 | dev_dbg(&ihost->pdev->dev, | ||
1094 | "%s: Error - ireq/task = %p/%p\n", | ||
1095 | __func__, ireq, task); | ||
1096 | |||
1097 | sas_task_abort(task); | ||
1098 | } | ||
1099 | } | ||
1100 | if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) | ||
1101 | wake_up_all(&ihost->eventq); | ||
1102 | |||
1103 | if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) | ||
1104 | isci_free_tag(ihost, ireq->io_tag); | ||
1105 | } | ||
1077 | /** | 1106 | /** |
1078 | * isci_host_completion_routine() - This function is the delayed service | 1107 | * isci_host_completion_routine() - This function is the delayed service |
1079 | * routine that calls the sci core library's completion handler. It's | 1108 | * routine that calls the sci core library's completion handler. It's |
@@ -1082,107 +1111,15 @@ static void sci_controller_completion_handler(struct isci_host *ihost) | |||
1082 | * @data: This parameter specifies the ISCI host object | 1111 | * @data: This parameter specifies the ISCI host object |
1083 | * | 1112 | * |
1084 | */ | 1113 | */ |
1085 | static void isci_host_completion_routine(unsigned long data) | 1114 | void isci_host_completion_routine(unsigned long data) |
1086 | { | 1115 | { |
1087 | struct isci_host *ihost = (struct isci_host *)data; | 1116 | struct isci_host *ihost = (struct isci_host *)data; |
1088 | struct list_head completed_request_list; | ||
1089 | struct list_head errored_request_list; | ||
1090 | struct list_head *current_position; | ||
1091 | struct list_head *next_position; | ||
1092 | struct isci_request *request; | ||
1093 | struct isci_request *next_request; | ||
1094 | struct sas_task *task; | ||
1095 | u16 active; | 1117 | u16 active; |
1096 | 1118 | ||
1097 | INIT_LIST_HEAD(&completed_request_list); | ||
1098 | INIT_LIST_HEAD(&errored_request_list); | ||
1099 | |||
1100 | spin_lock_irq(&ihost->scic_lock); | 1119 | spin_lock_irq(&ihost->scic_lock); |
1101 | |||
1102 | sci_controller_completion_handler(ihost); | 1120 | sci_controller_completion_handler(ihost); |
1103 | |||
1104 | /* Take the lists of completed I/Os from the host. */ | ||
1105 | |||
1106 | list_splice_init(&ihost->requests_to_complete, | ||
1107 | &completed_request_list); | ||
1108 | |||
1109 | /* Take the list of errored I/Os from the host. */ | ||
1110 | list_splice_init(&ihost->requests_to_errorback, | ||
1111 | &errored_request_list); | ||
1112 | |||
1113 | spin_unlock_irq(&ihost->scic_lock); | 1121 | spin_unlock_irq(&ihost->scic_lock); |
1114 | 1122 | ||
1115 | /* Process any completions in the lists. */ | ||
1116 | list_for_each_safe(current_position, next_position, | ||
1117 | &completed_request_list) { | ||
1118 | |||
1119 | request = list_entry(current_position, struct isci_request, | ||
1120 | completed_node); | ||
1121 | task = isci_request_access_task(request); | ||
1122 | |||
1123 | /* Normal notification (task_done) */ | ||
1124 | dev_dbg(&ihost->pdev->dev, | ||
1125 | "%s: Normal - request/task = %p/%p\n", | ||
1126 | __func__, | ||
1127 | request, | ||
1128 | task); | ||
1129 | |||
1130 | /* Return the task to libsas */ | ||
1131 | if (task != NULL) { | ||
1132 | |||
1133 | task->lldd_task = NULL; | ||
1134 | if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
1135 | |||
1136 | /* If the task is already in the abort path, | ||
1137 | * the task_done callback cannot be called. | ||
1138 | */ | ||
1139 | task->task_done(task); | ||
1140 | } | ||
1141 | } | ||
1142 | |||
1143 | spin_lock_irq(&ihost->scic_lock); | ||
1144 | isci_free_tag(ihost, request->io_tag); | ||
1145 | spin_unlock_irq(&ihost->scic_lock); | ||
1146 | } | ||
1147 | list_for_each_entry_safe(request, next_request, &errored_request_list, | ||
1148 | completed_node) { | ||
1149 | |||
1150 | task = isci_request_access_task(request); | ||
1151 | |||
1152 | /* Use sas_task_abort */ | ||
1153 | dev_warn(&ihost->pdev->dev, | ||
1154 | "%s: Error - request/task = %p/%p\n", | ||
1155 | __func__, | ||
1156 | request, | ||
1157 | task); | ||
1158 | |||
1159 | if (task != NULL) { | ||
1160 | |||
1161 | /* Put the task into the abort path if it's not there | ||
1162 | * already. | ||
1163 | */ | ||
1164 | if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) | ||
1165 | sas_task_abort(task); | ||
1166 | |||
1167 | } else { | ||
1168 | /* This is a case where the request has completed with a | ||
1169 | * status such that it needed further target servicing, | ||
1170 | * but the sas_task reference has already been removed | ||
1171 | * from the request. Since it was errored, it was not | ||
1172 | * being aborted, so there is nothing to do except free | ||
1173 | * it. | ||
1174 | */ | ||
1175 | |||
1176 | spin_lock_irq(&ihost->scic_lock); | ||
1177 | /* Remove the request from the remote device's list | ||
1178 | * of pending requests. | ||
1179 | */ | ||
1180 | list_del_init(&request->dev_node); | ||
1181 | isci_free_tag(ihost, request->io_tag); | ||
1182 | spin_unlock_irq(&ihost->scic_lock); | ||
1183 | } | ||
1184 | } | ||
1185 | |||
1186 | /* the coalesence timeout doubles at each encoding step, so | 1123 | /* the coalesence timeout doubles at each encoding step, so |
1187 | * update it based on the ilog2 value of the outstanding requests | 1124 | * update it based on the ilog2 value of the outstanding requests |
1188 | */ | 1125 | */ |
@@ -1213,9 +1150,8 @@ static void isci_host_completion_routine(unsigned long data) | |||
1213 | static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) | 1150 | static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) |
1214 | { | 1151 | { |
1215 | if (ihost->sm.current_state_id != SCIC_READY) { | 1152 | if (ihost->sm.current_state_id != SCIC_READY) { |
1216 | dev_warn(&ihost->pdev->dev, | 1153 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
1217 | "SCIC Controller stop operation requested in " | 1154 | __func__, ihost->sm.current_state_id); |
1218 | "invalid state\n"); | ||
1219 | return SCI_FAILURE_INVALID_STATE; | 1155 | return SCI_FAILURE_INVALID_STATE; |
1220 | } | 1156 | } |
1221 | 1157 | ||
@@ -1241,7 +1177,7 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost) | |||
1241 | switch (ihost->sm.current_state_id) { | 1177 | switch (ihost->sm.current_state_id) { |
1242 | case SCIC_RESET: | 1178 | case SCIC_RESET: |
1243 | case SCIC_READY: | 1179 | case SCIC_READY: |
1244 | case SCIC_STOPPED: | 1180 | case SCIC_STOPPING: |
1245 | case SCIC_FAILED: | 1181 | case SCIC_FAILED: |
1246 | /* | 1182 | /* |
1247 | * The reset operation is not a graceful cleanup, just | 1183 | * The reset operation is not a graceful cleanup, just |
@@ -1250,13 +1186,50 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost) | |||
1250 | sci_change_state(&ihost->sm, SCIC_RESETTING); | 1186 | sci_change_state(&ihost->sm, SCIC_RESETTING); |
1251 | return SCI_SUCCESS; | 1187 | return SCI_SUCCESS; |
1252 | default: | 1188 | default: |
1253 | dev_warn(&ihost->pdev->dev, | 1189 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
1254 | "SCIC Controller reset operation requested in " | 1190 | __func__, ihost->sm.current_state_id); |
1255 | "invalid state\n"); | ||
1256 | return SCI_FAILURE_INVALID_STATE; | 1191 | return SCI_FAILURE_INVALID_STATE; |
1257 | } | 1192 | } |
1258 | } | 1193 | } |
1259 | 1194 | ||
1195 | static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) | ||
1196 | { | ||
1197 | u32 index; | ||
1198 | enum sci_status status; | ||
1199 | enum sci_status phy_status; | ||
1200 | |||
1201 | status = SCI_SUCCESS; | ||
1202 | |||
1203 | for (index = 0; index < SCI_MAX_PHYS; index++) { | ||
1204 | phy_status = sci_phy_stop(&ihost->phys[index]); | ||
1205 | |||
1206 | if (phy_status != SCI_SUCCESS && | ||
1207 | phy_status != SCI_FAILURE_INVALID_STATE) { | ||
1208 | status = SCI_FAILURE; | ||
1209 | |||
1210 | dev_warn(&ihost->pdev->dev, | ||
1211 | "%s: Controller stop operation failed to stop " | ||
1212 | "phy %d because of status %d.\n", | ||
1213 | __func__, | ||
1214 | ihost->phys[index].phy_index, phy_status); | ||
1215 | } | ||
1216 | } | ||
1217 | |||
1218 | return status; | ||
1219 | } | ||
1220 | |||
1221 | |||
1222 | /** | ||
1223 | * isci_host_deinit - shutdown frame reception and dma | ||
1224 | * @ihost: host to take down | ||
1225 | * | ||
1226 | * This is called in either the driver shutdown or the suspend path. In | ||
1227 | * the shutdown case libsas went through port teardown and normal device | ||
1228 | * removal (i.e. physical links stayed up to service scsi_device removal | ||
1229 | * commands). In the suspend case we disable the hardware without | ||
1230 | * notifying libsas of the link down events since we want libsas to | ||
1231 | * remember the domain across the suspend/resume cycle | ||
1232 | */ | ||
1260 | void isci_host_deinit(struct isci_host *ihost) | 1233 | void isci_host_deinit(struct isci_host *ihost) |
1261 | { | 1234 | { |
1262 | int i; | 1235 | int i; |
@@ -1265,17 +1238,6 @@ void isci_host_deinit(struct isci_host *ihost) | |||
1265 | for (i = 0; i < isci_gpio_count(ihost); i++) | 1238 | for (i = 0; i < isci_gpio_count(ihost); i++) |
1266 | writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); | 1239 | writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); |
1267 | 1240 | ||
1268 | isci_host_change_state(ihost, isci_stopping); | ||
1269 | for (i = 0; i < SCI_MAX_PORTS; i++) { | ||
1270 | struct isci_port *iport = &ihost->ports[i]; | ||
1271 | struct isci_remote_device *idev, *d; | ||
1272 | |||
1273 | list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) { | ||
1274 | if (test_bit(IDEV_ALLOCATED, &idev->flags)) | ||
1275 | isci_remote_device_stop(ihost, idev); | ||
1276 | } | ||
1277 | } | ||
1278 | |||
1279 | set_bit(IHOST_STOP_PENDING, &ihost->flags); | 1241 | set_bit(IHOST_STOP_PENDING, &ihost->flags); |
1280 | 1242 | ||
1281 | spin_lock_irq(&ihost->scic_lock); | 1243 | spin_lock_irq(&ihost->scic_lock); |
@@ -1284,12 +1246,21 @@ void isci_host_deinit(struct isci_host *ihost) | |||
1284 | 1246 | ||
1285 | wait_for_stop(ihost); | 1247 | wait_for_stop(ihost); |
1286 | 1248 | ||
1249 | /* phy stop is after controller stop to allow port and device to | ||
1250 | * go idle before shutting down the phys, but the expectation is | ||
1251 | * that i/o has been shut off well before we reach this | ||
1252 | * function. | ||
1253 | */ | ||
1254 | sci_controller_stop_phys(ihost); | ||
1255 | |||
1287 | /* disable sgpio: where the above wait should give time for the | 1256 | /* disable sgpio: where the above wait should give time for the |
1288 | * enclosure to sample the gpios going inactive | 1257 | * enclosure to sample the gpios going inactive |
1289 | */ | 1258 | */ |
1290 | writel(0, &ihost->scu_registers->peg0.sgpio.interface_control); | 1259 | writel(0, &ihost->scu_registers->peg0.sgpio.interface_control); |
1291 | 1260 | ||
1261 | spin_lock_irq(&ihost->scic_lock); | ||
1292 | sci_controller_reset(ihost); | 1262 | sci_controller_reset(ihost); |
1263 | spin_unlock_irq(&ihost->scic_lock); | ||
1293 | 1264 | ||
1294 | /* Cancel any/all outstanding port timers */ | 1265 | /* Cancel any/all outstanding port timers */ |
1295 | for (i = 0; i < ihost->logical_port_entries; i++) { | 1266 | for (i = 0; i < ihost->logical_port_entries; i++) { |
@@ -1328,29 +1299,6 @@ static void __iomem *smu_base(struct isci_host *isci_host) | |||
1328 | return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; | 1299 | return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; |
1329 | } | 1300 | } |
1330 | 1301 | ||
1331 | static void isci_user_parameters_get(struct sci_user_parameters *u) | ||
1332 | { | ||
1333 | int i; | ||
1334 | |||
1335 | for (i = 0; i < SCI_MAX_PHYS; i++) { | ||
1336 | struct sci_phy_user_params *u_phy = &u->phys[i]; | ||
1337 | |||
1338 | u_phy->max_speed_generation = phy_gen; | ||
1339 | |||
1340 | /* we are not exporting these for now */ | ||
1341 | u_phy->align_insertion_frequency = 0x7f; | ||
1342 | u_phy->in_connection_align_insertion_frequency = 0xff; | ||
1343 | u_phy->notify_enable_spin_up_insertion_frequency = 0x33; | ||
1344 | } | ||
1345 | |||
1346 | u->stp_inactivity_timeout = stp_inactive_to; | ||
1347 | u->ssp_inactivity_timeout = ssp_inactive_to; | ||
1348 | u->stp_max_occupancy_timeout = stp_max_occ_to; | ||
1349 | u->ssp_max_occupancy_timeout = ssp_max_occ_to; | ||
1350 | u->no_outbound_task_timeout = no_outbound_task_to; | ||
1351 | u->max_concurr_spinup = max_concurr_spinup; | ||
1352 | } | ||
1353 | |||
1354 | static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) | 1302 | static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) |
1355 | { | 1303 | { |
1356 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1304 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
@@ -1510,32 +1458,6 @@ static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) | |||
1510 | sci_controller_set_interrupt_coalescence(ihost, 0, 0); | 1458 | sci_controller_set_interrupt_coalescence(ihost, 0, 0); |
1511 | } | 1459 | } |
1512 | 1460 | ||
1513 | static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) | ||
1514 | { | ||
1515 | u32 index; | ||
1516 | enum sci_status status; | ||
1517 | enum sci_status phy_status; | ||
1518 | |||
1519 | status = SCI_SUCCESS; | ||
1520 | |||
1521 | for (index = 0; index < SCI_MAX_PHYS; index++) { | ||
1522 | phy_status = sci_phy_stop(&ihost->phys[index]); | ||
1523 | |||
1524 | if (phy_status != SCI_SUCCESS && | ||
1525 | phy_status != SCI_FAILURE_INVALID_STATE) { | ||
1526 | status = SCI_FAILURE; | ||
1527 | |||
1528 | dev_warn(&ihost->pdev->dev, | ||
1529 | "%s: Controller stop operation failed to stop " | ||
1530 | "phy %d because of status %d.\n", | ||
1531 | __func__, | ||
1532 | ihost->phys[index].phy_index, phy_status); | ||
1533 | } | ||
1534 | } | ||
1535 | |||
1536 | return status; | ||
1537 | } | ||
1538 | |||
1539 | static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) | 1461 | static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) |
1540 | { | 1462 | { |
1541 | u32 index; | 1463 | u32 index; |
@@ -1595,10 +1517,11 @@ static void sci_controller_stopping_state_enter(struct sci_base_state_machine *s | |||
1595 | { | 1517 | { |
1596 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1518 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1597 | 1519 | ||
1598 | /* Stop all of the components for this controller */ | ||
1599 | sci_controller_stop_phys(ihost); | ||
1600 | sci_controller_stop_ports(ihost); | ||
1601 | sci_controller_stop_devices(ihost); | 1520 | sci_controller_stop_devices(ihost); |
1521 | sci_controller_stop_ports(ihost); | ||
1522 | |||
1523 | if (!sci_controller_has_remote_devices_stopping(ihost)) | ||
1524 | isci_host_stop_complete(ihost); | ||
1602 | } | 1525 | } |
1603 | 1526 | ||
1604 | static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) | 1527 | static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) |
@@ -1624,6 +1547,9 @@ static void sci_controller_reset_hardware(struct isci_host *ihost) | |||
1624 | 1547 | ||
1625 | /* The write to the UFQGP clears the UFQPR */ | 1548 | /* The write to the UFQGP clears the UFQPR */ |
1626 | writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); | 1549 | writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); |
1550 | |||
1551 | /* clear all interrupts */ | ||
1552 | writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status); | ||
1627 | } | 1553 | } |
1628 | 1554 | ||
1629 | static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) | 1555 | static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) |
@@ -1655,59 +1581,9 @@ static const struct sci_base_state sci_controller_state_table[] = { | |||
1655 | .enter_state = sci_controller_stopping_state_enter, | 1581 | .enter_state = sci_controller_stopping_state_enter, |
1656 | .exit_state = sci_controller_stopping_state_exit, | 1582 | .exit_state = sci_controller_stopping_state_exit, |
1657 | }, | 1583 | }, |
1658 | [SCIC_STOPPED] = {}, | ||
1659 | [SCIC_FAILED] = {} | 1584 | [SCIC_FAILED] = {} |
1660 | }; | 1585 | }; |
1661 | 1586 | ||
1662 | static void sci_controller_set_default_config_parameters(struct isci_host *ihost) | ||
1663 | { | ||
1664 | /* these defaults are overridden by the platform / firmware */ | ||
1665 | u16 index; | ||
1666 | |||
1667 | /* Default to APC mode. */ | ||
1668 | ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; | ||
1669 | |||
1670 | /* Default to APC mode. */ | ||
1671 | ihost->oem_parameters.controller.max_concurr_spin_up = 1; | ||
1672 | |||
1673 | /* Default to no SSC operation. */ | ||
1674 | ihost->oem_parameters.controller.do_enable_ssc = false; | ||
1675 | |||
1676 | /* Default to short cables on all phys. */ | ||
1677 | ihost->oem_parameters.controller.cable_selection_mask = 0; | ||
1678 | |||
1679 | /* Initialize all of the port parameter information to narrow ports. */ | ||
1680 | for (index = 0; index < SCI_MAX_PORTS; index++) { | ||
1681 | ihost->oem_parameters.ports[index].phy_mask = 0; | ||
1682 | } | ||
1683 | |||
1684 | /* Initialize all of the phy parameter information. */ | ||
1685 | for (index = 0; index < SCI_MAX_PHYS; index++) { | ||
1686 | /* Default to 3G (i.e. Gen 2). */ | ||
1687 | ihost->user_parameters.phys[index].max_speed_generation = | ||
1688 | SCIC_SDS_PARM_GEN2_SPEED; | ||
1689 | |||
1690 | /* the frequencies cannot be 0 */ | ||
1691 | ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; | ||
1692 | ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff; | ||
1693 | ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; | ||
1694 | |||
1695 | /* | ||
1696 | * Previous Vitesse based expanders had a arbitration issue that | ||
1697 | * is worked around by having the upper 32-bits of SAS address | ||
1698 | * with a value greater then the Vitesse company identifier. | ||
1699 | * Hence, usage of 0x5FCFFFFF. */ | ||
1700 | ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id; | ||
1701 | ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF; | ||
1702 | } | ||
1703 | |||
1704 | ihost->user_parameters.stp_inactivity_timeout = 5; | ||
1705 | ihost->user_parameters.ssp_inactivity_timeout = 5; | ||
1706 | ihost->user_parameters.stp_max_occupancy_timeout = 5; | ||
1707 | ihost->user_parameters.ssp_max_occupancy_timeout = 20; | ||
1708 | ihost->user_parameters.no_outbound_task_timeout = 2; | ||
1709 | } | ||
1710 | |||
1711 | static void controller_timeout(unsigned long data) | 1587 | static void controller_timeout(unsigned long data) |
1712 | { | 1588 | { |
1713 | struct sci_timer *tmr = (struct sci_timer *)data; | 1589 | struct sci_timer *tmr = (struct sci_timer *)data; |
@@ -1724,7 +1600,7 @@ static void controller_timeout(unsigned long data) | |||
1724 | sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); | 1600 | sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); |
1725 | else if (sm->current_state_id == SCIC_STOPPING) { | 1601 | else if (sm->current_state_id == SCIC_STOPPING) { |
1726 | sci_change_state(sm, SCIC_FAILED); | 1602 | sci_change_state(sm, SCIC_FAILED); |
1727 | isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); | 1603 | isci_host_stop_complete(ihost); |
1728 | } else /* / @todo Now what do we want to do in this case? */ | 1604 | } else /* / @todo Now what do we want to do in this case? */ |
1729 | dev_err(&ihost->pdev->dev, | 1605 | dev_err(&ihost->pdev->dev, |
1730 | "%s: Controller timer fired when controller was not " | 1606 | "%s: Controller timer fired when controller was not " |
@@ -1764,9 +1640,6 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost, | |||
1764 | 1640 | ||
1765 | sci_init_timer(&ihost->timer, controller_timeout); | 1641 | sci_init_timer(&ihost->timer, controller_timeout); |
1766 | 1642 | ||
1767 | /* Initialize the User and OEM parameters to default values. */ | ||
1768 | sci_controller_set_default_config_parameters(ihost); | ||
1769 | |||
1770 | return sci_controller_reset(ihost); | 1643 | return sci_controller_reset(ihost); |
1771 | } | 1644 | } |
1772 | 1645 | ||
@@ -1846,27 +1719,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version) | |||
1846 | return 0; | 1719 | return 0; |
1847 | } | 1720 | } |
1848 | 1721 | ||
1849 | static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) | ||
1850 | { | ||
1851 | u32 state = ihost->sm.current_state_id; | ||
1852 | struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); | ||
1853 | |||
1854 | if (state == SCIC_RESET || | ||
1855 | state == SCIC_INITIALIZING || | ||
1856 | state == SCIC_INITIALIZED) { | ||
1857 | u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version : | ||
1858 | ISCI_ROM_VER_1_0; | ||
1859 | |||
1860 | if (sci_oem_parameters_validate(&ihost->oem_parameters, | ||
1861 | oem_version)) | ||
1862 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
1863 | |||
1864 | return SCI_SUCCESS; | ||
1865 | } | ||
1866 | |||
1867 | return SCI_FAILURE_INVALID_STATE; | ||
1868 | } | ||
1869 | |||
1870 | static u8 max_spin_up(struct isci_host *ihost) | 1722 | static u8 max_spin_up(struct isci_host *ihost) |
1871 | { | 1723 | { |
1872 | if (ihost->user_parameters.max_concurr_spinup) | 1724 | if (ihost->user_parameters.max_concurr_spinup) |
@@ -1914,7 +1766,7 @@ static void power_control_timeout(unsigned long data) | |||
1914 | ihost->power_control.phys_granted_power++; | 1766 | ihost->power_control.phys_granted_power++; |
1915 | sci_phy_consume_power_handler(iphy); | 1767 | sci_phy_consume_power_handler(iphy); |
1916 | 1768 | ||
1917 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { | 1769 | if (iphy->protocol == SAS_PROTOCOL_SSP) { |
1918 | u8 j; | 1770 | u8 j; |
1919 | 1771 | ||
1920 | for (j = 0; j < SCI_MAX_PHYS; j++) { | 1772 | for (j = 0; j < SCI_MAX_PHYS; j++) { |
@@ -1988,7 +1840,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost, | |||
1988 | sizeof(current_phy->frame_rcvd.iaf.sas_addr)); | 1840 | sizeof(current_phy->frame_rcvd.iaf.sas_addr)); |
1989 | 1841 | ||
1990 | if (current_phy->sm.current_state_id == SCI_PHY_READY && | 1842 | if (current_phy->sm.current_state_id == SCI_PHY_READY && |
1991 | current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS && | 1843 | current_phy->protocol == SAS_PROTOCOL_SSP && |
1992 | other == 0) { | 1844 | other == 0) { |
1993 | sci_phy_consume_power_handler(iphy); | 1845 | sci_phy_consume_power_handler(iphy); |
1994 | break; | 1846 | break; |
@@ -2279,9 +2131,8 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost) | |||
2279 | unsigned long i, state, val; | 2131 | unsigned long i, state, val; |
2280 | 2132 | ||
2281 | if (ihost->sm.current_state_id != SCIC_RESET) { | 2133 | if (ihost->sm.current_state_id != SCIC_RESET) { |
2282 | dev_warn(&ihost->pdev->dev, | 2134 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
2283 | "SCIC Controller initialize operation requested " | 2135 | __func__, ihost->sm.current_state_id); |
2284 | "in invalid state\n"); | ||
2285 | return SCI_FAILURE_INVALID_STATE; | 2136 | return SCI_FAILURE_INVALID_STATE; |
2286 | } | 2137 | } |
2287 | 2138 | ||
@@ -2384,96 +2235,76 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost) | |||
2384 | return result; | 2235 | return result; |
2385 | } | 2236 | } |
2386 | 2237 | ||
2387 | static enum sci_status sci_user_parameters_set(struct isci_host *ihost, | 2238 | static int sci_controller_dma_alloc(struct isci_host *ihost) |
2388 | struct sci_user_parameters *sci_parms) | ||
2389 | { | ||
2390 | u32 state = ihost->sm.current_state_id; | ||
2391 | |||
2392 | if (state == SCIC_RESET || | ||
2393 | state == SCIC_INITIALIZING || | ||
2394 | state == SCIC_INITIALIZED) { | ||
2395 | u16 index; | ||
2396 | |||
2397 | /* | ||
2398 | * Validate the user parameters. If they are not legal, then | ||
2399 | * return a failure. | ||
2400 | */ | ||
2401 | for (index = 0; index < SCI_MAX_PHYS; index++) { | ||
2402 | struct sci_phy_user_params *user_phy; | ||
2403 | |||
2404 | user_phy = &sci_parms->phys[index]; | ||
2405 | |||
2406 | if (!((user_phy->max_speed_generation <= | ||
2407 | SCIC_SDS_PARM_MAX_SPEED) && | ||
2408 | (user_phy->max_speed_generation > | ||
2409 | SCIC_SDS_PARM_NO_SPEED))) | ||
2410 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
2411 | |||
2412 | if (user_phy->in_connection_align_insertion_frequency < | ||
2413 | 3) | ||
2414 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
2415 | |||
2416 | if ((user_phy->in_connection_align_insertion_frequency < | ||
2417 | 3) || | ||
2418 | (user_phy->align_insertion_frequency == 0) || | ||
2419 | (user_phy-> | ||
2420 | notify_enable_spin_up_insertion_frequency == | ||
2421 | 0)) | ||
2422 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
2423 | } | ||
2424 | |||
2425 | if ((sci_parms->stp_inactivity_timeout == 0) || | ||
2426 | (sci_parms->ssp_inactivity_timeout == 0) || | ||
2427 | (sci_parms->stp_max_occupancy_timeout == 0) || | ||
2428 | (sci_parms->ssp_max_occupancy_timeout == 0) || | ||
2429 | (sci_parms->no_outbound_task_timeout == 0)) | ||
2430 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
2431 | |||
2432 | memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); | ||
2433 | |||
2434 | return SCI_SUCCESS; | ||
2435 | } | ||
2436 | |||
2437 | return SCI_FAILURE_INVALID_STATE; | ||
2438 | } | ||
2439 | |||
2440 | static int sci_controller_mem_init(struct isci_host *ihost) | ||
2441 | { | 2239 | { |
2442 | struct device *dev = &ihost->pdev->dev; | 2240 | struct device *dev = &ihost->pdev->dev; |
2443 | dma_addr_t dma; | ||
2444 | size_t size; | 2241 | size_t size; |
2445 | int err; | 2242 | int i; |
2243 | |||
2244 | /* detect re-initialization */ | ||
2245 | if (ihost->completion_queue) | ||
2246 | return 0; | ||
2446 | 2247 | ||
2447 | size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); | 2248 | size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); |
2448 | ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); | 2249 | ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma, |
2250 | GFP_KERNEL); | ||
2449 | if (!ihost->completion_queue) | 2251 | if (!ihost->completion_queue) |
2450 | return -ENOMEM; | 2252 | return -ENOMEM; |
2451 | 2253 | ||
2452 | writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower); | ||
2453 | writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper); | ||
2454 | |||
2455 | size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); | 2254 | size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); |
2456 | ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, | 2255 | ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma, |
2457 | GFP_KERNEL); | 2256 | GFP_KERNEL); |
2257 | |||
2458 | if (!ihost->remote_node_context_table) | 2258 | if (!ihost->remote_node_context_table) |
2459 | return -ENOMEM; | 2259 | return -ENOMEM; |
2460 | 2260 | ||
2461 | writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower); | ||
2462 | writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper); | ||
2463 | |||
2464 | size = ihost->task_context_entries * sizeof(struct scu_task_context), | 2261 | size = ihost->task_context_entries * sizeof(struct scu_task_context), |
2465 | ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); | 2262 | ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma, |
2263 | GFP_KERNEL); | ||
2466 | if (!ihost->task_context_table) | 2264 | if (!ihost->task_context_table) |
2467 | return -ENOMEM; | 2265 | return -ENOMEM; |
2468 | 2266 | ||
2469 | ihost->task_context_dma = dma; | 2267 | size = SCI_UFI_TOTAL_SIZE; |
2470 | writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); | 2268 | ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL); |
2471 | writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); | 2269 | if (!ihost->ufi_buf) |
2270 | return -ENOMEM; | ||
2271 | |||
2272 | for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { | ||
2273 | struct isci_request *ireq; | ||
2274 | dma_addr_t dma; | ||
2275 | |||
2276 | ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL); | ||
2277 | if (!ireq) | ||
2278 | return -ENOMEM; | ||
2279 | |||
2280 | ireq->tc = &ihost->task_context_table[i]; | ||
2281 | ireq->owning_controller = ihost; | ||
2282 | ireq->request_daddr = dma; | ||
2283 | ireq->isci_host = ihost; | ||
2284 | ihost->reqs[i] = ireq; | ||
2285 | } | ||
2286 | |||
2287 | return 0; | ||
2288 | } | ||
2289 | |||
2290 | static int sci_controller_mem_init(struct isci_host *ihost) | ||
2291 | { | ||
2292 | int err = sci_controller_dma_alloc(ihost); | ||
2472 | 2293 | ||
2473 | err = sci_unsolicited_frame_control_construct(ihost); | ||
2474 | if (err) | 2294 | if (err) |
2475 | return err; | 2295 | return err; |
2476 | 2296 | ||
2297 | writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower); | ||
2298 | writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper); | ||
2299 | |||
2300 | writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower); | ||
2301 | writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper); | ||
2302 | |||
2303 | writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower); | ||
2304 | writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper); | ||
2305 | |||
2306 | sci_unsolicited_frame_control_construct(ihost); | ||
2307 | |||
2477 | /* | 2308 | /* |
2478 | * Inform the silicon as to the location of the UF headers and | 2309 | * Inform the silicon as to the location of the UF headers and |
2479 | * address table. | 2310 | * address table. |
@@ -2491,22 +2322,22 @@ static int sci_controller_mem_init(struct isci_host *ihost) | |||
2491 | return 0; | 2322 | return 0; |
2492 | } | 2323 | } |
2493 | 2324 | ||
2325 | /** | ||
2326 | * isci_host_init - (re-)initialize hardware and internal (private) state | ||
2327 | * @ihost: host to init | ||
2328 | * | ||
2329 | * Any public facing objects (like asd_sas_port, and asd_sas_phys), or | ||
2330 | * one-time initialization objects like locks and waitqueues, are | ||
2331 | * not touched (they are initialized in isci_host_alloc) | ||
2332 | */ | ||
2494 | int isci_host_init(struct isci_host *ihost) | 2333 | int isci_host_init(struct isci_host *ihost) |
2495 | { | 2334 | { |
2496 | int err = 0, i; | 2335 | int i, err; |
2497 | enum sci_status status; | 2336 | enum sci_status status; |
2498 | struct sci_user_parameters sci_user_params; | ||
2499 | struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); | ||
2500 | |||
2501 | spin_lock_init(&ihost->state_lock); | ||
2502 | spin_lock_init(&ihost->scic_lock); | ||
2503 | init_waitqueue_head(&ihost->eventq); | ||
2504 | |||
2505 | isci_host_change_state(ihost, isci_starting); | ||
2506 | |||
2507 | status = sci_controller_construct(ihost, scu_base(ihost), | ||
2508 | smu_base(ihost)); | ||
2509 | 2337 | ||
2338 | spin_lock_irq(&ihost->scic_lock); | ||
2339 | status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost)); | ||
2340 | spin_unlock_irq(&ihost->scic_lock); | ||
2510 | if (status != SCI_SUCCESS) { | 2341 | if (status != SCI_SUCCESS) { |
2511 | dev_err(&ihost->pdev->dev, | 2342 | dev_err(&ihost->pdev->dev, |
2512 | "%s: sci_controller_construct failed - status = %x\n", | 2343 | "%s: sci_controller_construct failed - status = %x\n", |
@@ -2515,48 +2346,6 @@ int isci_host_init(struct isci_host *ihost) | |||
2515 | return -ENODEV; | 2346 | return -ENODEV; |
2516 | } | 2347 | } |
2517 | 2348 | ||
2518 | ihost->sas_ha.dev = &ihost->pdev->dev; | ||
2519 | ihost->sas_ha.lldd_ha = ihost; | ||
2520 | |||
2521 | /* | ||
2522 | * grab initial values stored in the controller object for OEM and USER | ||
2523 | * parameters | ||
2524 | */ | ||
2525 | isci_user_parameters_get(&sci_user_params); | ||
2526 | status = sci_user_parameters_set(ihost, &sci_user_params); | ||
2527 | if (status != SCI_SUCCESS) { | ||
2528 | dev_warn(&ihost->pdev->dev, | ||
2529 | "%s: sci_user_parameters_set failed\n", | ||
2530 | __func__); | ||
2531 | return -ENODEV; | ||
2532 | } | ||
2533 | |||
2534 | /* grab any OEM parameters specified in orom */ | ||
2535 | if (pci_info->orom) { | ||
2536 | status = isci_parse_oem_parameters(&ihost->oem_parameters, | ||
2537 | pci_info->orom, | ||
2538 | ihost->id); | ||
2539 | if (status != SCI_SUCCESS) { | ||
2540 | dev_warn(&ihost->pdev->dev, | ||
2541 | "parsing firmware oem parameters failed\n"); | ||
2542 | return -EINVAL; | ||
2543 | } | ||
2544 | } | ||
2545 | |||
2546 | status = sci_oem_parameters_set(ihost); | ||
2547 | if (status != SCI_SUCCESS) { | ||
2548 | dev_warn(&ihost->pdev->dev, | ||
2549 | "%s: sci_oem_parameters_set failed\n", | ||
2550 | __func__); | ||
2551 | return -ENODEV; | ||
2552 | } | ||
2553 | |||
2554 | tasklet_init(&ihost->completion_tasklet, | ||
2555 | isci_host_completion_routine, (unsigned long)ihost); | ||
2556 | |||
2557 | INIT_LIST_HEAD(&ihost->requests_to_complete); | ||
2558 | INIT_LIST_HEAD(&ihost->requests_to_errorback); | ||
2559 | |||
2560 | spin_lock_irq(&ihost->scic_lock); | 2349 | spin_lock_irq(&ihost->scic_lock); |
2561 | status = sci_controller_initialize(ihost); | 2350 | status = sci_controller_initialize(ihost); |
2562 | spin_unlock_irq(&ihost->scic_lock); | 2351 | spin_unlock_irq(&ihost->scic_lock); |
@@ -2572,43 +2361,12 @@ int isci_host_init(struct isci_host *ihost) | |||
2572 | if (err) | 2361 | if (err) |
2573 | return err; | 2362 | return err; |
2574 | 2363 | ||
2575 | for (i = 0; i < SCI_MAX_PORTS; i++) | ||
2576 | isci_port_init(&ihost->ports[i], ihost, i); | ||
2577 | |||
2578 | for (i = 0; i < SCI_MAX_PHYS; i++) | ||
2579 | isci_phy_init(&ihost->phys[i], ihost, i); | ||
2580 | |||
2581 | /* enable sgpio */ | 2364 | /* enable sgpio */ |
2582 | writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); | 2365 | writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); |
2583 | for (i = 0; i < isci_gpio_count(ihost); i++) | 2366 | for (i = 0; i < isci_gpio_count(ihost); i++) |
2584 | writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); | 2367 | writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); |
2585 | writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); | 2368 | writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); |
2586 | 2369 | ||
2587 | for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { | ||
2588 | struct isci_remote_device *idev = &ihost->devices[i]; | ||
2589 | |||
2590 | INIT_LIST_HEAD(&idev->reqs_in_process); | ||
2591 | INIT_LIST_HEAD(&idev->node); | ||
2592 | } | ||
2593 | |||
2594 | for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { | ||
2595 | struct isci_request *ireq; | ||
2596 | dma_addr_t dma; | ||
2597 | |||
2598 | ireq = dmam_alloc_coherent(&ihost->pdev->dev, | ||
2599 | sizeof(struct isci_request), &dma, | ||
2600 | GFP_KERNEL); | ||
2601 | if (!ireq) | ||
2602 | return -ENOMEM; | ||
2603 | |||
2604 | ireq->tc = &ihost->task_context_table[i]; | ||
2605 | ireq->owning_controller = ihost; | ||
2606 | spin_lock_init(&ireq->state_lock); | ||
2607 | ireq->request_daddr = dma; | ||
2608 | ireq->isci_host = ihost; | ||
2609 | ihost->reqs[i] = ireq; | ||
2610 | } | ||
2611 | |||
2612 | return 0; | 2370 | return 0; |
2613 | } | 2371 | } |
2614 | 2372 | ||
@@ -2654,7 +2412,7 @@ void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, | |||
2654 | } | 2412 | } |
2655 | } | 2413 | } |
2656 | 2414 | ||
2657 | static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) | 2415 | bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) |
2658 | { | 2416 | { |
2659 | u32 index; | 2417 | u32 index; |
2660 | 2418 | ||
@@ -2680,7 +2438,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost, | |||
2680 | } | 2438 | } |
2681 | 2439 | ||
2682 | if (!sci_controller_has_remote_devices_stopping(ihost)) | 2440 | if (!sci_controller_has_remote_devices_stopping(ihost)) |
2683 | sci_change_state(&ihost->sm, SCIC_STOPPED); | 2441 | isci_host_stop_complete(ihost); |
2684 | } | 2442 | } |
2685 | 2443 | ||
2686 | void sci_controller_post_request(struct isci_host *ihost, u32 request) | 2444 | void sci_controller_post_request(struct isci_host *ihost, u32 request) |
@@ -2842,7 +2600,8 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost, | |||
2842 | enum sci_status status; | 2600 | enum sci_status status; |
2843 | 2601 | ||
2844 | if (ihost->sm.current_state_id != SCIC_READY) { | 2602 | if (ihost->sm.current_state_id != SCIC_READY) { |
2845 | dev_warn(&ihost->pdev->dev, "invalid state to start I/O"); | 2603 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
2604 | __func__, ihost->sm.current_state_id); | ||
2846 | return SCI_FAILURE_INVALID_STATE; | 2605 | return SCI_FAILURE_INVALID_STATE; |
2847 | } | 2606 | } |
2848 | 2607 | ||
@@ -2866,22 +2625,26 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost, | |||
2866 | enum sci_status status; | 2625 | enum sci_status status; |
2867 | 2626 | ||
2868 | if (ihost->sm.current_state_id != SCIC_READY) { | 2627 | if (ihost->sm.current_state_id != SCIC_READY) { |
2869 | dev_warn(&ihost->pdev->dev, | 2628 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
2870 | "invalid state to terminate request\n"); | 2629 | __func__, ihost->sm.current_state_id); |
2871 | return SCI_FAILURE_INVALID_STATE; | 2630 | return SCI_FAILURE_INVALID_STATE; |
2872 | } | 2631 | } |
2873 | |||
2874 | status = sci_io_request_terminate(ireq); | 2632 | status = sci_io_request_terminate(ireq); |
2875 | if (status != SCI_SUCCESS) | ||
2876 | return status; | ||
2877 | 2633 | ||
2878 | /* | 2634 | dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n", |
2879 | * Utilize the original post context command and or in the POST_TC_ABORT | 2635 | __func__, status, ireq, ireq->flags); |
2880 | * request sub-type. | 2636 | |
2881 | */ | 2637 | if ((status == SCI_SUCCESS) && |
2882 | sci_controller_post_request(ihost, | 2638 | !test_bit(IREQ_PENDING_ABORT, &ireq->flags) && |
2883 | ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); | 2639 | !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) { |
2884 | return SCI_SUCCESS; | 2640 | /* Utilize the original post context command and or in the |
2641 | * POST_TC_ABORT request sub-type. | ||
2642 | */ | ||
2643 | sci_controller_post_request( | ||
2644 | ihost, ireq->post_context | | ||
2645 | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); | ||
2646 | } | ||
2647 | return status; | ||
2885 | } | 2648 | } |
2886 | 2649 | ||
2887 | /** | 2650 | /** |
@@ -2915,7 +2678,8 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost, | |||
2915 | clear_bit(IREQ_ACTIVE, &ireq->flags); | 2678 | clear_bit(IREQ_ACTIVE, &ireq->flags); |
2916 | return SCI_SUCCESS; | 2679 | return SCI_SUCCESS; |
2917 | default: | 2680 | default: |
2918 | dev_warn(&ihost->pdev->dev, "invalid state to complete I/O"); | 2681 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
2682 | __func__, ihost->sm.current_state_id); | ||
2919 | return SCI_FAILURE_INVALID_STATE; | 2683 | return SCI_FAILURE_INVALID_STATE; |
2920 | } | 2684 | } |
2921 | 2685 | ||
@@ -2926,7 +2690,8 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq) | |||
2926 | struct isci_host *ihost = ireq->owning_controller; | 2690 | struct isci_host *ihost = ireq->owning_controller; |
2927 | 2691 | ||
2928 | if (ihost->sm.current_state_id != SCIC_READY) { | 2692 | if (ihost->sm.current_state_id != SCIC_READY) { |
2929 | dev_warn(&ihost->pdev->dev, "invalid state to continue I/O"); | 2693 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
2694 | __func__, ihost->sm.current_state_id); | ||
2930 | return SCI_FAILURE_INVALID_STATE; | 2695 | return SCI_FAILURE_INVALID_STATE; |
2931 | } | 2696 | } |
2932 | 2697 | ||
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index adbad69d1069..9ab58e0540e7 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h | |||
@@ -55,6 +55,7 @@ | |||
55 | #ifndef _SCI_HOST_H_ | 55 | #ifndef _SCI_HOST_H_ |
56 | #define _SCI_HOST_H_ | 56 | #define _SCI_HOST_H_ |
57 | 57 | ||
58 | #include <scsi/sas_ata.h> | ||
58 | #include "remote_device.h" | 59 | #include "remote_device.h" |
59 | #include "phy.h" | 60 | #include "phy.h" |
60 | #include "isci.h" | 61 | #include "isci.h" |
@@ -108,6 +109,8 @@ struct sci_port_configuration_agent; | |||
108 | typedef void (*port_config_fn)(struct isci_host *, | 109 | typedef void (*port_config_fn)(struct isci_host *, |
109 | struct sci_port_configuration_agent *, | 110 | struct sci_port_configuration_agent *, |
110 | struct isci_port *, struct isci_phy *); | 111 | struct isci_port *, struct isci_phy *); |
112 | bool is_port_config_apc(struct isci_host *ihost); | ||
113 | bool is_controller_start_complete(struct isci_host *ihost); | ||
111 | 114 | ||
112 | struct sci_port_configuration_agent { | 115 | struct sci_port_configuration_agent { |
113 | u16 phy_configured_mask; | 116 | u16 phy_configured_mask; |
@@ -157,13 +160,17 @@ struct isci_host { | |||
157 | struct sci_power_control power_control; | 160 | struct sci_power_control power_control; |
158 | u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; | 161 | u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; |
159 | struct scu_task_context *task_context_table; | 162 | struct scu_task_context *task_context_table; |
160 | dma_addr_t task_context_dma; | 163 | dma_addr_t tc_dma; |
161 | union scu_remote_node_context *remote_node_context_table; | 164 | union scu_remote_node_context *remote_node_context_table; |
165 | dma_addr_t rnc_dma; | ||
162 | u32 *completion_queue; | 166 | u32 *completion_queue; |
167 | dma_addr_t cq_dma; | ||
163 | u32 completion_queue_get; | 168 | u32 completion_queue_get; |
164 | u32 logical_port_entries; | 169 | u32 logical_port_entries; |
165 | u32 remote_node_entries; | 170 | u32 remote_node_entries; |
166 | u32 task_context_entries; | 171 | u32 task_context_entries; |
172 | void *ufi_buf; | ||
173 | dma_addr_t ufi_dma; | ||
167 | struct sci_unsolicited_frame_control uf_control; | 174 | struct sci_unsolicited_frame_control uf_control; |
168 | 175 | ||
169 | /* phy startup */ | 176 | /* phy startup */ |
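Each DMA-backed table now keeps its bus handle (tc_dma, rnc_dma, cq_dma, ufi_dma) next to the CPU pointer, so the hardware can be re-pointed at the same buffers on resume without re-deriving addresses. A hedged sketch of the pattern this enables; the register names below are illustrative assumptions, not taken from this hunk:

```c
/* allocate once, remember both the CPU pointer and the dma_addr_t */
size_t size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);

ihost->completion_queue = dmam_alloc_coherent(&ihost->pdev->dev, size,
					      &ihost->cq_dma, GFP_KERNEL);
if (!ihost->completion_queue)
	return -ENOMEM;

/* later (e.g. on resume), reprogram the controller from the saved handle */
writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
```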
@@ -190,17 +197,13 @@ struct isci_host { | |||
190 | struct asd_sas_port sas_ports[SCI_MAX_PORTS]; | 197 | struct asd_sas_port sas_ports[SCI_MAX_PORTS]; |
191 | struct sas_ha_struct sas_ha; | 198 | struct sas_ha_struct sas_ha; |
192 | 199 | ||
193 | spinlock_t state_lock; | ||
194 | struct pci_dev *pdev; | 200 | struct pci_dev *pdev; |
195 | enum isci_status status; | ||
196 | #define IHOST_START_PENDING 0 | 201 | #define IHOST_START_PENDING 0 |
197 | #define IHOST_STOP_PENDING 1 | 202 | #define IHOST_STOP_PENDING 1 |
203 | #define IHOST_IRQ_ENABLED 2 | ||
198 | unsigned long flags; | 204 | unsigned long flags; |
199 | wait_queue_head_t eventq; | 205 | wait_queue_head_t eventq; |
200 | struct Scsi_Host *shost; | ||
201 | struct tasklet_struct completion_tasklet; | 206 | struct tasklet_struct completion_tasklet; |
202 | struct list_head requests_to_complete; | ||
203 | struct list_head requests_to_errorback; | ||
204 | spinlock_t scic_lock; | 207 | spinlock_t scic_lock; |
205 | struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; | 208 | struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; |
206 | struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; | 209 | struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; |
@@ -274,13 +277,6 @@ enum sci_controller_states { | |||
274 | SCIC_STOPPING, | 277 | SCIC_STOPPING, |
275 | 278 | ||
276 | /** | 279 | /** |
277 | * This state indicates that the controller has successfully been stopped. | ||
278 | * In this state no new IO operations are permitted. | ||
279 | * This state is entered from the STOPPING state. | ||
280 | */ | ||
281 | SCIC_STOPPED, | ||
282 | |||
283 | /** | ||
284 | * This state indicates that the controller could not successfully be | 280 | * This state indicates that the controller could not successfully be |
285 | * initialized. In this state no new IO operations are permitted. | 281 | * initialized. In this state no new IO operations are permitted. |
286 | * This state is entered from the INITIALIZING state. | 282 | * This state is entered from the INITIALIZING state. |
@@ -309,32 +305,16 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev) | |||
309 | return pci_get_drvdata(pdev); | 305 | return pci_get_drvdata(pdev); |
310 | } | 306 | } |
311 | 307 | ||
308 | static inline struct Scsi_Host *to_shost(struct isci_host *ihost) | ||
309 | { | ||
310 | return ihost->sas_ha.core.shost; | ||
311 | } | ||
312 | |||
312 | #define for_each_isci_host(id, ihost, pdev) \ | 313 | #define for_each_isci_host(id, ihost, pdev) \ |
313 | for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ | 314 | for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ |
314 | id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ | 315 | id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ |
315 | ihost = to_pci_info(pdev)->hosts[++id]) | 316 | ihost = to_pci_info(pdev)->hosts[++id]) |
316 | 317 | ||
317 | static inline enum isci_status isci_host_get_state(struct isci_host *isci_host) | ||
318 | { | ||
319 | return isci_host->status; | ||
320 | } | ||
321 | |||
322 | static inline void isci_host_change_state(struct isci_host *isci_host, | ||
323 | enum isci_status status) | ||
324 | { | ||
325 | unsigned long flags; | ||
326 | |||
327 | dev_dbg(&isci_host->pdev->dev, | ||
328 | "%s: isci_host = %p, state = 0x%x", | ||
329 | __func__, | ||
330 | isci_host, | ||
331 | status); | ||
332 | spin_lock_irqsave(&isci_host->state_lock, flags); | ||
333 | isci_host->status = status; | ||
334 | spin_unlock_irqrestore(&isci_host->state_lock, flags); | ||
335 | |||
336 | } | ||
337 | |||
338 | static inline void wait_for_start(struct isci_host *ihost) | 318 | static inline void wait_for_start(struct isci_host *ihost) |
339 | { | 319 | { |
340 | wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); | 320 | wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); |
@@ -360,6 +340,11 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) | |||
360 | return dev->port->ha->lldd_ha; | 340 | return dev->port->ha->lldd_ha; |
361 | } | 341 | } |
362 | 342 | ||
343 | static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev) | ||
344 | { | ||
345 | return dev_to_ihost(idev->domain_dev); | ||
346 | } | ||
347 | |||
363 | /* we always use protocol engine group zero */ | 348 | /* we always use protocol engine group zero */ |
364 | #define ISCI_PEG 0 | 349 | #define ISCI_PEG 0 |
365 | 350 | ||
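The new idev_to_ihost() recovers the host by chaining through libsas (dev->port->ha->lldd_ha, via dev_to_ihost()), letting remote-device code drop cached host back-pointers. A hypothetical caller, for illustration only:

```c
/* hypothetical helper: reach the host from a remote device */
static void example_dbg(struct isci_remote_device *idev)
{
	struct isci_host *ihost = idev_to_ihost(idev);

	dev_dbg(&ihost->pdev->dev, "%s: idev = %p\n", __func__, idev);
}
```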
@@ -378,8 +363,7 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev) | |||
378 | { | 363 | { |
379 | struct domain_device *dev = idev->domain_dev; | 364 | struct domain_device *dev = idev->domain_dev; |
380 | 365 | ||
381 | if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && | 366 | if (dev_is_sata(dev) && dev->parent) |
382 | !idev->is_direct_attached) | ||
383 | return SCU_STP_REMOTE_NODE_COUNT; | 367 | return SCU_STP_REMOTE_NODE_COUNT; |
384 | return SCU_SSP_REMOTE_NODE_COUNT; | 368 | return SCU_SSP_REMOTE_NODE_COUNT; |
385 | } | 369 | } |
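The open-coded SATA/STP test and the driver-private is_direct_attached flag collapse into two libsas facts, spelled out below: dev_is_sata() classifies the device, and a non-NULL dev->parent means an expander sits in the path, so the device is reached via STP and needs the larger remote-node context.

```c
/* old predicate, open-coded:
 *   (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
 *       && !idev->is_direct_attached
 * new predicate, via libsas:
 *   dev_is_sata(dev) && dev->parent
 *
 * dev->parent != NULL stands in for "not direct attached": only
 * expander-attached SATA/STP devices get SCU_STP_REMOTE_NODE_COUNT.
 */
```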
@@ -475,36 +459,17 @@ void sci_controller_free_remote_node_context( | |||
475 | struct isci_remote_device *idev, | 459 | struct isci_remote_device *idev, |
476 | u16 node_id); | 460 | u16 node_id); |
477 | 461 | ||
478 | struct isci_request *sci_request_by_tag(struct isci_host *ihost, | 462 | struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag); |
479 | u16 io_tag); | 463 | void sci_controller_power_control_queue_insert(struct isci_host *ihost, |
480 | 464 | struct isci_phy *iphy); | |
481 | void sci_controller_power_control_queue_insert( | 465 | void sci_controller_power_control_queue_remove(struct isci_host *ihost, |
482 | struct isci_host *ihost, | 466 | struct isci_phy *iphy); |
483 | struct isci_phy *iphy); | 467 | void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, |
484 | 468 | struct isci_phy *iphy); | |
485 | void sci_controller_power_control_queue_remove( | 469 | void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, |
486 | struct isci_host *ihost, | 470 | struct isci_phy *iphy); |
487 | struct isci_phy *iphy); | 471 | void sci_controller_remote_device_stopped(struct isci_host *ihost, |
488 | 472 | struct isci_remote_device *idev); | |
489 | void sci_controller_link_up( | ||
490 | struct isci_host *ihost, | ||
491 | struct isci_port *iport, | ||
492 | struct isci_phy *iphy); | ||
493 | |||
494 | void sci_controller_link_down( | ||
495 | struct isci_host *ihost, | ||
496 | struct isci_port *iport, | ||
497 | struct isci_phy *iphy); | ||
498 | |||
499 | void sci_controller_remote_device_stopped( | ||
500 | struct isci_host *ihost, | ||
501 | struct isci_remote_device *idev); | ||
502 | |||
503 | void sci_controller_copy_task_context( | ||
504 | struct isci_host *ihost, | ||
505 | struct isci_request *ireq); | ||
506 | |||
507 | void sci_controller_register_setup(struct isci_host *ihost); | ||
508 | 473 | ||
509 | enum sci_status sci_controller_continue_io(struct isci_request *ireq); | 474 | enum sci_status sci_controller_continue_io(struct isci_request *ireq); |
510 | int isci_host_scan_finished(struct Scsi_Host *, unsigned long); | 475 | int isci_host_scan_finished(struct Scsi_Host *, unsigned long); |
@@ -512,29 +477,14 @@ void isci_host_scan_start(struct Scsi_Host *); | |||
512 | u16 isci_alloc_tag(struct isci_host *ihost); | 477 | u16 isci_alloc_tag(struct isci_host *ihost); |
513 | enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); | 478 | enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); |
514 | void isci_tci_free(struct isci_host *ihost, u16 tci); | 479 | void isci_tci_free(struct isci_host *ihost, u16 tci); |
480 | void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task); | ||
515 | 481 | ||
516 | int isci_host_init(struct isci_host *); | 482 | int isci_host_init(struct isci_host *); |
517 | 483 | void isci_host_completion_routine(unsigned long data); | |
518 | void isci_host_init_controller_names( | 484 | void isci_host_deinit(struct isci_host *); |
519 | struct isci_host *isci_host, | 485 | void sci_controller_disable_interrupts(struct isci_host *ihost); |
520 | unsigned int controller_idx); | 486 | bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost); |
521 | 487 | void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status); | |
522 | void isci_host_deinit( | ||
523 | struct isci_host *); | ||
524 | |||
525 | void isci_host_port_link_up( | ||
526 | struct isci_host *, | ||
527 | struct isci_port *, | ||
528 | struct isci_phy *); | ||
529 | int isci_host_dev_found(struct domain_device *); | ||
530 | |||
531 | void isci_host_remote_device_start_complete( | ||
532 | struct isci_host *, | ||
533 | struct isci_remote_device *, | ||
534 | enum sci_status); | ||
535 | |||
536 | void sci_controller_disable_interrupts( | ||
537 | struct isci_host *ihost); | ||
538 | 488 | ||
539 | enum sci_status sci_controller_start_io( | 489 | enum sci_status sci_controller_start_io( |
540 | struct isci_host *ihost, | 490 | struct isci_host *ihost, |
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 5137db5a5d85..47e28b555029 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
@@ -271,13 +271,12 @@ static void isci_unregister(struct isci_host *isci_host) | |||
271 | if (!isci_host) | 271 | if (!isci_host) |
272 | return; | 272 | return; |
273 | 273 | ||
274 | shost = isci_host->shost; | ||
275 | |||
276 | sas_unregister_ha(&isci_host->sas_ha); | 274 | sas_unregister_ha(&isci_host->sas_ha); |
277 | 275 | ||
278 | sas_remove_host(isci_host->shost); | 276 | shost = to_shost(isci_host); |
279 | scsi_remove_host(isci_host->shost); | 277 | sas_remove_host(shost); |
280 | scsi_host_put(isci_host->shost); | 278 | scsi_remove_host(shost); |
279 | scsi_host_put(shost); | ||
281 | } | 280 | } |
282 | 281 | ||
283 | static int __devinit isci_pci_init(struct pci_dev *pdev) | 282 | static int __devinit isci_pci_init(struct pci_dev *pdev) |
@@ -397,38 +396,199 @@ static int isci_setup_interrupts(struct pci_dev *pdev) | |||
397 | return err; | 396 | return err; |
398 | } | 397 | } |
399 | 398 | ||
399 | static void isci_user_parameters_get(struct sci_user_parameters *u) | ||
400 | { | ||
401 | int i; | ||
402 | |||
403 | for (i = 0; i < SCI_MAX_PHYS; i++) { | ||
404 | struct sci_phy_user_params *u_phy = &u->phys[i]; | ||
405 | |||
406 | u_phy->max_speed_generation = phy_gen; | ||
407 | |||
408 | /* we are not exporting these for now */ | ||
409 | u_phy->align_insertion_frequency = 0x7f; | ||
410 | u_phy->in_connection_align_insertion_frequency = 0xff; | ||
411 | u_phy->notify_enable_spin_up_insertion_frequency = 0x33; | ||
412 | } | ||
413 | |||
414 | u->stp_inactivity_timeout = stp_inactive_to; | ||
415 | u->ssp_inactivity_timeout = ssp_inactive_to; | ||
416 | u->stp_max_occupancy_timeout = stp_max_occ_to; | ||
417 | u->ssp_max_occupancy_timeout = ssp_max_occ_to; | ||
418 | u->no_outbound_task_timeout = no_outbound_task_to; | ||
419 | u->max_concurr_spinup = max_concurr_spinup; | ||
420 | } | ||
421 | |||
422 | static enum sci_status sci_user_parameters_set(struct isci_host *ihost, | ||
423 | struct sci_user_parameters *sci_parms) | ||
424 | { | ||
425 | u16 index; | ||
426 | |||
427 | /* | ||
428 | * Validate the user parameters. If they are not legal, then | ||
429 | * return a failure. | ||
430 | */ | ||
431 | for (index = 0; index < SCI_MAX_PHYS; index++) { | ||
432 | struct sci_phy_user_params *u; | ||
433 | |||
434 | u = &sci_parms->phys[index]; | ||
435 | |||
436 | if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) && | ||
437 | (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED))) | ||
438 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
439 | |||
440 | if (u->in_connection_align_insertion_frequency < 3) | ||
441 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
442 | |||
443 | if ((u->in_connection_align_insertion_frequency < 3) || | ||
444 | (u->align_insertion_frequency == 0) || | ||
445 | (u->notify_enable_spin_up_insertion_frequency == 0)) | ||
446 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
447 | } | ||
448 | |||
449 | if ((sci_parms->stp_inactivity_timeout == 0) || | ||
450 | (sci_parms->ssp_inactivity_timeout == 0) || | ||
451 | (sci_parms->stp_max_occupancy_timeout == 0) || | ||
452 | (sci_parms->ssp_max_occupancy_timeout == 0) || | ||
453 | (sci_parms->no_outbound_task_timeout == 0)) | ||
454 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | ||
455 | |||
456 | memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); | ||
457 | |||
458 | return SCI_SUCCESS; | ||
459 | } | ||
460 | |||
461 | static void sci_oem_defaults(struct isci_host *ihost) | ||
462 | { | ||
463 | /* these defaults are overridden by the platform / firmware */ | ||
464 | struct sci_user_parameters *user = &ihost->user_parameters; | ||
465 | struct sci_oem_params *oem = &ihost->oem_parameters; | ||
466 | int i; | ||
467 | |||
468 | /* Default to APC mode. */ | ||
469 | oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; | ||
470 | |||
471 | /* Default to a single concurrent spin-up. */ | ||
472 | oem->controller.max_concurr_spin_up = 1; | ||
473 | |||
474 | /* Default to no SSC operation. */ | ||
475 | oem->controller.do_enable_ssc = false; | ||
476 | |||
477 | /* Default to short cables on all phys. */ | ||
478 | oem->controller.cable_selection_mask = 0; | ||
479 | |||
480 | /* Initialize all of the port parameter information to narrow ports. */ | ||
481 | for (i = 0; i < SCI_MAX_PORTS; i++) | ||
482 | oem->ports[i].phy_mask = 0; | ||
483 | |||
484 | /* Initialize all of the phy parameter information. */ | ||
485 | for (i = 0; i < SCI_MAX_PHYS; i++) { | ||
486 | /* Default to 3G (i.e. Gen 2). */ | ||
487 | user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED; | ||
488 | |||
489 | /* the frequencies cannot be 0 */ | ||
490 | user->phys[i].align_insertion_frequency = 0x7f; | ||
491 | user->phys[i].in_connection_align_insertion_frequency = 0xff; | ||
492 | user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33; | ||
493 | |||
494 | /* Previous Vitesse-based expanders had an arbitration issue that | ||
495 | * is worked around by having the upper 32-bits of SAS address | ||
496 | * with a value greater than the Vitesse company identifier. | ||
497 | * Hence, usage of 0x5FCFFFFF. | ||
498 | */ | ||
499 | oem->phys[i].sas_address.low = 0x1 + ihost->id; | ||
500 | oem->phys[i].sas_address.high = 0x5FCFFFFF; | ||
501 | } | ||
502 | |||
503 | user->stp_inactivity_timeout = 5; | ||
504 | user->ssp_inactivity_timeout = 5; | ||
505 | user->stp_max_occupancy_timeout = 5; | ||
506 | user->ssp_max_occupancy_timeout = 20; | ||
507 | user->no_outbound_task_timeout = 2; | ||
508 | } | ||
509 | |||
400 | static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | 510 | static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) |
401 | { | 511 | { |
402 | struct isci_host *isci_host; | 512 | struct isci_orom *orom = to_pci_info(pdev)->orom; |
513 | struct sci_user_parameters sci_user_params; | ||
514 | u8 oem_version = ISCI_ROM_VER_1_0; | ||
515 | struct isci_host *ihost; | ||
403 | struct Scsi_Host *shost; | 516 | struct Scsi_Host *shost; |
404 | int err; | 517 | int err, i; |
405 | 518 | ||
406 | isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL); | 519 | ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL); |
407 | if (!isci_host) | 520 | if (!ihost) |
408 | return NULL; | 521 | return NULL; |
409 | 522 | ||
410 | isci_host->pdev = pdev; | 523 | ihost->pdev = pdev; |
411 | isci_host->id = id; | 524 | ihost->id = id; |
525 | spin_lock_init(&ihost->scic_lock); | ||
526 | init_waitqueue_head(&ihost->eventq); | ||
527 | ihost->sas_ha.dev = &ihost->pdev->dev; | ||
528 | ihost->sas_ha.lldd_ha = ihost; | ||
529 | tasklet_init(&ihost->completion_tasklet, | ||
530 | isci_host_completion_routine, (unsigned long)ihost); | ||
531 | |||
532 | /* validate module parameters */ | ||
533 | /* TODO: kill struct sci_user_parameters and reference directly */ | ||
534 | sci_oem_defaults(ihost); | ||
535 | isci_user_parameters_get(&sci_user_params); | ||
536 | if (sci_user_parameters_set(ihost, &sci_user_params)) { | ||
537 | dev_warn(&pdev->dev, | ||
538 | "%s: sci_user_parameters_set failed\n", __func__); | ||
539 | return NULL; | ||
540 | } | ||
541 | |||
542 | /* sanity check platform (or 'firmware') oem parameters */ | ||
543 | if (orom) { | ||
544 | if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) { | ||
545 | dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n"); | ||
546 | return NULL; | ||
547 | } | ||
548 | ihost->oem_parameters = orom->ctrl[id]; | ||
549 | oem_version = orom->hdr.version; | ||
550 | } | ||
551 | |||
552 | /* validate oem parameters (platform, firmware, or built-in defaults) */ | ||
553 | if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) { | ||
554 | dev_warn(&pdev->dev, "oem parameter validation failed\n"); | ||
555 | return NULL; | ||
556 | } | ||
557 | |||
558 | for (i = 0; i < SCI_MAX_PORTS; i++) { | ||
559 | struct isci_port *iport = &ihost->ports[i]; | ||
560 | |||
561 | INIT_LIST_HEAD(&iport->remote_dev_list); | ||
562 | iport->isci_host = ihost; | ||
563 | } | ||
564 | |||
565 | for (i = 0; i < SCI_MAX_PHYS; i++) | ||
566 | isci_phy_init(&ihost->phys[i], ihost, i); | ||
567 | |||
568 | for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { | ||
569 | struct isci_remote_device *idev = &ihost->devices[i]; | ||
570 | |||
571 | INIT_LIST_HEAD(&idev->node); | ||
572 | } | ||
412 | 573 | ||
413 | shost = scsi_host_alloc(&isci_sht, sizeof(void *)); | 574 | shost = scsi_host_alloc(&isci_sht, sizeof(void *)); |
414 | if (!shost) | 575 | if (!shost) |
415 | return NULL; | 576 | return NULL; |
416 | isci_host->shost = shost; | ||
417 | 577 | ||
418 | dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: " | 578 | dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: " |
419 | "{%s, %s, %s, %s}\n", | 579 | "{%s, %s, %s, %s}\n", |
420 | (is_cable_select_overridden() ? "* " : ""), isci_host->id, | 580 | (is_cable_select_overridden() ? "* " : ""), ihost->id, |
421 | lookup_cable_names(decode_cable_selection(isci_host, 3)), | 581 | lookup_cable_names(decode_cable_selection(ihost, 3)), |
422 | lookup_cable_names(decode_cable_selection(isci_host, 2)), | 582 | lookup_cable_names(decode_cable_selection(ihost, 2)), |
423 | lookup_cable_names(decode_cable_selection(isci_host, 1)), | 583 | lookup_cable_names(decode_cable_selection(ihost, 1)), |
424 | lookup_cable_names(decode_cable_selection(isci_host, 0))); | 584 | lookup_cable_names(decode_cable_selection(ihost, 0))); |
425 | 585 | ||
426 | err = isci_host_init(isci_host); | 586 | err = isci_host_init(ihost); |
427 | if (err) | 587 | if (err) |
428 | goto err_shost; | 588 | goto err_shost; |
429 | 589 | ||
430 | SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha; | 590 | SHOST_TO_SAS_HA(shost) = &ihost->sas_ha; |
431 | isci_host->sas_ha.core.shost = shost; | 591 | ihost->sas_ha.core.shost = shost; |
432 | shost->transportt = isci_transport_template; | 592 | shost->transportt = isci_transport_template; |
433 | 593 | ||
434 | shost->max_id = ~0; | 594 | shost->max_id = ~0; |
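isci_host_alloc() now owns the whole parameter story, and the ordering is load-bearing: built-in defaults first, then module parameters, then platform/firmware OEM data, then a single validation pass before any hardware is touched. Condensed from the hunk above:

```c
sci_oem_defaults(ihost);			/* 1. safe built-in defaults    */
isci_user_parameters_get(&sci_user_params);	/* 2. module parameters         */
if (sci_user_parameters_set(ihost, &sci_user_params))
	return NULL;
if (orom) {					/* 3. OROM/EFI override, if any */
	ihost->oem_parameters = orom->ctrl[id];
	oem_version = orom->hdr.version;
}
if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version))
	return NULL;				/* 4. validate whichever won    */
```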
@@ -439,11 +599,11 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | |||
439 | if (err) | 599 | if (err) |
440 | goto err_shost; | 600 | goto err_shost; |
441 | 601 | ||
442 | err = isci_register_sas_ha(isci_host); | 602 | err = isci_register_sas_ha(ihost); |
443 | if (err) | 603 | if (err) |
444 | goto err_shost_remove; | 604 | goto err_shost_remove; |
445 | 605 | ||
446 | return isci_host; | 606 | return ihost; |
447 | 607 | ||
448 | err_shost_remove: | 608 | err_shost_remove: |
449 | scsi_remove_host(shost); | 609 | scsi_remove_host(shost); |
@@ -476,7 +636,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic | |||
476 | if (!orom) | 636 | if (!orom) |
477 | orom = isci_request_oprom(pdev); | 637 | orom = isci_request_oprom(pdev); |
478 | 638 | ||
479 | for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { | 639 | for (i = 0; orom && i < num_controllers(pdev); i++) { |
480 | if (sci_oem_parameters_validate(&orom->ctrl[i], | 640 | if (sci_oem_parameters_validate(&orom->ctrl[i], |
481 | orom->hdr.version)) { | 641 | orom->hdr.version)) { |
482 | dev_warn(&pdev->dev, | 642 | dev_warn(&pdev->dev, |
@@ -525,11 +685,11 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic | |||
525 | pci_info->hosts[i] = h; | 685 | pci_info->hosts[i] = h; |
526 | 686 | ||
527 | /* turn on DIF support */ | 687 | /* turn on DIF support */ |
528 | scsi_host_set_prot(h->shost, | 688 | scsi_host_set_prot(to_shost(h), |
529 | SHOST_DIF_TYPE1_PROTECTION | | 689 | SHOST_DIF_TYPE1_PROTECTION | |
530 | SHOST_DIF_TYPE2_PROTECTION | | 690 | SHOST_DIF_TYPE2_PROTECTION | |
531 | SHOST_DIF_TYPE3_PROTECTION); | 691 | SHOST_DIF_TYPE3_PROTECTION); |
532 | scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC); | 692 | scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); |
533 | } | 693 | } |
534 | 694 | ||
535 | err = isci_setup_interrupts(pdev); | 695 | err = isci_setup_interrupts(pdev); |
@@ -537,7 +697,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic | |||
537 | goto err_host_alloc; | 697 | goto err_host_alloc; |
538 | 698 | ||
539 | for_each_isci_host(i, isci_host, pdev) | 699 | for_each_isci_host(i, isci_host, pdev) |
540 | scsi_scan_host(isci_host->shost); | 700 | scsi_scan_host(to_shost(isci_host)); |
541 | 701 | ||
542 | return 0; | 702 | return 0; |
543 | 703 | ||
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index fab3586840b5..18f43d4c30ba 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c | |||
@@ -580,7 +580,7 @@ static void sci_phy_start_sas_link_training(struct isci_phy *iphy) | |||
580 | 580 | ||
581 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); | 581 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); |
582 | 582 | ||
583 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; | 583 | iphy->protocol = SAS_PROTOCOL_SSP; |
584 | } | 584 | } |
585 | 585 | ||
586 | static void sci_phy_start_sata_link_training(struct isci_phy *iphy) | 586 | static void sci_phy_start_sata_link_training(struct isci_phy *iphy) |
@@ -591,7 +591,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy) | |||
591 | */ | 591 | */ |
592 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); | 592 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); |
593 | 593 | ||
594 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; | 594 | iphy->protocol = SAS_PROTOCOL_SATA; |
595 | } | 595 | } |
596 | 596 | ||
597 | /** | 597 | /** |
@@ -668,6 +668,19 @@ static const char *phy_event_name(u32 event_code) | |||
668 | phy_to_host(iphy)->id, iphy->phy_index, \ | 668 | phy_to_host(iphy)->id, iphy->phy_index, \ |
669 | phy_state_name(state), phy_event_name(code), code) | 669 | phy_state_name(state), phy_event_name(code), code) |
670 | 670 | ||
671 | |||
672 | void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout) | ||
673 | { | ||
674 | u32 val; | ||
675 | |||
676 | /* Extend timeout */ | ||
677 | val = readl(&iphy->link_layer_registers->transmit_comsas_signal); | ||
678 | val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK); | ||
679 | val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout); | ||
680 | |||
681 | writel(val, &iphy->link_layer_registers->transmit_comsas_signal); | ||
682 | } | ||
683 | |||
671 | enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | 684 | enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) |
672 | { | 685 | { |
673 | enum sci_phy_states state = iphy->sm.current_state_id; | 686 | enum sci_phy_states state = iphy->sm.current_state_id; |
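scu_link_layer_set_txcomsas_timeout() above is a plain read-modify-write of the NEGTIME field in the TXCOMSAS register (the SHIFT/MASK definitions land in registers.h further down). Every handler added in this file uses it in one of two ways:

```c
/* on SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: widen the COMSAS window, retrain */
scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
sci_change_state(&iphy->sm, SCI_PHY_STARTING);

/* on SCU_EVENT_LINK_FAILURE: restore the default before restarting oob/sn */
scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
```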
@@ -683,6 +696,13 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
683 | sci_phy_start_sata_link_training(iphy); | 696 | sci_phy_start_sata_link_training(iphy); |
684 | iphy->is_in_link_training = true; | 697 | iphy->is_in_link_training = true; |
685 | break; | 698 | break; |
699 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: | ||
700 | /* Extend timeout value */ | ||
701 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); | ||
702 | |||
703 | /* Start the oob/sn state machine over again */ | ||
704 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | ||
705 | break; | ||
686 | default: | 706 | default: |
687 | phy_event_dbg(iphy, state, event_code); | 707 | phy_event_dbg(iphy, state, event_code); |
688 | return SCI_FAILURE; | 708 | return SCI_FAILURE; |
@@ -717,9 +737,19 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
717 | sci_phy_start_sata_link_training(iphy); | 737 | sci_phy_start_sata_link_training(iphy); |
718 | break; | 738 | break; |
719 | case SCU_EVENT_LINK_FAILURE: | 739 | case SCU_EVENT_LINK_FAILURE: |
740 | /* Change the timeout value to default */ | ||
741 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
742 | |||
720 | /* Link failure change state back to the starting state */ | 743 | /* Link failure change state back to the starting state */ |
721 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 744 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
722 | break; | 745 | break; |
746 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: | ||
747 | /* Extend the timeout value */ | ||
748 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); | ||
749 | |||
750 | /* Start the oob/sn state machine over again */ | ||
751 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | ||
752 | break; | ||
723 | default: | 753 | default: |
724 | phy_event_warn(iphy, state, event_code); | 754 | phy_event_warn(iphy, state, event_code); |
725 | return SCI_FAILURE; | 755 | return SCI_FAILURE; |
@@ -740,7 +770,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
740 | sci_phy_start_sata_link_training(iphy); | 770 | sci_phy_start_sata_link_training(iphy); |
741 | break; | 771 | break; |
742 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: | 772 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: |
773 | /* Extend the timeout value */ | ||
774 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); | ||
775 | |||
776 | /* Start the oob/sn state machine over again */ | ||
777 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | ||
778 | break; | ||
743 | case SCU_EVENT_LINK_FAILURE: | 779 | case SCU_EVENT_LINK_FAILURE: |
780 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
744 | case SCU_EVENT_HARD_RESET_RECEIVED: | 781 | case SCU_EVENT_HARD_RESET_RECEIVED: |
745 | /* Start the oob/sn state machine over again */ | 782 | /* Start the oob/sn state machine over again */ |
746 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 783 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
@@ -753,6 +790,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
753 | case SCI_PHY_SUB_AWAIT_SAS_POWER: | 790 | case SCI_PHY_SUB_AWAIT_SAS_POWER: |
754 | switch (scu_get_event_code(event_code)) { | 791 | switch (scu_get_event_code(event_code)) { |
755 | case SCU_EVENT_LINK_FAILURE: | 792 | case SCU_EVENT_LINK_FAILURE: |
793 | /* Change the timeout value to default */ | ||
794 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
795 | |||
756 | /* Link failure change state back to the starting state */ | 796 | /* Link failure change state back to the starting state */ |
757 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 797 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
758 | break; | 798 | break; |
@@ -764,6 +804,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
764 | case SCI_PHY_SUB_AWAIT_SATA_POWER: | 804 | case SCI_PHY_SUB_AWAIT_SATA_POWER: |
765 | switch (scu_get_event_code(event_code)) { | 805 | switch (scu_get_event_code(event_code)) { |
766 | case SCU_EVENT_LINK_FAILURE: | 806 | case SCU_EVENT_LINK_FAILURE: |
807 | /* Change the timeout value to default */ | ||
808 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
809 | |||
767 | /* Link failure change state back to the starting state */ | 810 | /* Link failure change state back to the starting state */ |
768 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 811 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
769 | break; | 812 | break; |
@@ -788,6 +831,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
788 | case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: | 831 | case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: |
789 | switch (scu_get_event_code(event_code)) { | 832 | switch (scu_get_event_code(event_code)) { |
790 | case SCU_EVENT_LINK_FAILURE: | 833 | case SCU_EVENT_LINK_FAILURE: |
834 | /* Change the timeout value to default */ | ||
835 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
836 | |||
791 | /* Link failure change state back to the starting state */ | 837 | /* Link failure change state back to the starting state */ |
792 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 838 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
793 | break; | 839 | break; |
@@ -797,7 +843,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
797 | */ | 843 | */ |
798 | break; | 844 | break; |
799 | case SCU_EVENT_SATA_PHY_DETECTED: | 845 | case SCU_EVENT_SATA_PHY_DETECTED: |
800 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; | 846 | iphy->protocol = SAS_PROTOCOL_SATA; |
801 | 847 | ||
802 | /* We have received the SATA PHY notification change state */ | 848 | /* We have received the SATA PHY notification change state */ |
803 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); | 849 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); |
@@ -836,6 +882,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
836 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); | 882 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); |
837 | break; | 883 | break; |
838 | case SCU_EVENT_LINK_FAILURE: | 884 | case SCU_EVENT_LINK_FAILURE: |
885 | /* Change the timeout value to default */ | ||
886 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
887 | |||
839 | /* Link failure change state back to the starting state */ | 888 | /* Link failure change state back to the starting state */ |
840 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 889 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
841 | break; | 890 | break; |
@@ -859,6 +908,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
859 | break; | 908 | break; |
860 | 909 | ||
861 | case SCU_EVENT_LINK_FAILURE: | 910 | case SCU_EVENT_LINK_FAILURE: |
911 | /* Change the timeout value to default */ | ||
912 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
913 | |||
862 | /* Link failure change state back to the starting state */ | 914 | /* Link failure change state back to the starting state */ |
863 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 915 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
864 | break; | 916 | break; |
@@ -871,16 +923,26 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) | |||
871 | case SCI_PHY_READY: | 923 | case SCI_PHY_READY: |
872 | switch (scu_get_event_code(event_code)) { | 924 | switch (scu_get_event_code(event_code)) { |
873 | case SCU_EVENT_LINK_FAILURE: | 925 | case SCU_EVENT_LINK_FAILURE: |
926 | /* Set default timeout */ | ||
927 | scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); | ||
928 | |||
874 | /* Link failure change state back to the starting state */ | 929 | /* Link failure change state back to the starting state */ |
875 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); | 930 | sci_change_state(&iphy->sm, SCI_PHY_STARTING); |
876 | break; | 931 | break; |
877 | case SCU_EVENT_BROADCAST_CHANGE: | 932 | case SCU_EVENT_BROADCAST_CHANGE: |
933 | case SCU_EVENT_BROADCAST_SES: | ||
934 | case SCU_EVENT_BROADCAST_RESERVED0: | ||
935 | case SCU_EVENT_BROADCAST_RESERVED1: | ||
936 | case SCU_EVENT_BROADCAST_EXPANDER: | ||
937 | case SCU_EVENT_BROADCAST_AEN: | ||
878 | /* Broadcast change received. Notify the port. */ | 938 | /* Broadcast change received. Notify the port. */ |
879 | if (phy_get_non_dummy_port(iphy) != NULL) | 939 | if (phy_get_non_dummy_port(iphy) != NULL) |
880 | sci_port_broadcast_change_received(iphy->owning_port, iphy); | 940 | sci_port_broadcast_change_received(iphy->owning_port, iphy); |
881 | else | 941 | else |
882 | iphy->bcn_received_while_port_unassigned = true; | 942 | iphy->bcn_received_while_port_unassigned = true; |
883 | break; | 943 | break; |
944 | case SCU_EVENT_BROADCAST_RESERVED3: | ||
945 | case SCU_EVENT_BROADCAST_RESERVED4: | ||
884 | default: | 946 | default: |
885 | phy_event_warn(iphy, state, event_code); | 947 | phy_event_warn(iphy, state, event_code); |
886 | return SCI_FAILURE_INVALID_STATE; | 948 | return SCI_FAILURE_INVALID_STATE; |
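SCI_PHY_READY now treats every defined broadcast primitive the same way (CHANGE, SES, EXPANDER, AEN and the two low reserved codes), while RESERVED3/4 still fall through to the warning path. The shared handler either notifies the owning port or latches the notification for later:

```c
/* notify the owning port if there is one; otherwise latch the BCN so it
 * can be acted on once the phy is assigned to a port */
if (phy_get_non_dummy_port(iphy) != NULL)
	sci_port_broadcast_change_received(iphy->owning_port, iphy);
else
	iphy->bcn_received_while_port_unassigned = true;
```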
@@ -1215,7 +1277,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) | |||
1215 | scu_link_layer_start_oob(iphy); | 1277 | scu_link_layer_start_oob(iphy); |
1216 | 1278 | ||
1217 | /* We don't know what kind of phy we are going to be just yet */ | 1279 | /* We don't know what kind of phy we are going to be just yet */ |
1218 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; | 1280 | iphy->protocol = SAS_PROTOCOL_NONE; |
1219 | iphy->bcn_received_while_port_unassigned = false; | 1281 | iphy->bcn_received_while_port_unassigned = false; |
1220 | 1282 | ||
1221 | if (iphy->sm.previous_state_id == SCI_PHY_READY) | 1283 | if (iphy->sm.previous_state_id == SCI_PHY_READY) |
@@ -1250,7 +1312,7 @@ static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) | |||
1250 | */ | 1312 | */ |
1251 | sci_port_deactivate_phy(iphy->owning_port, iphy, false); | 1313 | sci_port_deactivate_phy(iphy->owning_port, iphy, false); |
1252 | 1314 | ||
1253 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { | 1315 | if (iphy->protocol == SAS_PROTOCOL_SSP) { |
1254 | scu_link_layer_tx_hard_reset(iphy); | 1316 | scu_link_layer_tx_hard_reset(iphy); |
1255 | } else { | 1317 | } else { |
1256 | /* The SCU does not need to have a discrete reset state so | 1318 | /* The SCU does not need to have a discrete reset state so |
@@ -1316,7 +1378,7 @@ void sci_phy_construct(struct isci_phy *iphy, | |||
1316 | iphy->owning_port = iport; | 1378 | iphy->owning_port = iport; |
1317 | iphy->phy_index = phy_index; | 1379 | iphy->phy_index = phy_index; |
1318 | iphy->bcn_received_while_port_unassigned = false; | 1380 | iphy->bcn_received_while_port_unassigned = false; |
1319 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; | 1381 | iphy->protocol = SAS_PROTOCOL_NONE; |
1320 | iphy->link_layer_registers = NULL; | 1382 | iphy->link_layer_registers = NULL; |
1321 | iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; | 1383 | iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; |
1322 | 1384 | ||
@@ -1380,12 +1442,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy, | |||
1380 | switch (func) { | 1442 | switch (func) { |
1381 | case PHY_FUNC_DISABLE: | 1443 | case PHY_FUNC_DISABLE: |
1382 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1444 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1445 | scu_link_layer_start_oob(iphy); | ||
1383 | sci_phy_stop(iphy); | 1446 | sci_phy_stop(iphy); |
1384 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1447 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1385 | break; | 1448 | break; |
1386 | 1449 | ||
1387 | case PHY_FUNC_LINK_RESET: | 1450 | case PHY_FUNC_LINK_RESET: |
1388 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1451 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1452 | scu_link_layer_start_oob(iphy); | ||
1389 | sci_phy_stop(iphy); | 1453 | sci_phy_stop(iphy); |
1390 | sci_phy_start(iphy); | 1454 | sci_phy_start(iphy); |
1391 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1455 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index 0e45833ba06d..45fecfa36a98 100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h | |||
@@ -76,13 +76,6 @@ | |||
76 | */ | 76 | */ |
77 | #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 | 77 | #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 |
78 | 78 | ||
79 | enum sci_phy_protocol { | ||
80 | SCIC_SDS_PHY_PROTOCOL_UNKNOWN, | ||
81 | SCIC_SDS_PHY_PROTOCOL_SAS, | ||
82 | SCIC_SDS_PHY_PROTOCOL_SATA, | ||
83 | SCIC_SDS_MAX_PHY_PROTOCOLS | ||
84 | }; | ||
85 | |||
86 | /** | 79 | /** |
87 | * isci_phy - hba local phy infrastructure | 80 | * isci_phy - hba local phy infrastructure |
88 | * @sm: | 81 | * @sm: |
@@ -95,7 +88,7 @@ struct isci_phy { | |||
95 | struct sci_base_state_machine sm; | 88 | struct sci_base_state_machine sm; |
96 | struct isci_port *owning_port; | 89 | struct isci_port *owning_port; |
97 | enum sas_linkrate max_negotiated_speed; | 90 | enum sas_linkrate max_negotiated_speed; |
98 | enum sci_phy_protocol protocol; | 91 | enum sas_protocol protocol; |
99 | u8 phy_index; | 92 | u8 phy_index; |
100 | bool bcn_received_while_port_unassigned; | 93 | bool bcn_received_while_port_unassigned; |
101 | bool is_in_link_training; | 94 | bool is_in_link_training; |
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index 5fada73b71ff..2fb85bf75449 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c | |||
@@ -184,7 +184,7 @@ static void isci_port_link_up(struct isci_host *isci_host, | |||
184 | 184 | ||
185 | sci_port_get_properties(iport, &properties); | 185 | sci_port_get_properties(iport, &properties); |
186 | 186 | ||
187 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { | 187 | if (iphy->protocol == SAS_PROTOCOL_SATA) { |
188 | u64 attached_sas_address; | 188 | u64 attached_sas_address; |
189 | 189 | ||
190 | iphy->sas_phy.oob_mode = SATA_OOB_MODE; | 190 | iphy->sas_phy.oob_mode = SATA_OOB_MODE; |
@@ -204,7 +204,7 @@ static void isci_port_link_up(struct isci_host *isci_host, | |||
204 | 204 | ||
205 | memcpy(&iphy->sas_phy.attached_sas_addr, | 205 | memcpy(&iphy->sas_phy.attached_sas_addr, |
206 | &attached_sas_address, sizeof(attached_sas_address)); | 206 | &attached_sas_address, sizeof(attached_sas_address)); |
207 | } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { | 207 | } else if (iphy->protocol == SAS_PROTOCOL_SSP) { |
208 | iphy->sas_phy.oob_mode = SAS_OOB_MODE; | 208 | iphy->sas_phy.oob_mode = SAS_OOB_MODE; |
209 | iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); | 209 | iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); |
210 | 210 | ||
@@ -251,10 +251,10 @@ static void isci_port_link_down(struct isci_host *isci_host, | |||
251 | if (isci_phy->sas_phy.port && | 251 | if (isci_phy->sas_phy.port && |
252 | isci_phy->sas_phy.port->num_phys == 1) { | 252 | isci_phy->sas_phy.port->num_phys == 1) { |
253 | /* change the state for all devices on this port. The | 253 | /* change the state for all devices on this port. The |
254 | * next task sent to this device will be returned as | 254 | * next task sent to this device will be returned as |
255 | * SAS_TASK_UNDELIVERED, and the scsi mid layer will | 255 | * SAS_TASK_UNDELIVERED, and the scsi mid layer will |
256 | * remove the target | 256 | * remove the target |
257 | */ | 257 | */ |
258 | list_for_each_entry(isci_device, | 258 | list_for_each_entry(isci_device, |
259 | &isci_port->remote_dev_list, | 259 | &isci_port->remote_dev_list, |
260 | node) { | 260 | node) { |
@@ -517,7 +517,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a | |||
517 | */ | 517 | */ |
518 | iphy = sci_port_get_a_connected_phy(iport); | 518 | iphy = sci_port_get_a_connected_phy(iport); |
519 | if (iphy) { | 519 | if (iphy) { |
520 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { | 520 | if (iphy->protocol != SAS_PROTOCOL_SATA) { |
521 | sci_phy_get_attached_sas_address(iphy, sas); | 521 | sci_phy_get_attached_sas_address(iphy, sas); |
522 | } else { | 522 | } else { |
523 | sci_phy_get_sas_address(iphy, sas); | 523 | sci_phy_get_sas_address(iphy, sas); |
@@ -624,7 +624,7 @@ static void sci_port_activate_phy(struct isci_port *iport, | |||
624 | { | 624 | { |
625 | struct isci_host *ihost = iport->owning_controller; | 625 | struct isci_host *ihost = iport->owning_controller; |
626 | 626 | ||
627 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME)) | 627 | if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME)) |
628 | sci_phy_resume(iphy); | 628 | sci_phy_resume(iphy); |
629 | 629 | ||
630 | iport->active_phy_mask |= 1 << iphy->phy_index; | 630 | iport->active_phy_mask |= 1 << iphy->phy_index; |
@@ -751,12 +751,10 @@ static bool sci_port_is_wide(struct isci_port *iport) | |||
751 | * wide ports and direct attached phys. Since there are no wide ported SATA | 751 | * wide ports and direct attached phys. Since there are no wide ported SATA |
752 | * devices this could become an invalid port configuration. | 752 | * devices this could become an invalid port configuration. |
753 | */ | 753 | */ |
754 | bool sci_port_link_detected( | 754 | bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy) |
755 | struct isci_port *iport, | ||
756 | struct isci_phy *iphy) | ||
757 | { | 755 | { |
758 | if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && | 756 | if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && |
759 | (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) { | 757 | (iphy->protocol == SAS_PROTOCOL_SATA)) { |
760 | if (sci_port_is_wide(iport)) { | 758 | if (sci_port_is_wide(iport)) { |
761 | sci_port_invalid_link_up(iport, iphy); | 759 | sci_port_invalid_link_up(iport, iphy); |
762 | return false; | 760 | return false; |
@@ -1201,6 +1199,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport, | |||
1201 | enum sci_status status; | 1199 | enum sci_status status; |
1202 | enum sci_port_states state; | 1200 | enum sci_port_states state; |
1203 | 1201 | ||
1202 | sci_port_bcn_enable(iport); | ||
1203 | |||
1204 | state = iport->sm.current_state_id; | 1204 | state = iport->sm.current_state_id; |
1205 | switch (state) { | 1205 | switch (state) { |
1206 | case SCI_PORT_STOPPED: { | 1206 | case SCI_PORT_STOPPED: { |
@@ -1548,6 +1548,29 @@ static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) | |||
1548 | isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); | 1548 | isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); |
1549 | } | 1549 | } |
1550 | 1550 | ||
1551 | void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout) | ||
1552 | { | ||
1553 | int phy_index; | ||
1554 | u32 phy_mask = iport->active_phy_mask; | ||
1555 | |||
1556 | if (timeout) | ||
1557 | ++iport->hang_detect_users; | ||
1558 | else if (iport->hang_detect_users > 1) | ||
1559 | --iport->hang_detect_users; | ||
1560 | else | ||
1561 | iport->hang_detect_users = 0; | ||
1562 | |||
1563 | if (timeout || (iport->hang_detect_users == 0)) { | ||
1564 | for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { | ||
1565 | if ((phy_mask >> phy_index) & 1) { | ||
1566 | writel(timeout, | ||
1567 | &iport->phy_table[phy_index] | ||
1568 | ->link_layer_registers | ||
1569 | ->link_layer_hang_detection_timeout); | ||
1570 | } | ||
1571 | } | ||
1572 | } | ||
1573 | } | ||
1551 | /* --------------------------------------------------------------------------- */ | 1574 | /* --------------------------------------------------------------------------- */ |
1552 | 1575 | ||
1553 | static const struct sci_base_state sci_port_state_table[] = { | 1576 | static const struct sci_base_state sci_port_state_table[] = { |
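sci_port_set_hang_detection_timeout() above reference-counts its callers: a nonzero timeout takes a reference and writes the phy registers immediately; a zero timeout drops a reference and only writes once the last user is gone, so nested enable/disable pairs cannot clobber each other. Hypothetical paired usage (the constant and helper names are illustrative, not from the driver):

```c
/* enable hang detection across an operation that can wedge the link */
sci_port_set_hang_detection_timeout(iport, HYPOTHETICAL_HANG_TIMEOUT);
do_risky_port_operation(iport);		/* placeholder */
/* drop our reference; 0 reaches the registers only when users hits 0 */
sci_port_set_hang_detection_timeout(iport, 0);
```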
@@ -1596,6 +1619,7 @@ void sci_port_construct(struct isci_port *iport, u8 index, | |||
1596 | 1619 | ||
1597 | iport->started_request_count = 0; | 1620 | iport->started_request_count = 0; |
1598 | iport->assigned_device_count = 0; | 1621 | iport->assigned_device_count = 0; |
1622 | iport->hang_detect_users = 0; | ||
1599 | 1623 | ||
1600 | iport->reserved_rni = SCU_DUMMY_INDEX; | 1624 | iport->reserved_rni = SCU_DUMMY_INDEX; |
1601 | iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; | 1625 | iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; |
@@ -1608,13 +1632,6 @@ void sci_port_construct(struct isci_port *iport, u8 index, | |||
1608 | iport->phy_table[index] = NULL; | 1632 | iport->phy_table[index] = NULL; |
1609 | } | 1633 | } |
1610 | 1634 | ||
1611 | void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index) | ||
1612 | { | ||
1613 | INIT_LIST_HEAD(&iport->remote_dev_list); | ||
1614 | INIT_LIST_HEAD(&iport->domain_dev_list); | ||
1615 | iport->isci_host = ihost; | ||
1616 | } | ||
1617 | |||
1618 | void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) | 1635 | void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) |
1619 | { | 1636 | { |
1620 | struct isci_host *ihost = iport->owning_controller; | 1637 | struct isci_host *ihost = iport->owning_controller; |
@@ -1671,17 +1688,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor | |||
1671 | __func__, iport, status); | 1688 | __func__, iport, status); |
1672 | 1689 | ||
1673 | } | 1690 | } |
1674 | |||
1675 | /* If the hard reset for the port has failed, consider this | ||
1676 | * the same as link failures on all phys in the port. | ||
1677 | */ | ||
1678 | if (ret != TMF_RESP_FUNC_COMPLETE) { | ||
1679 | |||
1680 | dev_err(&ihost->pdev->dev, | ||
1681 | "%s: iport = %p; hard reset failed " | ||
1682 | "(0x%x) - driving explicit link fail for all phys\n", | ||
1683 | __func__, iport, iport->hard_reset_status); | ||
1684 | } | ||
1685 | return ret; | 1691 | return ret; |
1686 | } | 1692 | } |
1687 | 1693 | ||
@@ -1740,7 +1746,7 @@ void isci_port_formed(struct asd_sas_phy *phy) | |||
1740 | struct isci_host *ihost = phy->ha->lldd_ha; | 1746 | struct isci_host *ihost = phy->ha->lldd_ha; |
1741 | struct isci_phy *iphy = to_iphy(phy); | 1747 | struct isci_phy *iphy = to_iphy(phy); |
1742 | struct asd_sas_port *port = phy->port; | 1748 | struct asd_sas_port *port = phy->port; |
1743 | struct isci_port *iport; | 1749 | struct isci_port *iport = NULL; |
1744 | unsigned long flags; | 1750 | unsigned long flags; |
1745 | int i; | 1751 | int i; |
1746 | 1752 | ||
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 6b56240c2051..861e8f72811b 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h | |||
@@ -97,7 +97,6 @@ enum isci_status { | |||
97 | struct isci_port { | 97 | struct isci_port { |
98 | struct isci_host *isci_host; | 98 | struct isci_host *isci_host; |
99 | struct list_head remote_dev_list; | 99 | struct list_head remote_dev_list; |
100 | struct list_head domain_dev_list; | ||
101 | #define IPORT_RESET_PENDING 0 | 100 | #define IPORT_RESET_PENDING 0 |
102 | unsigned long state; | 101 | unsigned long state; |
103 | enum sci_status hard_reset_status; | 102 | enum sci_status hard_reset_status; |
@@ -112,6 +111,7 @@ struct isci_port { | |||
112 | u16 reserved_tag; | 111 | u16 reserved_tag; |
113 | u32 started_request_count; | 112 | u32 started_request_count; |
114 | u32 assigned_device_count; | 113 | u32 assigned_device_count; |
114 | u32 hang_detect_users; | ||
115 | u32 not_ready_reason; | 115 | u32 not_ready_reason; |
116 | struct isci_phy *phy_table[SCI_MAX_PHYS]; | 116 | struct isci_phy *phy_table[SCI_MAX_PHYS]; |
117 | struct isci_host *owning_controller; | 117 | struct isci_host *owning_controller; |
@@ -270,14 +270,13 @@ void sci_port_get_attached_sas_address( | |||
270 | struct isci_port *iport, | 270 | struct isci_port *iport, |
271 | struct sci_sas_address *sas_address); | 271 | struct sci_sas_address *sas_address); |
272 | 272 | ||
273 | void sci_port_set_hang_detection_timeout( | ||
274 | struct isci_port *isci_port, | ||
275 | u32 timeout); | ||
276 | |||
273 | void isci_port_formed(struct asd_sas_phy *); | 277 | void isci_port_formed(struct asd_sas_phy *); |
274 | void isci_port_deformed(struct asd_sas_phy *); | 278 | void isci_port_deformed(struct asd_sas_phy *); |
275 | 279 | ||
276 | void isci_port_init( | ||
277 | struct isci_port *port, | ||
278 | struct isci_host *host, | ||
279 | int index); | ||
280 | |||
281 | int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, | 280 | int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, |
282 | struct isci_phy *iphy); | 281 | struct isci_phy *iphy); |
283 | int isci_ata_check_ready(struct domain_device *dev); | 282 | int isci_ata_check_ready(struct domain_device *dev); |
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 6d1e9544cbe5..cd962da4a57a 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c | |||
@@ -57,7 +57,7 @@ | |||
57 | 57 | ||
58 | #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) | 58 | #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) |
59 | #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) | 59 | #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) |
60 | #define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250) | 60 | #define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000) |
61 | 61 | ||
62 | enum SCIC_SDS_APC_ACTIVITY { | 62 | enum SCIC_SDS_APC_ACTIVITY { |
63 | SCIC_SDS_APC_SKIP_PHY, | 63 | SCIC_SDS_APC_SKIP_PHY, |
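The APC link-up grace period grows from 250 to 1000 (presumably milliseconds, given how sci_mod_timer() is used below), giving a phy that restarts OOB after an identify timeout (see the phy.c hunks above) time to retrain before the agent gives up and configures ports without it. A hedged sketch of the call site that consumes it:

```c
/* the agent re-arms its timer with the (now four-fold longer) link-up wait */
sci_apc_agent_start_timer(port_agent, SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
```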
@@ -472,13 +472,9 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
472 | * down event or a link up event where we can not yet tell to which a phy | 472 | * down event or a link up event where we can not yet tell to which a phy |
473 | * belongs. | 473 | * belongs. |
474 | */ | 474 | */ |
475 | static void sci_apc_agent_start_timer( | 475 | static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent, |
476 | struct sci_port_configuration_agent *port_agent, | 476 | u32 timeout) |
477 | u32 timeout) | ||
478 | { | 477 | { |
479 | if (port_agent->timer_pending) | ||
480 | sci_del_timer(&port_agent->timer); | ||
481 | |||
482 | port_agent->timer_pending = true; | 478 | port_agent->timer_pending = true; |
483 | sci_mod_timer(&port_agent->timer, timeout); | 479 | sci_mod_timer(&port_agent->timer, timeout); |
484 | } | 480 | } |
@@ -697,6 +693,9 @@ static void apc_agent_timeout(unsigned long data) | |||
697 | &ihost->phys[index], false); | 693 | &ihost->phys[index], false); |
698 | } | 694 | } |
699 | 695 | ||
696 | if (is_controller_start_complete(ihost)) | ||
697 | sci_controller_transition_to_ready(ihost, SCI_SUCCESS); | ||
698 | |||
700 | done: | 699 | done: |
701 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 700 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
702 | } | 701 | } |
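In APC mode the controller can no longer be declared started merely because the phys came up; the final call is made here, once the deferred link-up handling has settled. The check itself hides behind is_controller_start_complete(), declared in the host.h hunk above:

```c
/* after deferred link-up processing, move to READY only if nothing
 * (phy startup, port configuration) is still pending */
if (is_controller_start_complete(ihost))
	sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
```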
@@ -732,6 +731,11 @@ void sci_port_configuration_agent_construct( | |||
732 | } | 731 | } |
733 | } | 732 | } |
734 | 733 | ||
734 | bool is_port_config_apc(struct isci_host *ihost) | ||
735 | { | ||
736 | return ihost->port_agent.link_up_handler == sci_apc_agent_link_up; | ||
737 | } | ||
738 | |||
735 | enum sci_status sci_port_configuration_agent_initialize( | 739 | enum sci_status sci_port_configuration_agent_initialize( |
736 | struct isci_host *ihost, | 740 | struct isci_host *ihost, |
737 | struct sci_port_configuration_agent *port_agent) | 741 | struct sci_port_configuration_agent *port_agent) |
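Rather than carrying a separate mode flag, is_port_config_apc() recovers the agent's mode from which link-up handler it was wired with; the MPC side presumably installs sci_mpc_agent_link_up (an inference from the naming, the MPC path is not shown in this hunk). Hypothetical caller:

```c
/* hypothetical: branch on the configured port-configuration mode */
dev_dbg(&ihost->pdev->dev, "port config mode: %s\n",
	is_port_config_apc(ihost) ? "automatic (APC)" : "manual (MPC)");
```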
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 9b8117b9d756..4d95654c3fd4 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c | |||
@@ -112,18 +112,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) | |||
112 | return rom; | 112 | return rom; |
113 | } | 113 | } |
114 | 114 | ||
115 | enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, | ||
116 | struct isci_orom *orom, int scu_index) | ||
117 | { | ||
118 | /* check for valid inputs */ | ||
119 | if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS || | ||
120 | scu_index > orom->hdr.num_elements || !oem) | ||
121 | return -EINVAL; | ||
122 | |||
123 | *oem = orom->ctrl[scu_index]; | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) | 115 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) |
128 | { | 116 | { |
129 | struct isci_orom *orom = NULL, *data; | 117 | struct isci_orom *orom = NULL, *data; |
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index bb0e9d4d97c9..e08b578241f8 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h | |||
@@ -156,8 +156,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version); | |||
156 | 156 | ||
157 | struct isci_orom; | 157 | struct isci_orom; |
158 | struct isci_orom *isci_request_oprom(struct pci_dev *pdev); | 158 | struct isci_orom *isci_request_oprom(struct pci_dev *pdev); |
159 | enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, | ||
160 | struct isci_orom *orom, int scu_index); | ||
161 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); | 159 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); |
162 | struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); | 160 | struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); |
163 | 161 | ||
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h index 7eb0ccd45fe6..97f3ceb8d724 100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h | |||
@@ -1239,6 +1239,14 @@ struct scu_transport_layer_registers { | |||
1239 | #define SCU_SAS_LLCTL_GEN_BIT(name) \ | 1239 | #define SCU_SAS_LLCTL_GEN_BIT(name) \ |
1240 | SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) | 1240 | SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) |
1241 | 1241 | ||
1242 | #define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0) | ||
1243 | #define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF) | ||
1244 | #define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0) | ||
1245 | #define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF) | ||
1246 | |||
1247 | #define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \ | ||
1248 | SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value) | ||
1249 | |||
1242 | 1250 | ||
1243 | /* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ | 1251 | /* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ |
1244 | #define SCU_PSZGCR_OFFSET 0x00E4 | 1252 | #define SCU_PSZGCR_OFFSET 0x00E4 |
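The added TXCOMSAS NEGTIME definitions follow this header's existing convention, where SCU_GEN_VALUE(name, value) is assumed to expand to ((value) << name##_SHIFT) & name##_MASK. A hedged read-modify-write sketch; the register pointer is hypothetical:

    /* Stretch the COMSAS negation time to the extended value. */
    static void scu_set_extended_negtime(void __iomem *txcomsas_reg)
    {
            u32 reg = readl(txcomsas_reg);  /* hypothetical register pointer */

            reg &= ~SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK;
            reg |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME,
                            SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
            writel(reg, txcomsas_reg);
    }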
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 8f501b0a81d6..c3aa6c5457b9 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c | |||
@@ -72,46 +72,11 @@ const char *dev_state_name(enum sci_remote_device_states state) | |||
72 | } | 72 | } |
73 | #undef C | 73 | #undef C |
74 | 74 | ||
75 | /** | 75 | enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, |
76 | * isci_remote_device_not_ready() - This function is called by the ihost when | 76 | enum sci_remote_node_suspension_reasons reason) |
77 | * the remote device is not ready. We mark the isci device as ready (not | ||
78 | * "ready_for_io") and signal the waiting proccess. | ||
79 | * @isci_host: This parameter specifies the isci host object. | ||
80 | * @isci_device: This parameter specifies the remote device | ||
81 | * | ||
82 | * sci_lock is held on entrance to this function. | ||
83 | */ | ||
84 | static void isci_remote_device_not_ready(struct isci_host *ihost, | ||
85 | struct isci_remote_device *idev, u32 reason) | ||
86 | { | 77 | { |
87 | struct isci_request *ireq; | 78 | return sci_remote_node_context_suspend(&idev->rnc, reason, |
88 | 79 | SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); | |
89 | dev_dbg(&ihost->pdev->dev, | ||
90 | "%s: isci_device = %p\n", __func__, idev); | ||
91 | |||
92 | switch (reason) { | ||
93 | case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED: | ||
94 | set_bit(IDEV_GONE, &idev->flags); | ||
95 | break; | ||
96 | case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: | ||
97 | set_bit(IDEV_IO_NCQERROR, &idev->flags); | ||
98 | |||
99 | /* Kill all outstanding requests for the device. */ | ||
100 | list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) { | ||
101 | |||
102 | dev_dbg(&ihost->pdev->dev, | ||
103 | "%s: isci_device = %p request = %p\n", | ||
104 | __func__, idev, ireq); | ||
105 | |||
106 | sci_controller_terminate_request(ihost, | ||
107 | idev, | ||
108 | ireq); | ||
109 | } | ||
110 | /* Fall through into the default case... */ | ||
111 | default: | ||
112 | clear_bit(IDEV_IO_READY, &idev->flags); | ||
113 | break; | ||
114 | } | ||
115 | } | 80 | } |
116 | 81 | ||
117 | /** | 82 | /** |
@@ -133,18 +98,29 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote | |||
133 | wake_up(&ihost->eventq); | 98 | wake_up(&ihost->eventq); |
134 | } | 99 | } |
135 | 100 | ||
136 | /* called once the remote node context is ready to be freed. | 101 | static enum sci_status sci_remote_device_terminate_req( |
137 | * The remote device can now report that its stop operation is complete. | 102 | struct isci_host *ihost, |
138 | */ | 103 | struct isci_remote_device *idev, |
139 | static void rnc_destruct_done(void *_dev) | 104 | int check_abort, |
105 | struct isci_request *ireq) | ||
140 | { | 106 | { |
141 | struct isci_remote_device *idev = _dev; | 107 | if (!test_bit(IREQ_ACTIVE, &ireq->flags) || |
108 | (ireq->target_device != idev) || | ||
109 | (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags))) | ||
110 | return SCI_SUCCESS; | ||
142 | 111 | ||
143 | BUG_ON(idev->started_request_count != 0); | 112 | dev_dbg(&ihost->pdev->dev, |
144 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); | 113 | "%s: idev=%p; flags=%lx; req=%p; req target=%p\n", |
114 | __func__, idev, idev->flags, ireq, ireq->target_device); | ||
115 | |||
116 | set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); | ||
117 | |||
118 | return sci_controller_terminate_request(ihost, idev, ireq); | ||
145 | } | 119 | } |
146 | 120 | ||
147 | static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) | 121 | static enum sci_status sci_remote_device_terminate_reqs_checkabort( |
122 | struct isci_remote_device *idev, | ||
123 | int chk) | ||
148 | { | 124 | { |
149 | struct isci_host *ihost = idev->owning_port->owning_controller; | 125 | struct isci_host *ihost = idev->owning_port->owning_controller; |
150 | enum sci_status status = SCI_SUCCESS; | 126 | enum sci_status status = SCI_SUCCESS; |
@@ -154,18 +130,210 @@ static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_d | |||
154 | struct isci_request *ireq = ihost->reqs[i]; | 130 | struct isci_request *ireq = ihost->reqs[i]; |
155 | enum sci_status s; | 131 | enum sci_status s; |
156 | 132 | ||
157 | if (!test_bit(IREQ_ACTIVE, &ireq->flags) || | 133 | s = sci_remote_device_terminate_req(ihost, idev, chk, ireq); |
158 | ireq->target_device != idev) | ||
159 | continue; | ||
160 | |||
161 | s = sci_controller_terminate_request(ihost, idev, ireq); | ||
162 | if (s != SCI_SUCCESS) | 134 | if (s != SCI_SUCCESS) |
163 | status = s; | 135 | status = s; |
164 | } | 136 | } |
137 | return status; | ||
138 | } | ||
139 | |||
140 | static bool isci_compare_suspendcount( | ||
141 | struct isci_remote_device *idev, | ||
142 | u32 localcount) | ||
143 | { | ||
144 | smp_rmb(); | ||
145 | |||
146 | /* Check for a change in the suspend count, or the RNC | ||
147 | * being destroyed. | ||
148 | */ | ||
149 | return (localcount != idev->rnc.suspend_count) | ||
150 | || sci_remote_node_context_is_being_destroyed(&idev->rnc); | ||
151 | } | ||
152 | |||
153 | static bool isci_check_reqterm( | ||
154 | struct isci_host *ihost, | ||
155 | struct isci_remote_device *idev, | ||
156 | struct isci_request *ireq, | ||
157 | u32 localcount) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | bool res; | ||
165 | 161 | ||
162 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
163 | res = isci_compare_suspendcount(idev, localcount) | ||
164 | && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); | ||
165 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
166 | |||
167 | return res; | ||
168 | } | ||
169 | |||
170 | static bool isci_check_devempty( | ||
171 | struct isci_host *ihost, | ||
172 | struct isci_remote_device *idev, | ||
173 | u32 localcount) | ||
174 | { | ||
175 | unsigned long flags; | ||
176 | bool res; | ||
177 | |||
178 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
179 | res = isci_compare_suspendcount(idev, localcount) | ||
180 | && idev->started_request_count == 0; | ||
181 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
182 | |||
183 | return res; | ||
184 | } | ||
185 | |||
186 | enum sci_status isci_remote_device_terminate_requests( | ||
187 | struct isci_host *ihost, | ||
188 | struct isci_remote_device *idev, | ||
189 | struct isci_request *ireq) | ||
190 | { | ||
191 | enum sci_status status = SCI_SUCCESS; | ||
192 | unsigned long flags; | ||
193 | u32 rnc_suspend_count; | ||
194 | |||
195 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
196 | |||
197 | if (isci_get_device(idev) == NULL) { | ||
198 | dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n", | ||
199 | __func__, idev); | ||
200 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
201 | status = SCI_FAILURE; | ||
202 | } else { | ||
203 | /* If already suspended, don't wait for another suspension. */ | ||
204 | smp_rmb(); | ||
205 | rnc_suspend_count | ||
206 | = sci_remote_node_context_is_suspended(&idev->rnc) | ||
207 | ? 0 : idev->rnc.suspend_count; | ||
208 | |||
209 | dev_dbg(&ihost->pdev->dev, | ||
210 | "%s: idev=%p, ireq=%p; started_request_count=%d, " | ||
211 | "rnc_suspend_count=%d, rnc.suspend_count=%d" | ||
212 | "about to wait\n", | ||
213 | __func__, idev, ireq, idev->started_request_count, | ||
214 | rnc_suspend_count, idev->rnc.suspend_count); | ||
215 | |||
216 | #define MAX_SUSPEND_MSECS 10000 | ||
217 | if (ireq) { | ||
218 | /* Terminate a specific TC. */ | ||
219 | set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); | ||
220 | sci_remote_device_terminate_req(ihost, idev, 0, ireq); | ||
221 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
222 | if (!wait_event_timeout(ihost->eventq, | ||
223 | isci_check_reqterm(ihost, idev, ireq, | ||
224 | rnc_suspend_count), | ||
225 | msecs_to_jiffies(MAX_SUSPEND_MSECS))) { | ||
226 | |||
227 | dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n", | ||
228 | __func__, ihost->id); | ||
229 | dev_dbg(&ihost->pdev->dev, | ||
230 | "%s: ******* Timeout waiting for " | ||
231 | "suspend; idev=%p, current state %s; " | ||
232 | "started_request_count=%d, flags=%lx\n\t" | ||
233 | "rnc_suspend_count=%d, rnc.suspend_count=%d " | ||
234 | "RNC: current state %s, current " | ||
235 | "suspend_type %x dest state %d;\n" | ||
236 | "ireq=%p, ireq->flags = %lx\n", | ||
237 | __func__, idev, | ||
238 | dev_state_name(idev->sm.current_state_id), | ||
239 | idev->started_request_count, idev->flags, | ||
240 | rnc_suspend_count, idev->rnc.suspend_count, | ||
241 | rnc_state_name(idev->rnc.sm.current_state_id), | ||
242 | idev->rnc.suspend_type, | ||
243 | idev->rnc.destination_state, | ||
244 | ireq, ireq->flags); | ||
245 | } | ||
246 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
247 | clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); | ||
248 | if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) | ||
249 | isci_free_tag(ihost, ireq->io_tag); | ||
250 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
251 | } else { | ||
252 | /* Terminate all TCs. */ | ||
253 | sci_remote_device_terminate_requests(idev); | ||
254 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
255 | if (!wait_event_timeout(ihost->eventq, | ||
256 | isci_check_devempty(ihost, idev, | ||
257 | rnc_suspend_count), | ||
258 | msecs_to_jiffies(MAX_SUSPEND_MSECS))) { | ||
259 | |||
260 | dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n", | ||
261 | __func__, ihost->id); | ||
262 | dev_dbg(&ihost->pdev->dev, | ||
263 | "%s: ******* Timeout waiting for " | ||
264 | "suspend; idev=%p, current state %s; " | ||
265 | "started_request_count=%d, flags=%lx\n\t" | ||
266 | "rnc_suspend_count=%d, " | ||
267 | "RNC: current state %s, " | ||
268 | "rnc.suspend_count=%d, current " | ||
269 | "suspend_type %x dest state %d\n", | ||
270 | __func__, idev, | ||
271 | dev_state_name(idev->sm.current_state_id), | ||
272 | idev->started_request_count, idev->flags, | ||
273 | rnc_suspend_count, | ||
274 | rnc_state_name(idev->rnc.sm.current_state_id), | ||
275 | idev->rnc.suspend_count, | ||
276 | idev->rnc.suspend_type, | ||
277 | idev->rnc.destination_state); | ||
278 | } | ||
279 | } | ||
280 | dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n", | ||
281 | __func__, idev); | ||
282 | isci_put_device(idev); | ||
283 | } | ||
166 | return status; | 284 | return status; |
167 | } | 285 | } |
168 | 286 | ||
287 | /** | ||
288 | * isci_remote_device_not_ready() - This function is called by the ihost when | ||
289 | * the remote device is not ready. We mark the isci device as ready (not | ||
290 | * "ready_for_io") and signal the waiting proccess. | ||
291 | * @isci_host: This parameter specifies the isci host object. | ||
292 | * @isci_device: This parameter specifies the remote device | ||
293 | * | ||
294 | * sci_lock is held on entrance to this function. | ||
295 | */ | ||
296 | static void isci_remote_device_not_ready(struct isci_host *ihost, | ||
297 | struct isci_remote_device *idev, | ||
298 | u32 reason) | ||
299 | { | ||
300 | dev_dbg(&ihost->pdev->dev, | ||
301 | "%s: isci_device = %p; reason = %d\n", __func__, idev, reason); | ||
302 | |||
303 | switch (reason) { | ||
304 | case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: | ||
305 | set_bit(IDEV_IO_NCQERROR, &idev->flags); | ||
306 | |||
307 | /* Suspend the remote device so the I/O can be terminated. */ | ||
308 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); | ||
309 | |||
310 | /* Kill all outstanding requests for the device. */ | ||
311 | sci_remote_device_terminate_requests(idev); | ||
312 | |||
313 | /* Fall through into the default case... */ | ||
314 | default: | ||
315 | clear_bit(IDEV_IO_READY, &idev->flags); | ||
316 | break; | ||
317 | } | ||
318 | } | ||
319 | |||
320 | /* called once the remote node context is ready to be freed. | ||
321 | * The remote device can now report that its stop operation is complete. | ||
322 | */ | ||
323 | static void rnc_destruct_done(void *_dev) | ||
324 | { | ||
325 | struct isci_remote_device *idev = _dev; | ||
326 | |||
327 | BUG_ON(idev->started_request_count != 0); | ||
328 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); | ||
329 | } | ||
330 | |||
331 | enum sci_status sci_remote_device_terminate_requests( | ||
332 | struct isci_remote_device *idev) | ||
333 | { | ||
334 | return sci_remote_device_terminate_reqs_checkabort(idev, 0); | ||
335 | } | ||
336 | |||
169 | enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, | 337 | enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, |
170 | u32 timeout) | 338 | u32 timeout) |
171 | { | 339 | { |
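The new isci_remote_device_terminate_requests() above follows a sample/kick/wait shape: take a device reference and sample the RNC suspend count under scic_lock, start the terminations, then sleep on ihost->eventq until the count moves or the RNC is being destroyed. Condensed to its skeleton (names from the patch; reference counting and error handling elided):

    static void quiesce_device(struct isci_host *ihost,
                               struct isci_remote_device *idev)
    {
            unsigned long flags;
            u32 count;

            spin_lock_irqsave(&ihost->scic_lock, flags);
            /* Skip the wait if the RNC is already suspended. */
            count = sci_remote_node_context_is_suspended(&idev->rnc)
                            ? 0 : idev->rnc.suspend_count;
            sci_remote_device_terminate_requests(idev);     /* kick all TCs */
            spin_unlock_irqrestore(&ihost->scic_lock, flags);

            /* Sleep until the device quiesces or the timeout expires. */
            wait_event_timeout(ihost->eventq,
                               isci_check_devempty(ihost, idev, count),
                               msecs_to_jiffies(MAX_SUSPEND_MSECS));
    }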
@@ -201,13 +369,16 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, | |||
201 | case SCI_SMP_DEV_IDLE: | 369 | case SCI_SMP_DEV_IDLE: |
202 | case SCI_SMP_DEV_CMD: | 370 | case SCI_SMP_DEV_CMD: |
203 | sci_change_state(sm, SCI_DEV_STOPPING); | 371 | sci_change_state(sm, SCI_DEV_STOPPING); |
204 | if (idev->started_request_count == 0) { | 372 | if (idev->started_request_count == 0) |
205 | sci_remote_node_context_destruct(&idev->rnc, | 373 | sci_remote_node_context_destruct(&idev->rnc, |
206 | rnc_destruct_done, idev); | 374 | rnc_destruct_done, |
207 | return SCI_SUCCESS; | 375 | idev); |
208 | } else | 376 | else { |
209 | return sci_remote_device_terminate_requests(idev); | 377 | sci_remote_device_suspend( |
210 | break; | 378 | idev, SCI_SW_SUSPEND_LINKHANG_DETECT); |
379 | sci_remote_device_terminate_requests(idev); | ||
380 | } | ||
381 | return SCI_SUCCESS; | ||
211 | case SCI_DEV_STOPPING: | 382 | case SCI_DEV_STOPPING: |
212 | /* All requests should have been terminated, but if there is an | 383 | /* All requests should have been terminated, but if there is an |
213 | * attempt to stop a device already in the stopping state, then | 384 | * attempt to stop a device already in the stopping state, then |
@@ -265,22 +436,6 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev | |||
265 | return SCI_SUCCESS; | 436 | return SCI_SUCCESS; |
266 | } | 437 | } |
267 | 438 | ||
268 | enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, | ||
269 | u32 suspend_type) | ||
270 | { | ||
271 | struct sci_base_state_machine *sm = &idev->sm; | ||
272 | enum sci_remote_device_states state = sm->current_state_id; | ||
273 | |||
274 | if (state != SCI_STP_DEV_CMD) { | ||
275 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | ||
276 | __func__, dev_state_name(state)); | ||
277 | return SCI_FAILURE_INVALID_STATE; | ||
278 | } | ||
279 | |||
280 | return sci_remote_node_context_suspend(&idev->rnc, | ||
281 | suspend_type, NULL, NULL); | ||
282 | } | ||
283 | |||
284 | enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, | 439 | enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, |
285 | u32 frame_index) | 440 | u32 frame_index) |
286 | { | 441 | { |
@@ -412,9 +567,9 @@ static void atapi_remote_device_resume_done(void *_dev) | |||
412 | enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, | 567 | enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, |
413 | u32 event_code) | 568 | u32 event_code) |
414 | { | 569 | { |
570 | enum sci_status status; | ||
415 | struct sci_base_state_machine *sm = &idev->sm; | 571 | struct sci_base_state_machine *sm = &idev->sm; |
416 | enum sci_remote_device_states state = sm->current_state_id; | 572 | enum sci_remote_device_states state = sm->current_state_id; |
417 | enum sci_status status; | ||
418 | 573 | ||
419 | switch (scu_get_event_type(event_code)) { | 574 | switch (scu_get_event_type(event_code)) { |
420 | case SCU_EVENT_TYPE_RNC_OPS_MISC: | 575 | case SCU_EVENT_TYPE_RNC_OPS_MISC: |
@@ -427,9 +582,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, | |||
427 | status = SCI_SUCCESS; | 582 | status = SCI_SUCCESS; |
428 | 583 | ||
429 | /* Suspend the associated RNC */ | 584 | /* Suspend the associated RNC */ |
430 | sci_remote_node_context_suspend(&idev->rnc, | 585 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); |
431 | SCI_SOFTWARE_SUSPENSION, | ||
432 | NULL, NULL); | ||
433 | 586 | ||
434 | dev_dbg(scirdev_to_dev(idev), | 587 | dev_dbg(scirdev_to_dev(idev), |
435 | "%s: device: %p event code: %x: %s\n", | 588 | "%s: device: %p event code: %x: %s\n", |
@@ -455,6 +608,10 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, | |||
455 | if (status != SCI_SUCCESS) | 608 | if (status != SCI_SUCCESS) |
456 | return status; | 609 | return status; |
457 | 610 | ||
611 | /* Decode device-specific states that may require an RNC resume during | ||
612 | * normal operation. When the abort path is active, these resumes are | ||
613 | * managed when the abort path exits. | ||
614 | */ | ||
458 | if (state == SCI_STP_DEV_ATAPI_ERROR) { | 615 | if (state == SCI_STP_DEV_ATAPI_ERROR) { |
459 | /* For ATAPI error state resume the RNC right away. */ | 616 | /* For ATAPI error state resume the RNC right away. */ |
460 | if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || | 617 | if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || |
@@ -743,10 +900,6 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
743 | if (status != SCI_SUCCESS) | 900 | if (status != SCI_SUCCESS) |
744 | return status; | 901 | return status; |
745 | 902 | ||
746 | status = sci_remote_node_context_start_task(&idev->rnc, ireq); | ||
747 | if (status != SCI_SUCCESS) | ||
748 | goto out; | ||
749 | |||
750 | status = sci_request_start(ireq); | 903 | status = sci_request_start(ireq); |
751 | if (status != SCI_SUCCESS) | 904 | if (status != SCI_SUCCESS) |
752 | goto out; | 905 | goto out; |
@@ -765,11 +918,11 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
765 | * the correct action when the remote node context is suspended | 918 | * the correct action when the remote node context is suspended |
766 | * and later resumed. | 919 | * and later resumed. |
767 | */ | 920 | */ |
768 | sci_remote_node_context_suspend(&idev->rnc, | 921 | sci_remote_device_suspend(idev, |
769 | SCI_SOFTWARE_SUSPENSION, NULL, NULL); | 922 | SCI_SW_SUSPEND_LINKHANG_DETECT); |
770 | sci_remote_node_context_resume(&idev->rnc, | 923 | |
771 | sci_remote_device_continue_request, | 924 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, |
772 | idev); | 925 | sci_remote_device_continue_request, idev); |
773 | 926 | ||
774 | out: | 927 | out: |
775 | sci_remote_device_start_request(idev, ireq, status); | 928 | sci_remote_device_start_request(idev, ireq, status); |
@@ -783,7 +936,9 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
783 | if (status != SCI_SUCCESS) | 936 | if (status != SCI_SUCCESS) |
784 | return status; | 937 | return status; |
785 | 938 | ||
786 | status = sci_remote_node_context_start_task(&idev->rnc, ireq); | 939 | /* Resume the RNC as needed: */ |
940 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, | ||
941 | NULL, NULL); | ||
787 | if (status != SCI_SUCCESS) | 942 | if (status != SCI_SUCCESS) |
788 | break; | 943 | break; |
789 | 944 | ||
@@ -892,7 +1047,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ | |||
892 | * here should go through isci_remote_device_nuke_requests. | 1047 | * here should go through isci_remote_device_nuke_requests. |
893 | * If we hit this condition, we will need a way to complete | 1048 | * If we hit this condition, we will need a way to complete |
894 | * io requests in process */ | 1049 | * io requests in process */ |
895 | BUG_ON(!list_empty(&idev->reqs_in_process)); | 1050 | BUG_ON(idev->started_request_count > 0); |
896 | 1051 | ||
897 | sci_remote_device_destruct(idev); | 1052 | sci_remote_device_destruct(idev); |
898 | list_del_init(&idev->node); | 1053 | list_del_init(&idev->node); |
@@ -954,14 +1109,21 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm | |||
954 | static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) | 1109 | static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) |
955 | { | 1110 | { |
956 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1111 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1112 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
957 | 1113 | ||
958 | sci_remote_node_context_suspend( | 1114 | dev_dbg(&ihost->pdev->dev, |
959 | &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); | 1115 | "%s: isci_device = %p\n", __func__, idev); |
1116 | |||
1117 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); | ||
960 | } | 1118 | } |
961 | 1119 | ||
962 | static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) | 1120 | static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) |
963 | { | 1121 | { |
964 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1122 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1123 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
1124 | |||
1125 | dev_dbg(&ihost->pdev->dev, | ||
1126 | "%s: isci_device = %p\n", __func__, idev); | ||
965 | 1127 | ||
966 | sci_remote_node_context_resume(&idev->rnc, NULL, NULL); | 1128 | sci_remote_node_context_resume(&idev->rnc, NULL, NULL); |
967 | } | 1129 | } |
@@ -1113,33 +1275,20 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, | |||
1113 | { | 1275 | { |
1114 | enum sci_status status; | 1276 | enum sci_status status; |
1115 | struct sci_port_properties properties; | 1277 | struct sci_port_properties properties; |
1116 | struct domain_device *dev = idev->domain_dev; | ||
1117 | 1278 | ||
1118 | sci_remote_device_construct(iport, idev); | 1279 | sci_remote_device_construct(iport, idev); |
1119 | 1280 | ||
1120 | /* | ||
1121 | * This information is request to determine how many remote node context | ||
1122 | * entries will be needed to store the remote node. | ||
1123 | */ | ||
1124 | idev->is_direct_attached = true; | ||
1125 | |||
1126 | sci_port_get_properties(iport, &properties); | 1281 | sci_port_get_properties(iport, &properties); |
1127 | /* Get accurate port width from port's phy mask for a DA device. */ | 1282 | /* Get accurate port width from port's phy mask for a DA device. */ |
1128 | idev->device_port_width = hweight32(properties.phy_mask); | 1283 | idev->device_port_width = hweight32(properties.phy_mask); |
1129 | 1284 | ||
1130 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, | 1285 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, |
1131 | idev, | 1286 | idev, |
1132 | &idev->rnc.remote_node_index); | 1287 | &idev->rnc.remote_node_index); |
1133 | 1288 | ||
1134 | if (status != SCI_SUCCESS) | 1289 | if (status != SCI_SUCCESS) |
1135 | return status; | 1290 | return status; |
1136 | 1291 | ||
1137 | if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || | ||
1138 | (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev)) | ||
1139 | /* pass */; | ||
1140 | else | ||
1141 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; | ||
1142 | |||
1143 | idev->connection_rate = sci_port_get_max_allowed_speed(iport); | 1292 | idev->connection_rate = sci_port_get_max_allowed_speed(iport); |
1144 | 1293 | ||
1145 | return SCI_SUCCESS; | 1294 | return SCI_SUCCESS; |
@@ -1171,19 +1320,13 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, | |||
1171 | if (status != SCI_SUCCESS) | 1320 | if (status != SCI_SUCCESS) |
1172 | return status; | 1321 | return status; |
1173 | 1322 | ||
1174 | if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || | 1323 | /* For SAS-2 the physical link rate is actually a logical link |
1175 | (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev)) | ||
1176 | /* pass */; | ||
1177 | else | ||
1178 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; | ||
1179 | |||
1180 | /* | ||
1181 | * For SAS-2 the physical link rate is actually a logical link | ||
1182 | * rate that incorporates multiplexing. The SCU doesn't | 1324 | * rate that incorporates multiplexing. The SCU doesn't |
1183 | * incorporate multiplexing and for the purposes of the | 1325 | * incorporate multiplexing and for the purposes of the |
1184 | * connection the logical link rate is the same as the | 1326 | * connection the logical link rate is the same as the |
1185 | * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay | 1327 | * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay |
1186 | * one another, so this code works for both situations. */ | 1328 | * one another, so this code works for both situations. |
1329 | */ | ||
1187 | idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), | 1330 | idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), |
1188 | dev->linkrate); | 1331 | dev->linkrate); |
1189 | 1332 | ||
@@ -1193,6 +1336,105 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, | |||
1193 | return SCI_SUCCESS; | 1336 | return SCI_SUCCESS; |
1194 | } | 1337 | } |
1195 | 1338 | ||
1339 | enum sci_status sci_remote_device_resume( | ||
1340 | struct isci_remote_device *idev, | ||
1341 | scics_sds_remote_node_context_callback cb_fn, | ||
1342 | void *cb_p) | ||
1343 | { | ||
1344 | enum sci_status status; | ||
1345 | |||
1346 | status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p); | ||
1347 | if (status != SCI_SUCCESS) | ||
1348 | dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n", | ||
1349 | __func__, status); | ||
1350 | return status; | ||
1351 | } | ||
1352 | |||
1353 | static void isci_remote_device_resume_from_abort_complete(void *cbparam) | ||
1354 | { | ||
1355 | struct isci_remote_device *idev = cbparam; | ||
1356 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
1357 | scics_sds_remote_node_context_callback abort_resume_cb = | ||
1358 | idev->abort_resume_cb; | ||
1359 | |||
1360 | dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n", | ||
1361 | __func__, abort_resume_cb); | ||
1362 | |||
1363 | if (abort_resume_cb != NULL) { | ||
1364 | idev->abort_resume_cb = NULL; | ||
1365 | abort_resume_cb(idev->abort_resume_cbparam); | ||
1366 | } | ||
1367 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | ||
1368 | wake_up(&ihost->eventq); | ||
1369 | } | ||
1370 | |||
1371 | static bool isci_remote_device_test_resume_done( | ||
1372 | struct isci_host *ihost, | ||
1373 | struct isci_remote_device *idev) | ||
1374 | { | ||
1375 | unsigned long flags; | ||
1376 | bool done; | ||
1377 | |||
1378 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1379 | done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags) | ||
1380 | || test_bit(IDEV_STOP_PENDING, &idev->flags) | ||
1381 | || sci_remote_node_context_is_being_destroyed(&idev->rnc); | ||
1382 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1383 | |||
1384 | return done; | ||
1385 | } | ||
1386 | |||
1387 | void isci_remote_device_wait_for_resume_from_abort( | ||
1388 | struct isci_host *ihost, | ||
1389 | struct isci_remote_device *idev) | ||
1390 | { | ||
1391 | dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n", | ||
1392 | __func__, idev); | ||
1393 | |||
1394 | #define MAX_RESUME_MSECS 10000 | ||
1395 | if (!wait_event_timeout(ihost->eventq, | ||
1396 | isci_remote_device_test_resume_done(ihost, idev), | ||
1397 | msecs_to_jiffies(MAX_RESUME_MSECS))) { | ||
1398 | |||
1399 | dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for " | ||
1400 | "resume: %p\n", __func__, idev); | ||
1401 | } | ||
1402 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | ||
1403 | |||
1404 | dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n", | ||
1405 | __func__, idev); | ||
1406 | } | ||
1407 | |||
1408 | enum sci_status isci_remote_device_resume_from_abort( | ||
1409 | struct isci_host *ihost, | ||
1410 | struct isci_remote_device *idev) | ||
1411 | { | ||
1412 | unsigned long flags; | ||
1413 | enum sci_status status = SCI_SUCCESS; | ||
1414 | int destroyed; | ||
1415 | |||
1416 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1417 | /* Preserve any current resume callbacks, for instance from other | ||
1418 | * resumptions. | ||
1419 | */ | ||
1420 | idev->abort_resume_cb = idev->rnc.user_callback; | ||
1421 | idev->abort_resume_cbparam = idev->rnc.user_cookie; | ||
1422 | set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | ||
1423 | clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); | ||
1424 | destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc); | ||
1425 | if (!destroyed) | ||
1426 | status = sci_remote_device_resume( | ||
1427 | idev, isci_remote_device_resume_from_abort_complete, | ||
1428 | idev); | ||
1429 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1430 | if (!destroyed && (status == SCI_SUCCESS)) | ||
1431 | isci_remote_device_wait_for_resume_from_abort(ihost, idev); | ||
1432 | else | ||
1433 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | ||
1434 | |||
1435 | return status; | ||
1436 | } | ||
1437 | |||
1196 | /** | 1438 | /** |
1197 | * sci_remote_device_start() - This method will start the supplied remote | 1439 | * sci_remote_device_start() - This method will start the supplied remote |
1198 | * device. This method enables normal IO requests to flow through to the | 1440 | * device. This method enables normal IO requests to flow through to the |
@@ -1207,7 +1449,7 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, | |||
1207 | * the device when there have been no phys added to it. | 1449 | * the device when there have been no phys added to it. |
1208 | */ | 1450 | */ |
1209 | static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, | 1451 | static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, |
1210 | u32 timeout) | 1452 | u32 timeout) |
1211 | { | 1453 | { |
1212 | struct sci_base_state_machine *sm = &idev->sm; | 1454 | struct sci_base_state_machine *sm = &idev->sm; |
1213 | enum sci_remote_device_states state = sm->current_state_id; | 1455 | enum sci_remote_device_states state = sm->current_state_id; |
@@ -1219,9 +1461,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, | |||
1219 | return SCI_FAILURE_INVALID_STATE; | 1461 | return SCI_FAILURE_INVALID_STATE; |
1220 | } | 1462 | } |
1221 | 1463 | ||
1222 | status = sci_remote_node_context_resume(&idev->rnc, | 1464 | status = sci_remote_device_resume(idev, remote_device_resume_done, |
1223 | remote_device_resume_done, | 1465 | idev); |
1224 | idev); | ||
1225 | if (status != SCI_SUCCESS) | 1466 | if (status != SCI_SUCCESS) |
1226 | return status; | 1467 | return status; |
1227 | 1468 | ||
@@ -1259,20 +1500,6 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, | |||
1259 | return status; | 1500 | return status; |
1260 | } | 1501 | } |
1261 | 1502 | ||
1262 | void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev) | ||
1263 | { | ||
1264 | DECLARE_COMPLETION_ONSTACK(aborted_task_completion); | ||
1265 | |||
1266 | dev_dbg(&ihost->pdev->dev, | ||
1267 | "%s: idev = %p\n", __func__, idev); | ||
1268 | |||
1269 | /* Cleanup all requests pending for this device. */ | ||
1270 | isci_terminate_pending_requests(ihost, idev); | ||
1271 | |||
1272 | dev_dbg(&ihost->pdev->dev, | ||
1273 | "%s: idev = %p, done\n", __func__, idev); | ||
1274 | } | ||
1275 | |||
1276 | /** | 1503 | /** |
1277 | * This function builds the isci_remote_device when a libsas dev_found message | 1504 | * This function builds the isci_remote_device when a libsas dev_found message |
1278 | * is received. | 1505 | * is received. |
@@ -1297,10 +1524,6 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) | |||
1297 | dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); | 1524 | dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); |
1298 | return NULL; | 1525 | return NULL; |
1299 | } | 1526 | } |
1300 | |||
1301 | if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n")) | ||
1302 | return NULL; | ||
1303 | |||
1304 | if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) | 1527 | if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) |
1305 | return NULL; | 1528 | return NULL; |
1306 | 1529 | ||
@@ -1342,14 +1565,8 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem | |||
1342 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1565 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1343 | idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ | 1566 | idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ |
1344 | set_bit(IDEV_GONE, &idev->flags); | 1567 | set_bit(IDEV_GONE, &idev->flags); |
1345 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1346 | |||
1347 | /* Kill all outstanding requests. */ | ||
1348 | isci_remote_device_nuke_requests(ihost, idev); | ||
1349 | 1568 | ||
1350 | set_bit(IDEV_STOP_PENDING, &idev->flags); | 1569 | set_bit(IDEV_STOP_PENDING, &idev->flags); |
1351 | |||
1352 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1353 | status = sci_remote_device_stop(idev, 50); | 1570 | status = sci_remote_device_stop(idev, 50); |
1354 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1571 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1355 | 1572 | ||
@@ -1359,6 +1576,9 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem | |||
1359 | else | 1576 | else |
1360 | wait_for_device_stop(ihost, idev); | 1577 | wait_for_device_stop(ihost, idev); |
1361 | 1578 | ||
1579 | dev_dbg(&ihost->pdev->dev, | ||
1580 | "%s: isci_device = %p, waiting done.\n", __func__, idev); | ||
1581 | |||
1362 | return status; | 1582 | return status; |
1363 | } | 1583 | } |
1364 | 1584 | ||
@@ -1434,3 +1654,73 @@ int isci_remote_device_found(struct domain_device *dev) | |||
1434 | 1654 | ||
1435 | return status == SCI_SUCCESS ? 0 : -ENODEV; | 1655 | return status == SCI_SUCCESS ? 0 : -ENODEV; |
1436 | } | 1656 | } |
1657 | |||
1658 | enum sci_status isci_remote_device_suspend_terminate( | ||
1659 | struct isci_host *ihost, | ||
1660 | struct isci_remote_device *idev, | ||
1661 | struct isci_request *ireq) | ||
1662 | { | ||
1663 | unsigned long flags; | ||
1664 | enum sci_status status; | ||
1665 | |||
1666 | /* Put the device into suspension. */ | ||
1667 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1668 | set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); | ||
1669 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); | ||
1670 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1671 | |||
1672 | /* Terminate and wait for the completions. */ | ||
1673 | status = isci_remote_device_terminate_requests(ihost, idev, ireq); | ||
1674 | if (status != SCI_SUCCESS) | ||
1675 | dev_dbg(&ihost->pdev->dev, | ||
1676 | "%s: isci_remote_device_terminate_requests(%p) " | ||
1677 | "returned %d!\n", | ||
1678 | __func__, idev, status); | ||
1679 | |||
1680 | /* NOTE: RNC resumption is left to the caller! */ | ||
1681 | return status; | ||
1682 | } | ||
1683 | |||
1684 | int isci_remote_device_is_safe_to_abort( | ||
1685 | struct isci_remote_device *idev) | ||
1686 | { | ||
1687 | return sci_remote_node_context_is_safe_to_abort(&idev->rnc); | ||
1688 | } | ||
1689 | |||
1690 | enum sci_status sci_remote_device_abort_requests_pending_abort( | ||
1691 | struct isci_remote_device *idev) | ||
1692 | { | ||
1693 | return sci_remote_device_terminate_reqs_checkabort(idev, 1); | ||
1694 | } | ||
1695 | |||
1696 | enum sci_status isci_remote_device_reset_complete( | ||
1697 | struct isci_host *ihost, | ||
1698 | struct isci_remote_device *idev) | ||
1699 | { | ||
1700 | unsigned long flags; | ||
1701 | enum sci_status status; | ||
1702 | |||
1703 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
1704 | status = sci_remote_device_reset_complete(idev); | ||
1705 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1706 | |||
1707 | return status; | ||
1708 | } | ||
1709 | |||
1710 | void isci_dev_set_hang_detection_timeout( | ||
1711 | struct isci_remote_device *idev, | ||
1712 | u32 timeout) | ||
1713 | { | ||
1714 | if (dev_is_sata(idev->domain_dev)) { | ||
1715 | if (timeout) { | ||
1716 | if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED, | ||
1717 | &idev->flags)) | ||
1718 | return; /* Already enabled. */ | ||
1719 | } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED, | ||
1720 | &idev->flags)) | ||
1721 | return; /* Not enabled. */ | ||
1722 | |||
1723 | sci_port_set_hang_detection_timeout(idev->owning_port, | ||
1724 | timeout); | ||
1725 | } | ||
1726 | } | ||
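isci_dev_set_hang_detection_timeout() above is idempotent by construction: test_and_set_bit/test_and_clear_bit ensure the port register is touched only on an actual enable or disable transition, so loosely paired calls stay safe. A hedged usage sketch; the timeout value is illustrative:

    static void with_hang_detection(struct isci_remote_device *idev)
    {
            /* Arm SATA link-layer hang detection; value is illustrative. */
            isci_dev_set_hang_detection_timeout(idev, 0x50000);

            /* ... perform the reset or recovery step here ... */

            /* Disarm; a repeated disarm would be a harmless no-op. */
            isci_dev_set_hang_detection_timeout(idev, 0);
    }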
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 58637ee08f55..7674caae1d88 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h | |||
@@ -85,27 +85,38 @@ struct isci_remote_device { | |||
85 | #define IDEV_GONE 3 | 85 | #define IDEV_GONE 3 |
86 | #define IDEV_IO_READY 4 | 86 | #define IDEV_IO_READY 4 |
87 | #define IDEV_IO_NCQERROR 5 | 87 | #define IDEV_IO_NCQERROR 5 |
88 | #define IDEV_RNC_LLHANG_ENABLED 6 | ||
89 | #define IDEV_ABORT_PATH_ACTIVE 7 | ||
90 | #define IDEV_ABORT_PATH_RESUME_PENDING 8 | ||
88 | unsigned long flags; | 91 | unsigned long flags; |
89 | struct kref kref; | 92 | struct kref kref; |
90 | struct isci_port *isci_port; | 93 | struct isci_port *isci_port; |
91 | struct domain_device *domain_dev; | 94 | struct domain_device *domain_dev; |
92 | struct list_head node; | 95 | struct list_head node; |
93 | struct list_head reqs_in_process; | ||
94 | struct sci_base_state_machine sm; | 96 | struct sci_base_state_machine sm; |
95 | u32 device_port_width; | 97 | u32 device_port_width; |
96 | enum sas_linkrate connection_rate; | 98 | enum sas_linkrate connection_rate; |
97 | bool is_direct_attached; | ||
98 | struct isci_port *owning_port; | 99 | struct isci_port *owning_port; |
99 | struct sci_remote_node_context rnc; | 100 | struct sci_remote_node_context rnc; |
100 | /* XXX unify with device reference counting and delete */ | 101 | /* XXX unify with device reference counting and delete */ |
101 | u32 started_request_count; | 102 | u32 started_request_count; |
102 | struct isci_request *working_request; | 103 | struct isci_request *working_request; |
103 | u32 not_ready_reason; | 104 | u32 not_ready_reason; |
105 | scics_sds_remote_node_context_callback abort_resume_cb; | ||
106 | void *abort_resume_cbparam; | ||
104 | }; | 107 | }; |
105 | 108 | ||
106 | #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 | 109 | #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 |
107 | 110 | ||
108 | /* device reference routines must be called under sci_lock */ | 111 | /* device reference routines must be called under sci_lock */ |
112 | static inline struct isci_remote_device *isci_get_device( | ||
113 | struct isci_remote_device *idev) | ||
114 | { | ||
115 | if (idev) | ||
116 | kref_get(&idev->kref); | ||
117 | return idev; | ||
118 | } | ||
119 | |||
109 | static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) | 120 | static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) |
110 | { | 121 | { |
111 | struct isci_remote_device *idev = dev->lldd_dev; | 122 | struct isci_remote_device *idev = dev->lldd_dev; |
@@ -302,6 +313,8 @@ static inline void sci_remote_device_decrement_request_count(struct isci_remote_ | |||
302 | idev->started_request_count--; | 313 | idev->started_request_count--; |
303 | } | 314 | } |
304 | 315 | ||
316 | void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout); | ||
317 | |||
305 | enum sci_status sci_remote_device_frame_handler( | 318 | enum sci_status sci_remote_device_frame_handler( |
306 | struct isci_remote_device *idev, | 319 | struct isci_remote_device *idev, |
307 | u32 frame_index); | 320 | u32 frame_index); |
@@ -325,12 +338,50 @@ enum sci_status sci_remote_device_complete_io( | |||
325 | struct isci_remote_device *idev, | 338 | struct isci_remote_device *idev, |
326 | struct isci_request *ireq); | 339 | struct isci_request *ireq); |
327 | 340 | ||
328 | enum sci_status sci_remote_device_suspend( | ||
329 | struct isci_remote_device *idev, | ||
330 | u32 suspend_type); | ||
331 | |||
332 | void sci_remote_device_post_request( | 341 | void sci_remote_device_post_request( |
333 | struct isci_remote_device *idev, | 342 | struct isci_remote_device *idev, |
334 | u32 request); | 343 | u32 request); |
335 | 344 | ||
345 | enum sci_status sci_remote_device_terminate_requests( | ||
346 | struct isci_remote_device *idev); | ||
347 | |||
348 | int isci_remote_device_is_safe_to_abort( | ||
349 | struct isci_remote_device *idev); | ||
350 | |||
351 | enum sci_status | ||
352 | sci_remote_device_abort_requests_pending_abort( | ||
353 | struct isci_remote_device *idev); | ||
354 | |||
355 | enum sci_status isci_remote_device_suspend( | ||
356 | struct isci_host *ihost, | ||
357 | struct isci_remote_device *idev); | ||
358 | |||
359 | enum sci_status sci_remote_device_resume( | ||
360 | struct isci_remote_device *idev, | ||
361 | scics_sds_remote_node_context_callback cb_fn, | ||
362 | void *cb_p); | ||
363 | |||
364 | enum sci_status isci_remote_device_resume_from_abort( | ||
365 | struct isci_host *ihost, | ||
366 | struct isci_remote_device *idev); | ||
367 | |||
368 | enum sci_status isci_remote_device_reset( | ||
369 | struct isci_host *ihost, | ||
370 | struct isci_remote_device *idev); | ||
371 | |||
372 | enum sci_status isci_remote_device_reset_complete( | ||
373 | struct isci_host *ihost, | ||
374 | struct isci_remote_device *idev); | ||
375 | |||
376 | enum sci_status isci_remote_device_suspend_terminate( | ||
377 | struct isci_host *ihost, | ||
378 | struct isci_remote_device *idev, | ||
379 | struct isci_request *ireq); | ||
380 | |||
381 | enum sci_status isci_remote_device_terminate_requests( | ||
382 | struct isci_host *ihost, | ||
383 | struct isci_remote_device *idev, | ||
384 | struct isci_request *ireq); | ||
385 | enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, | ||
386 | enum sci_remote_node_suspension_reasons reason); | ||
336 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ | 387 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ |
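The header now pairs isci_get_device() with the existing isci_lookup_device()/isci_put_device(); the abort path uses it to pin a device it already holds a pointer to. A sketch of the reference discipline (the get must happen under scic_lock, per the comment above the helpers):

    static void pin_and_quiesce(struct isci_host *ihost,
                                struct isci_remote_device *idev)
    {
            unsigned long flags;

            spin_lock_irqsave(&ihost->scic_lock, flags);
            idev = isci_get_device(idev);   /* NULL-safe kref_get */
            spin_unlock_irqrestore(&ihost->scic_lock, flags);

            if (idev) {
                    /* ... terminate and wait without holding the lock ... */
                    isci_put_device(idev);  /* drop the pinning reference */
            }
    }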
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 3a9463481f38..1910100638a2 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c | |||
@@ -52,7 +52,7 @@ | |||
52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
54 | */ | 54 | */ |
55 | 55 | #include <scsi/sas_ata.h> | |
56 | #include "host.h" | 56 | #include "host.h" |
57 | #include "isci.h" | 57 | #include "isci.h" |
58 | #include "remote_device.h" | 58 | #include "remote_device.h" |
@@ -90,6 +90,15 @@ bool sci_remote_node_context_is_ready( | |||
90 | return false; | 90 | return false; |
91 | } | 91 | } |
92 | 92 | ||
93 | bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc) | ||
94 | { | ||
95 | u32 current_state = sci_rnc->sm.current_state_id; | ||
96 | |||
97 | if (current_state == SCI_RNC_TX_RX_SUSPENDED) | ||
98 | return true; | ||
99 | return false; | ||
100 | } | ||
101 | |||
93 | static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) | 102 | static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) |
94 | { | 103 | { |
95 | if (id < ihost->remote_node_entries && | 104 | if (id < ihost->remote_node_entries && |
@@ -131,7 +140,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont | |||
131 | 140 | ||
132 | rnc->ssp.arbitration_wait_time = 0; | 141 | rnc->ssp.arbitration_wait_time = 0; |
133 | 142 | ||
134 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | 143 | if (dev_is_sata(dev)) { |
135 | rnc->ssp.connection_occupancy_timeout = | 144 | rnc->ssp.connection_occupancy_timeout = |
136 | ihost->user_parameters.stp_max_occupancy_timeout; | 145 | ihost->user_parameters.stp_max_occupancy_timeout; |
137 | rnc->ssp.connection_inactivity_timeout = | 146 | rnc->ssp.connection_inactivity_timeout = |
@@ -151,7 +160,6 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont | |||
151 | rnc->ssp.oaf_source_zone_group = 0; | 160 | rnc->ssp.oaf_source_zone_group = 0; |
152 | rnc->ssp.oaf_more_compatibility_features = 0; | 161 | rnc->ssp.oaf_more_compatibility_features = 0; |
153 | } | 162 | } |
154 | |||
155 | /** | 163 | /** |
156 | * | 164 | * |
157 | * @sci_rnc: | 165 | * @sci_rnc: |
@@ -165,23 +173,30 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont | |||
165 | static void sci_remote_node_context_setup_to_resume( | 173 | static void sci_remote_node_context_setup_to_resume( |
166 | struct sci_remote_node_context *sci_rnc, | 174 | struct sci_remote_node_context *sci_rnc, |
167 | scics_sds_remote_node_context_callback callback, | 175 | scics_sds_remote_node_context_callback callback, |
168 | void *callback_parameter) | 176 | void *callback_parameter, |
177 | enum sci_remote_node_context_destination_state dest_param) | ||
169 | { | 178 | { |
170 | if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) { | 179 | if (sci_rnc->destination_state != RNC_DEST_FINAL) { |
171 | sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY; | 180 | sci_rnc->destination_state = dest_param; |
172 | sci_rnc->user_callback = callback; | 181 | if (callback != NULL) { |
173 | sci_rnc->user_cookie = callback_parameter; | 182 | sci_rnc->user_callback = callback; |
183 | sci_rnc->user_cookie = callback_parameter; | ||
184 | } | ||
174 | } | 185 | } |
175 | } | 186 | } |
176 | 187 | ||
177 | static void sci_remote_node_context_setup_to_destory( | 188 | static void sci_remote_node_context_setup_to_destroy( |
178 | struct sci_remote_node_context *sci_rnc, | 189 | struct sci_remote_node_context *sci_rnc, |
179 | scics_sds_remote_node_context_callback callback, | 190 | scics_sds_remote_node_context_callback callback, |
180 | void *callback_parameter) | 191 | void *callback_parameter) |
181 | { | 192 | { |
182 | sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL; | 193 | struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc)); |
194 | |||
195 | sci_rnc->destination_state = RNC_DEST_FINAL; | ||
183 | sci_rnc->user_callback = callback; | 196 | sci_rnc->user_callback = callback; |
184 | sci_rnc->user_cookie = callback_parameter; | 197 | sci_rnc->user_cookie = callback_parameter; |
198 | |||
199 | wake_up(&ihost->eventq); | ||
185 | } | 200 | } |
186 | 201 | ||
187 | /** | 202 | /** |
@@ -203,9 +218,19 @@ static void sci_remote_node_context_notify_user( | |||
203 | 218 | ||
204 | static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) | 219 | static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) |
205 | { | 220 | { |
206 | if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) | 221 | switch (rnc->destination_state) { |
222 | case RNC_DEST_READY: | ||
223 | case RNC_DEST_SUSPENDED_RESUME: | ||
224 | rnc->destination_state = RNC_DEST_READY; | ||
225 | /* Fall through... */ | ||
226 | case RNC_DEST_FINAL: | ||
207 | sci_remote_node_context_resume(rnc, rnc->user_callback, | 227 | sci_remote_node_context_resume(rnc, rnc->user_callback, |
208 | rnc->user_cookie); | 228 | rnc->user_cookie); |
229 | break; | ||
230 | default: | ||
231 | rnc->destination_state = RNC_DEST_UNSPECIFIED; | ||
232 | break; | ||
233 | } | ||
209 | } | 234 | } |
210 | 235 | ||
211 | static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) | 236 | static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) |
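continue_state_transitions() above now dispatches on an expanded destination-state set. A hedged reading of the renamed RNC_DEST_* constants used in these hunks (the ordering here is an assumption; the names and their roles come from the patch):

    /* Assumed intent of each queued RNC destination state: */
    enum sci_remote_node_context_destination_state {
            RNC_DEST_UNSPECIFIED,           /* no transition queued */
            RNC_DEST_READY,                 /* resume once the suspend lands */
            RNC_DEST_FINAL,                 /* resume on the way to destruction */
            RNC_DEST_SUSPENDED,             /* stay suspended when ready again */
            RNC_DEST_SUSPENDED_RESUME,      /* suspend, then resume and notify */
    };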
@@ -219,13 +244,12 @@ static void sci_remote_node_context_validate_context_buffer(struct sci_remote_no | |||
219 | 244 | ||
220 | rnc_buffer->ssp.is_valid = true; | 245 | rnc_buffer->ssp.is_valid = true; |
221 | 246 | ||
222 | if (!idev->is_direct_attached && | 247 | if (dev_is_sata(dev) && dev->parent) { |
223 | (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { | ||
224 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); | 248 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); |
225 | } else { | 249 | } else { |
226 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); | 250 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); |
227 | 251 | ||
228 | if (idev->is_direct_attached) | 252 | if (!dev->parent) |
229 | sci_port_setup_transports(idev->owning_port, | 253 | sci_port_setup_transports(idev->owning_port, |
230 | sci_rnc->remote_node_index); | 254 | sci_rnc->remote_node_index); |
231 | } | 255 | } |
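The validate-context hunk above replaces the stored is_direct_attached flag with the domain topology itself: a device with no parent port hangs directly off an SCU phy, and only an expander-attached SATA device needs the 96-byte RNC post. A hypothetical helper capturing the test:

    /* Direct-attached means no parent port in the SAS domain. */
    static inline bool idev_is_direct_attached(struct domain_device *dev)
    {
            return !dev->parent;
    }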
@@ -248,13 +272,18 @@ static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_ | |||
248 | static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) | 272 | static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) |
249 | { | 273 | { |
250 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 274 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
275 | struct isci_remote_device *idev = rnc_to_dev(rnc); | ||
276 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
251 | 277 | ||
252 | /* Check to see if we have gotten back to the initial state because | 278 | /* Check to see if we have gotten back to the initial state because |
253 | * someone requested to destroy the remote node context object. | 279 | * someone requested to destroy the remote node context object. |
254 | */ | 280 | */ |
255 | if (sm->previous_state_id == SCI_RNC_INVALIDATING) { | 281 | if (sm->previous_state_id == SCI_RNC_INVALIDATING) { |
256 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; | 282 | rnc->destination_state = RNC_DEST_UNSPECIFIED; |
257 | sci_remote_node_context_notify_user(rnc); | 283 | sci_remote_node_context_notify_user(rnc); |
284 | |||
285 | smp_wmb(); | ||
286 | wake_up(&ihost->eventq); | ||
258 | } | 287 | } |
259 | } | 288 | } |
260 | 289 | ||
@@ -269,6 +298,8 @@ static void sci_remote_node_context_invalidating_state_enter(struct sci_base_sta | |||
269 | { | 298 | { |
270 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 299 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
271 | 300 | ||
301 | /* Terminate all outstanding requests. */ | ||
302 | sci_remote_device_terminate_requests(rnc_to_dev(rnc)); | ||
272 | sci_remote_node_context_invalidate_context_buffer(rnc); | 303 | sci_remote_node_context_invalidate_context_buffer(rnc); |
273 | } | 304 | } |
274 | 305 | ||
@@ -287,10 +318,8 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m | |||
287 | * resume because of a target reset we also need to update | 318 | * resume because of a target reset we also need to update |
288 | * the STPTLDARNI register with the RNi of the device | 319 | * the STPTLDARNI register with the RNi of the device |
289 | */ | 320 | */ |
290 | if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && | 321 | if (dev_is_sata(dev) && !dev->parent) |
291 | idev->is_direct_attached) | 322 | sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); |
292 | sci_port_setup_transports(idev->owning_port, | ||
293 | rnc->remote_node_index); | ||
294 | 323 | ||
295 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); | 324 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); |
296 | } | 325 | } |
@@ -298,10 +327,22 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m | |||
298 | static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) | 327 | static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) |
299 | { | 328 | { |
300 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 329 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
330 | enum sci_remote_node_context_destination_state dest_select; | ||
331 | int tell_user = 1; | ||
332 | |||
333 | dest_select = rnc->destination_state; | ||
334 | rnc->destination_state = RNC_DEST_UNSPECIFIED; | ||
301 | 335 | ||
302 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; | 336 | if ((dest_select == RNC_DEST_SUSPENDED) || |
337 | (dest_select == RNC_DEST_SUSPENDED_RESUME)) { | ||
338 | sci_remote_node_context_suspend( | ||
339 | rnc, rnc->suspend_reason, | ||
340 | SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); | ||
303 | 341 | ||
304 | if (rnc->user_callback) | 342 | if (dest_select == RNC_DEST_SUSPENDED_RESUME) |
343 | tell_user = 0; /* Wait until ready again. */ | ||
344 | } | ||
345 | if (tell_user) | ||
305 | sci_remote_node_context_notify_user(rnc); | 346 | sci_remote_node_context_notify_user(rnc); |
306 | } | 347 | } |
307 | 348 | ||
@@ -315,10 +356,34 @@ static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_sta | |||
315 | static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) | 356 | static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) |
316 | { | 357 | { |
317 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 358 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
359 | struct isci_remote_device *idev = rnc_to_dev(rnc); | ||
360 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
361 | u32 new_count = rnc->suspend_count + 1; | ||
362 | |||
363 | if (new_count == 0) | ||
364 | rnc->suspend_count = 1; | ||
365 | else | ||
366 | rnc->suspend_count = new_count; | ||
367 | smp_wmb(); | ||
318 | 368 | ||
369 | /* Terminate outstanding requests pending abort. */ | ||
370 | sci_remote_device_abort_requests_pending_abort(idev); | ||
371 | |||
372 | wake_up(&ihost->eventq); | ||
319 | sci_remote_node_context_continue_state_transitions(rnc); | 373 | sci_remote_node_context_continue_state_transitions(rnc); |
320 | } | 374 | } |
321 | 375 | ||
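The bookkeeping above bumps suspend_count but steps over zero on 32-bit wraparound, presumably so that zero can keep meaning "never suspended" to anyone comparing cached counts; the smp_wmb() then publishes the new count before waiters on eventq are woken. A standalone sketch of the skip-zero increment:

    #include <stdint.h>
    #include <stdio.h>

    /* Bump a u32 event counter but never land on 0, mirroring the
     * suspend_count update above; 0 stays reserved as a sentinel. */
    static uint32_t bump_skipping_zero(uint32_t count)
    {
        uint32_t next = count + 1;
        return next ? next : 1;
    }

    int main(void)
    {
        printf("%u -> %u\n", 41u, bump_skipping_zero(41u));
        printf("%u -> %u\n", UINT32_MAX, bump_skipping_zero(UINT32_MAX));
        return 0;
    }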
376 | static void sci_remote_node_context_await_suspend_state_exit( | ||
377 | struct sci_base_state_machine *sm) | ||
378 | { | ||
379 | struct sci_remote_node_context *rnc | ||
380 | = container_of(sm, typeof(*rnc), sm); | ||
381 | struct isci_remote_device *idev = rnc_to_dev(rnc); | ||
382 | |||
383 | if (dev_is_sata(idev->domain_dev)) | ||
384 | isci_dev_set_hang_detection_timeout(idev, 0); | ||
385 | } | ||
386 | |||
322 | static const struct sci_base_state sci_remote_node_context_state_table[] = { | 387 | static const struct sci_base_state sci_remote_node_context_state_table[] = { |
323 | [SCI_RNC_INITIAL] = { | 388 | [SCI_RNC_INITIAL] = { |
324 | .enter_state = sci_remote_node_context_initial_state_enter, | 389 | .enter_state = sci_remote_node_context_initial_state_enter, |
@@ -341,7 +406,9 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = { | |||
341 | [SCI_RNC_TX_RX_SUSPENDED] = { | 406 | [SCI_RNC_TX_RX_SUSPENDED] = { |
342 | .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, | 407 | .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, |
343 | }, | 408 | }, |
344 | [SCI_RNC_AWAIT_SUSPENSION] = { }, | 409 | [SCI_RNC_AWAIT_SUSPENSION] = { |
410 | .exit_state = sci_remote_node_context_await_suspend_state_exit, | ||
411 | }, | ||
345 | }; | 412 | }; |
346 | 413 | ||
347 | void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, | 414 | void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, |
@@ -350,7 +417,7 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, | |||
350 | memset(rnc, 0, sizeof(struct sci_remote_node_context)); | 417 | memset(rnc, 0, sizeof(struct sci_remote_node_context)); |
351 | 418 | ||
352 | rnc->remote_node_index = remote_node_index; | 419 | rnc->remote_node_index = remote_node_index; |
353 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; | 420 | rnc->destination_state = RNC_DEST_UNSPECIFIED; |
354 | 421 | ||
355 | sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); | 422 | sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); |
356 | } | 423 | } |
@@ -359,6 +426,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
359 | u32 event_code) | 426 | u32 event_code) |
360 | { | 427 | { |
361 | enum scis_sds_remote_node_context_states state; | 428 | enum scis_sds_remote_node_context_states state; |
429 | u32 next_state; | ||
362 | 430 | ||
363 | state = sci_rnc->sm.current_state_id; | 431 | state = sci_rnc->sm.current_state_id; |
364 | switch (state) { | 432 | switch (state) { |
@@ -373,18 +441,18 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
373 | break; | 441 | break; |
374 | case SCI_RNC_INVALIDATING: | 442 | case SCI_RNC_INVALIDATING: |
375 | if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { | 443 | if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { |
376 | if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) | 444 | if (sci_rnc->destination_state == RNC_DEST_FINAL) |
377 | state = SCI_RNC_INITIAL; | 445 | next_state = SCI_RNC_INITIAL; |
378 | else | 446 | else |
379 | state = SCI_RNC_POSTING; | 447 | next_state = SCI_RNC_POSTING; |
380 | sci_change_state(&sci_rnc->sm, state); | 448 | sci_change_state(&sci_rnc->sm, next_state); |
381 | } else { | 449 | } else { |
382 | switch (scu_get_event_type(event_code)) { | 450 | switch (scu_get_event_type(event_code)) { |
383 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: | 451 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: |
384 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: | 452 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: |
385 | /* We really don't care if the hardware is going to suspend | 453 | /* We really don't care if the hardware is going to suspend |
386 | * the device since it's being invalidated anyway */ | 454 | * the device since it's being invalidated anyway */ |
387 | dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 455 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
388 | "%s: SCIC Remote Node Context 0x%p was " | 456 | "%s: SCIC Remote Node Context 0x%p was " |
389 | "suspeneded by hardware while being " | 457 | "suspeneded by hardware while being " |
390 | "invalidated.\n", __func__, sci_rnc); | 458 | "invalidated.\n", __func__, sci_rnc); |
@@ -403,7 +471,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
403 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: | 471 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: |
404 | /* We really don't care if the hardware is going to suspend | 472 | /* We really don't care if the hardware is going to suspend |
405 | * the device since it's being resumed anyway */ | 473 | * the device since it's being resumed anyway */ |
406 | dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 474 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
407 | "%s: SCIC Remote Node Context 0x%p was " | 475 | "%s: SCIC Remote Node Context 0x%p was " |
408 | "suspeneded by hardware while being resumed.\n", | 476 | "suspeneded by hardware while being resumed.\n", |
409 | __func__, sci_rnc); | 477 | __func__, sci_rnc); |
@@ -417,11 +485,11 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
417 | switch (scu_get_event_type(event_code)) { | 485 | switch (scu_get_event_type(event_code)) { |
418 | case SCU_EVENT_TL_RNC_SUSPEND_TX: | 486 | case SCU_EVENT_TL_RNC_SUSPEND_TX: |
419 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); | 487 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); |
420 | sci_rnc->suspension_code = scu_get_event_specifier(event_code); | 488 | sci_rnc->suspend_type = scu_get_event_type(event_code); |
421 | break; | 489 | break; |
422 | case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: | 490 | case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: |
423 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); | 491 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); |
424 | sci_rnc->suspension_code = scu_get_event_specifier(event_code); | 492 | sci_rnc->suspend_type = scu_get_event_type(event_code); |
425 | break; | 493 | break; |
426 | default: | 494 | default: |
427 | goto out; | 495 | goto out; |
@@ -430,27 +498,29 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con | |||
430 | case SCI_RNC_AWAIT_SUSPENSION: | 498 | case SCI_RNC_AWAIT_SUSPENSION: |
431 | switch (scu_get_event_type(event_code)) { | 499 | switch (scu_get_event_type(event_code)) { |
432 | case SCU_EVENT_TL_RNC_SUSPEND_TX: | 500 | case SCU_EVENT_TL_RNC_SUSPEND_TX: |
433 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); | 501 | next_state = SCI_RNC_TX_SUSPENDED; |
434 | sci_rnc->suspension_code = scu_get_event_specifier(event_code); | ||
435 | break; | 502 | break; |
436 | case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: | 503 | case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: |
437 | sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); | 504 | next_state = SCI_RNC_TX_RX_SUSPENDED; |
438 | sci_rnc->suspension_code = scu_get_event_specifier(event_code); | ||
439 | break; | 505 | break; |
440 | default: | 506 | default: |
441 | goto out; | 507 | goto out; |
442 | } | 508 | } |
509 | if (sci_rnc->suspend_type == scu_get_event_type(event_code)) | ||
510 | sci_change_state(&sci_rnc->sm, next_state); | ||
443 | break; | 511 | break; |
444 | default: | 512 | default: |
445 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 513 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
446 | "%s: invalid state %d\n", __func__, state); | 514 | "%s: invalid state: %s\n", __func__, |
515 | rnc_state_name(state)); | ||
447 | return SCI_FAILURE_INVALID_STATE; | 516 | return SCI_FAILURE_INVALID_STATE; |
448 | } | 517 | } |
449 | return SCI_SUCCESS; | 518 | return SCI_SUCCESS; |
450 | 519 | ||
451 | out: | 520 | out: |
452 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 521 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
453 | "%s: code: %#x state: %d\n", __func__, event_code, state); | 522 | "%s: code: %#x state: %s\n", __func__, event_code, |
523 | rnc_state_name(state)); | ||
454 | return SCI_FAILURE; | 524 | return SCI_FAILURE; |
455 | 525 | ||
456 | } | 526 | } |
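Note the behavioral change in the SCI_RNC_AWAIT_SUSPENSION arm: instead of transitioning on any TL suspend event, the handler now computes the would-be next state and only commits it when the event type matches the suspension the driver requested. A toy model of that filter, with invented enum values:

    #include <stdio.h>

    enum ev { SUSPEND_TX, SUSPEND_TX_RX, OTHER };
    enum st { AWAIT, TX_SUSPENDED, TX_RX_SUSPENDED };

    /* Pick the target state from the event, but only transition when the
     * event matches the suspend type the driver asked for; a stray
     * suspension of the other flavor leaves the RNC waiting. */
    static enum st await_event(enum st cur, enum ev e, enum ev expected)
    {
        enum st next;

        if (e == SUSPEND_TX)
            next = TX_SUSPENDED;
        else if (e == SUSPEND_TX_RX)
            next = TX_RX_SUSPENDED;
        else
            return cur;
        return (e == expected) ? next : cur;
    }

    int main(void)
    {
        printf("%d\n", await_event(AWAIT, SUSPEND_TX, SUSPEND_TX_RX));   /* stays AWAIT */
        printf("%d\n", await_event(AWAIT, SUSPEND_TX_RX, SUSPEND_TX_RX)); /* commits */
        return 0;
    }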
@@ -464,20 +534,23 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context | |||
464 | state = sci_rnc->sm.current_state_id; | 534 | state = sci_rnc->sm.current_state_id; |
465 | switch (state) { | 535 | switch (state) { |
466 | case SCI_RNC_INVALIDATING: | 536 | case SCI_RNC_INVALIDATING: |
467 | sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); | 537 | sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); |
468 | return SCI_SUCCESS; | 538 | return SCI_SUCCESS; |
469 | case SCI_RNC_POSTING: | 539 | case SCI_RNC_POSTING: |
470 | case SCI_RNC_RESUMING: | 540 | case SCI_RNC_RESUMING: |
471 | case SCI_RNC_READY: | 541 | case SCI_RNC_READY: |
472 | case SCI_RNC_TX_SUSPENDED: | 542 | case SCI_RNC_TX_SUSPENDED: |
473 | case SCI_RNC_TX_RX_SUSPENDED: | 543 | case SCI_RNC_TX_RX_SUSPENDED: |
474 | case SCI_RNC_AWAIT_SUSPENSION: | 544 | sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); |
475 | sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); | ||
476 | sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); | 545 | sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); |
477 | return SCI_SUCCESS; | 546 | return SCI_SUCCESS; |
547 | case SCI_RNC_AWAIT_SUSPENSION: | ||
548 | sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); | ||
549 | return SCI_SUCCESS; | ||
478 | case SCI_RNC_INITIAL: | 550 | case SCI_RNC_INITIAL: |
479 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 551 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
480 | "%s: invalid state %d\n", __func__, state); | 552 | "%s: invalid state: %s\n", __func__, |
553 | rnc_state_name(state)); | ||
481 | /* We have decided that the destruct request on the remote node context | 554 | /* We have decided that the destruct request on the remote node context |
482 | * cannot fail since it is either in the initial/destroyed state or | 555 | * cannot fail since it is either in the initial/destroyed state or |
483 | * can be destroyed. | 556 | * can be destroyed. |
@@ -485,35 +558,101 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context | |||
485 | return SCI_SUCCESS; | 558 | return SCI_SUCCESS; |
486 | default: | 559 | default: |
487 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 560 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
488 | "%s: invalid state %d\n", __func__, state); | 561 | "%s: invalid state %s\n", __func__, |
562 | rnc_state_name(state)); | ||
489 | return SCI_FAILURE_INVALID_STATE; | 563 | return SCI_FAILURE_INVALID_STATE; |
490 | } | 564 | } |
491 | } | 565 | } |
492 | 566 | ||
493 | enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, | 567 | enum sci_status sci_remote_node_context_suspend( |
494 | u32 suspend_type, | 568 | struct sci_remote_node_context *sci_rnc, |
495 | scics_sds_remote_node_context_callback cb_fn, | 569 | enum sci_remote_node_suspension_reasons suspend_reason, |
496 | void *cb_p) | 570 | u32 suspend_type) |
497 | { | 571 | { |
498 | enum scis_sds_remote_node_context_states state; | 572 | enum scis_sds_remote_node_context_states state |
573 | = sci_rnc->sm.current_state_id; | ||
574 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | ||
575 | enum sci_status status = SCI_FAILURE_INVALID_STATE; | ||
576 | enum sci_remote_node_context_destination_state dest_param = | ||
577 | RNC_DEST_UNSPECIFIED; | ||
578 | |||
579 | dev_dbg(scirdev_to_dev(idev), | ||
580 | "%s: current state %s, current suspend_type %x dest state %d," | ||
581 | " arg suspend_reason %d, arg suspend_type %x", | ||
582 | __func__, rnc_state_name(state), sci_rnc->suspend_type, | ||
583 | sci_rnc->destination_state, suspend_reason, | ||
584 | suspend_type); | ||
585 | |||
586 | /* Disable automatic state continuations if explicitly suspending. */ | ||
587 | if ((suspend_reason == SCI_HW_SUSPEND) || | ||
588 | (sci_rnc->destination_state == RNC_DEST_FINAL)) | ||
589 | dest_param = sci_rnc->destination_state; | ||
499 | 590 | ||
500 | state = sci_rnc->sm.current_state_id; | 591 | switch (state) { |
501 | if (state != SCI_RNC_READY) { | 592 | case SCI_RNC_READY: |
593 | break; | ||
594 | case SCI_RNC_INVALIDATING: | ||
595 | if (sci_rnc->destination_state == RNC_DEST_FINAL) { | ||
596 | dev_warn(scirdev_to_dev(idev), | ||
597 | "%s: already destroying %p\n", | ||
598 | __func__, sci_rnc); | ||
599 | return SCI_FAILURE_INVALID_STATE; | ||
600 | } | ||
601 | /* Fall through and handle like SCI_RNC_POSTING */ | ||
602 | case SCI_RNC_RESUMING: | ||
603 | /* Fall through and handle like SCI_RNC_POSTING */ | ||
604 | case SCI_RNC_POSTING: | ||
605 | /* Set the destination state to AWAIT - this signals the | ||
606 | * entry into the SCI_RNC_READY state that a suspension | ||
607 | * needs to be done immediately. | ||
608 | */ | ||
609 | if (sci_rnc->destination_state != RNC_DEST_FINAL) | ||
610 | sci_rnc->destination_state = RNC_DEST_SUSPENDED; | ||
611 | sci_rnc->suspend_type = suspend_type; | ||
612 | sci_rnc->suspend_reason = suspend_reason; | ||
613 | return SCI_SUCCESS; | ||
614 | |||
615 | case SCI_RNC_TX_SUSPENDED: | ||
616 | if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX) | ||
617 | status = SCI_SUCCESS; | ||
618 | break; | ||
619 | case SCI_RNC_TX_RX_SUSPENDED: | ||
620 | if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) | ||
621 | status = SCI_SUCCESS; | ||
622 | break; | ||
623 | case SCI_RNC_AWAIT_SUSPENSION: | ||
624 | if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) | ||
625 | || (suspend_type == sci_rnc->suspend_type)) | ||
626 | return SCI_SUCCESS; | ||
627 | break; | ||
628 | default: | ||
502 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 629 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
503 | "%s: invalid state %d\n", __func__, state); | 630 | "%s: invalid state %s\n", __func__, |
631 | rnc_state_name(state)); | ||
504 | return SCI_FAILURE_INVALID_STATE; | 632 | return SCI_FAILURE_INVALID_STATE; |
505 | } | 633 | } |
634 | sci_rnc->destination_state = dest_param; | ||
635 | sci_rnc->suspend_type = suspend_type; | ||
636 | sci_rnc->suspend_reason = suspend_reason; | ||
637 | |||
638 | if (status == SCI_SUCCESS) { /* Already in the destination state? */ | ||
639 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
640 | |||
641 | wake_up_all(&ihost->eventq); /* Let observers look. */ | ||
642 | return SCI_SUCCESS; | ||
643 | } | ||
644 | if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) || | ||
645 | (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) { | ||
506 | 646 | ||
507 | sci_rnc->user_callback = cb_fn; | 647 | if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT) |
508 | sci_rnc->user_cookie = cb_p; | 648 | isci_dev_set_hang_detection_timeout(idev, 0x00000001); |
509 | sci_rnc->suspension_code = suspend_type; | ||
510 | 649 | ||
511 | if (suspend_type == SCI_SOFTWARE_SUSPENSION) { | 650 | sci_remote_device_post_request( |
512 | sci_remote_device_post_request(rnc_to_dev(sci_rnc), | 651 | idev, SCI_SOFTWARE_SUSPEND_CMD); |
513 | SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); | ||
514 | } | 652 | } |
653 | if (state != SCI_RNC_AWAIT_SUSPENSION) | ||
654 | sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); | ||
515 | 655 | ||
516 | sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); | ||
517 | return SCI_SUCCESS; | 656 | return SCI_SUCCESS; |
518 | } | 657 | } |
519 | 658 | ||
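Callbacks are gone from the suspend path; callers now pass a reason plus the TL event they expect the hardware to raise. A hypothetical call site under the new signature (idev and status are assumed to be in scope):

    /* Hypothetical call site: request a normal software suspension and
     * name the TL event that will confirm it. */
    status = sci_remote_node_context_suspend(&idev->rnc,
                                             SCI_SW_SUSPEND_NORMAL,
                                             SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
    if (status != SCI_SUCCESS)
        dev_warn(scirdev_to_dev(idev),
                 "%s: RNC suspend failed: %d\n", __func__, status);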
@@ -522,56 +661,86 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s | |||
522 | void *cb_p) | 661 | void *cb_p) |
523 | { | 662 | { |
524 | enum scis_sds_remote_node_context_states state; | 663 | enum scis_sds_remote_node_context_states state; |
664 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | ||
525 | 665 | ||
526 | state = sci_rnc->sm.current_state_id; | 666 | state = sci_rnc->sm.current_state_id; |
667 | dev_dbg(scirdev_to_dev(idev), | ||
668 | "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; " | ||
669 | "dev resume path %s\n", | ||
670 | __func__, rnc_state_name(state), cb_fn, cb_p, | ||
671 | sci_rnc->destination_state, | ||
672 | test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags) | ||
673 | ? "<abort active>" : "<normal>"); | ||
674 | |||
527 | switch (state) { | 675 | switch (state) { |
528 | case SCI_RNC_INITIAL: | 676 | case SCI_RNC_INITIAL: |
529 | if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) | 677 | if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) |
530 | return SCI_FAILURE_INVALID_STATE; | 678 | return SCI_FAILURE_INVALID_STATE; |
531 | 679 | ||
532 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | 680 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p, |
533 | sci_remote_node_context_construct_buffer(sci_rnc); | 681 | RNC_DEST_READY); |
534 | sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); | 682 | if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { |
683 | sci_remote_node_context_construct_buffer(sci_rnc); | ||
684 | sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); | ||
685 | } | ||
535 | return SCI_SUCCESS; | 686 | return SCI_SUCCESS; |
687 | |||
536 | case SCI_RNC_POSTING: | 688 | case SCI_RNC_POSTING: |
537 | case SCI_RNC_INVALIDATING: | 689 | case SCI_RNC_INVALIDATING: |
538 | case SCI_RNC_RESUMING: | 690 | case SCI_RNC_RESUMING: |
539 | if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) | 691 | /* We are still waiting to post when a resume was |
540 | return SCI_FAILURE_INVALID_STATE; | 692 | * requested. |
541 | 693 | */ | |
542 | sci_rnc->user_callback = cb_fn; | 694 | switch (sci_rnc->destination_state) { |
543 | sci_rnc->user_cookie = cb_p; | 695 | case RNC_DEST_SUSPENDED: |
696 | case RNC_DEST_SUSPENDED_RESUME: | ||
697 | /* Previously waiting to suspend after posting. | ||
698 | * Now continue onto resumption. | ||
699 | */ | ||
700 | sci_remote_node_context_setup_to_resume( | ||
701 | sci_rnc, cb_fn, cb_p, | ||
702 | RNC_DEST_SUSPENDED_RESUME); | ||
703 | break; | ||
704 | default: | ||
705 | sci_remote_node_context_setup_to_resume( | ||
706 | sci_rnc, cb_fn, cb_p, | ||
707 | RNC_DEST_READY); | ||
708 | break; | ||
709 | } | ||
544 | return SCI_SUCCESS; | 710 | return SCI_SUCCESS; |
545 | case SCI_RNC_TX_SUSPENDED: { | 711 | |
546 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | 712 | case SCI_RNC_TX_SUSPENDED: |
547 | struct domain_device *dev = idev->domain_dev; | 713 | case SCI_RNC_TX_RX_SUSPENDED: |
548 | 714 | { | |
549 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | 715 | struct domain_device *dev = idev->domain_dev; |
550 | 716 | /* If this is an expander attached SATA device we must | |
551 | /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ | 717 | * invalidate and repost the RNC since this is the only |
552 | if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) | 718 | * way to clear the TCi to NCQ tag mapping table for |
553 | sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); | 719 | * the RNi. All other device types we can just resume. |
554 | else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | 720 | */ |
555 | if (idev->is_direct_attached) { | 721 | sci_remote_node_context_setup_to_resume( |
556 | /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */ | 722 | sci_rnc, cb_fn, cb_p, RNC_DEST_READY); |
557 | sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); | 723 | |
558 | } else { | 724 | if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { |
559 | sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); | 725 | if ((dev_is_sata(dev) && dev->parent) || |
726 | (sci_rnc->destination_state == RNC_DEST_FINAL)) | ||
727 | sci_change_state(&sci_rnc->sm, | ||
728 | SCI_RNC_INVALIDATING); | ||
729 | else | ||
730 | sci_change_state(&sci_rnc->sm, | ||
731 | SCI_RNC_RESUMING); | ||
560 | } | 732 | } |
561 | } else | 733 | } |
562 | return SCI_FAILURE; | ||
563 | return SCI_SUCCESS; | 734 | return SCI_SUCCESS; |
564 | } | 735 | |
565 | case SCI_RNC_TX_RX_SUSPENDED: | ||
566 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | ||
567 | sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); | ||
568 | return SCI_FAILURE_INVALID_STATE; | ||
569 | case SCI_RNC_AWAIT_SUSPENSION: | 736 | case SCI_RNC_AWAIT_SUSPENSION: |
570 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | 737 | sci_remote_node_context_setup_to_resume( |
738 | sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME); | ||
571 | return SCI_SUCCESS; | 739 | return SCI_SUCCESS; |
572 | default: | 740 | default: |
573 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 741 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
574 | "%s: invalid state %d\n", __func__, state); | 742 | "%s: invalid state %s\n", __func__, |
743 | rnc_state_name(state)); | ||
575 | return SCI_FAILURE_INVALID_STATE; | 744 | return SCI_FAILURE_INVALID_STATE; |
576 | } | 745 | } |
577 | } | 746 | } |
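The resume path now encodes the comment above as a three-way decision: expander-attached SATA (or an RNC already marked for destruction) must take the INVALIDATING detour to repost the context and clear the TCi-to-NCQ tag map, while everything else resumes directly. A standalone model of that choice:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy version of the suspended-state resume decision above. */
    static const char *resume_path(bool is_sata, bool has_parent, bool dest_final)
    {
        if ((is_sata && has_parent) || dest_final)
            return "INVALIDATING";
        return "RESUMING";
    }

    int main(void)
    {
        printf("%s\n", resume_path(true, true, false));   /* STP behind expander */
        printf("%s\n", resume_path(true, false, false));  /* direct-attached SATA */
        printf("%s\n", resume_path(false, false, false)); /* SSP end device */
        return 0;
    }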
@@ -590,35 +759,51 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context | |||
590 | case SCI_RNC_TX_RX_SUSPENDED: | 759 | case SCI_RNC_TX_RX_SUSPENDED: |
591 | case SCI_RNC_AWAIT_SUSPENSION: | 760 | case SCI_RNC_AWAIT_SUSPENSION: |
592 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 761 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
593 | "%s: invalid state %d\n", __func__, state); | 762 | "%s: invalid state %s\n", __func__, |
763 | rnc_state_name(state)); | ||
594 | return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; | 764 | return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; |
595 | default: | 765 | default: |
596 | break; | 766 | dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
767 | "%s: invalid state %s\n", __func__, | ||
768 | rnc_state_name(state)); | ||
769 | return SCI_FAILURE_INVALID_STATE; | ||
597 | } | 770 | } |
598 | dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), | ||
599 | "%s: requested to start IO while still resuming, %d\n", | ||
600 | __func__, state); | ||
601 | return SCI_FAILURE_INVALID_STATE; | ||
602 | } | 771 | } |
603 | 772 | ||
604 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, | 773 | enum sci_status sci_remote_node_context_start_task( |
605 | struct isci_request *ireq) | 774 | struct sci_remote_node_context *sci_rnc, |
775 | struct isci_request *ireq, | ||
776 | scics_sds_remote_node_context_callback cb_fn, | ||
777 | void *cb_p) | ||
778 | { | ||
779 | enum sci_status status = sci_remote_node_context_resume(sci_rnc, | ||
780 | cb_fn, cb_p); | ||
781 | if (status != SCI_SUCCESS) | ||
782 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | ||
783 | "%s: resume failed: %d\n", __func__, status); | ||
784 | return status; | ||
785 | } | ||
786 | |||
787 | int sci_remote_node_context_is_safe_to_abort( | ||
788 | struct sci_remote_node_context *sci_rnc) | ||
606 | { | 789 | { |
607 | enum scis_sds_remote_node_context_states state; | 790 | enum scis_sds_remote_node_context_states state; |
608 | 791 | ||
609 | state = sci_rnc->sm.current_state_id; | 792 | state = sci_rnc->sm.current_state_id; |
610 | switch (state) { | 793 | switch (state) { |
794 | case SCI_RNC_INVALIDATING: | ||
795 | case SCI_RNC_TX_RX_SUSPENDED: | ||
796 | return 1; | ||
797 | case SCI_RNC_POSTING: | ||
611 | case SCI_RNC_RESUMING: | 798 | case SCI_RNC_RESUMING: |
612 | case SCI_RNC_READY: | 799 | case SCI_RNC_READY: |
613 | case SCI_RNC_AWAIT_SUSPENSION: | ||
614 | return SCI_SUCCESS; | ||
615 | case SCI_RNC_TX_SUSPENDED: | 800 | case SCI_RNC_TX_SUSPENDED: |
616 | case SCI_RNC_TX_RX_SUSPENDED: | 801 | case SCI_RNC_AWAIT_SUSPENSION: |
617 | sci_remote_node_context_resume(sci_rnc, NULL, NULL); | 802 | case SCI_RNC_INITIAL: |
618 | return SCI_SUCCESS; | 803 | return 0; |
619 | default: | 804 | default: |
620 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 805 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
621 | "%s: invalid state %d\n", __func__, state); | 806 | "%s: invalid state %d\n", __func__, state); |
622 | return SCI_FAILURE_INVALID_STATE; | 807 | return 0; |
623 | } | 808 | } |
624 | } | 809 | } |
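sci_remote_node_context_is_safe_to_abort() is now a pure predicate: it reports whether the RNC is quiesced (invalidating or fully TX/RX suspended) instead of side-effecting a resume, and it returns a plain boolean int. A hypothetical device-level wrapper showing the intended reading (the real caller in request.c below is assumed to do something equivalent):

    /* Hypothetical wrapper: hardware-terminating a task context is only
     * safe once the RNC can no longer schedule it. */
    static bool device_safe_to_abort(struct isci_remote_device *idev)
    {
        return sci_remote_node_context_is_safe_to_abort(&idev->rnc) != 0;
    }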
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index a241e0f4c865..a703b9ce0c2c 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h | |||
@@ -75,8 +75,13 @@ | |||
75 | */ | 75 | */ |
76 | #define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF | 76 | #define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF |
77 | 77 | ||
78 | #define SCU_HARDWARE_SUSPENSION (0) | 78 | enum sci_remote_node_suspension_reasons { |
79 | #define SCI_SOFTWARE_SUSPENSION (1) | 79 | SCI_HW_SUSPEND, |
80 | SCI_SW_SUSPEND_NORMAL, | ||
81 | SCI_SW_SUSPEND_LINKHANG_DETECT | ||
82 | }; | ||
83 | #define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | ||
84 | #define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX | ||
80 | 85 | ||
81 | struct isci_request; | 86 | struct isci_request; |
82 | struct isci_remote_device; | 87 | struct isci_remote_device; |
@@ -137,9 +142,13 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state); | |||
137 | * node context. | 142 | * node context. |
138 | */ | 143 | */ |
139 | enum sci_remote_node_context_destination_state { | 144 | enum sci_remote_node_context_destination_state { |
140 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, | 145 | RNC_DEST_UNSPECIFIED, |
141 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, | 146 | RNC_DEST_READY, |
142 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL | 147 | RNC_DEST_FINAL, |
148 | RNC_DEST_SUSPENDED, /* Set when a suspend occurs during post/invalidate */ | ||
149 | RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting | ||
150 | * or invalidating and already suspending. | ||
151 | */ | ||
143 | }; | 152 | }; |
144 | 153 | ||
145 | /** | 154 | /** |
@@ -156,10 +165,12 @@ struct sci_remote_node_context { | |||
156 | u16 remote_node_index; | 165 | u16 remote_node_index; |
157 | 166 | ||
158 | /** | 167 | /** |
159 | * This field is the recorded suspension code or the reason for the remote node | 168 | * This field is the recorded suspension type of the remote node |
160 | * context suspension. | 169 | * context suspension. |
161 | */ | 170 | */ |
162 | u32 suspension_code; | 171 | u32 suspend_type; |
172 | enum sci_remote_node_suspension_reasons suspend_reason; | ||
173 | u32 suspend_count; | ||
163 | 174 | ||
164 | /** | 175 | /** |
165 | * This field is true if the remote node context is resuming from its current | 176 | * This field is true if the remote node context is resuming from its current |
@@ -193,6 +204,8 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, | |||
193 | bool sci_remote_node_context_is_ready( | 204 | bool sci_remote_node_context_is_ready( |
194 | struct sci_remote_node_context *sci_rnc); | 205 | struct sci_remote_node_context *sci_rnc); |
195 | 206 | ||
207 | bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc); | ||
208 | |||
196 | enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, | 209 | enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, |
197 | u32 event_code); | 210 | u32 event_code); |
198 | enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, | 211 | enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, |
@@ -200,14 +213,24 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context | |||
200 | void *callback_parameter); | 213 | void *callback_parameter); |
201 | enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, | 214 | enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, |
202 | u32 suspend_type, | 215 | u32 suspend_type, |
203 | scics_sds_remote_node_context_callback cb_fn, | 216 | u32 suspension_code); |
204 | void *cb_p); | ||
205 | enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, | 217 | enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, |
206 | scics_sds_remote_node_context_callback cb_fn, | 218 | scics_sds_remote_node_context_callback cb_fn, |
207 | void *cb_p); | 219 | void *cb_p); |
208 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, | 220 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, |
209 | struct isci_request *ireq); | 221 | struct isci_request *ireq, |
222 | scics_sds_remote_node_context_callback cb_fn, | ||
223 | void *cb_p); | ||
210 | enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, | 224 | enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, |
211 | struct isci_request *ireq); | 225 | struct isci_request *ireq); |
226 | int sci_remote_node_context_is_safe_to_abort( | ||
227 | struct sci_remote_node_context *sci_rnc); | ||
212 | 228 | ||
229 | static inline bool sci_remote_node_context_is_being_destroyed( | ||
230 | struct sci_remote_node_context *sci_rnc) | ||
231 | { | ||
232 | return (sci_rnc->destination_state == RNC_DEST_FINAL) | ||
233 | || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL) | ||
234 | && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED)); | ||
235 | } | ||
213 | #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ | 236 | #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ |
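The new inline treats two situations as "being destroyed": an RNC already marked RNC_DEST_FINAL, and one sitting in SCI_RNC_INITIAL with no destination queued. A hypothetical guard in an error-handling path might read:

    /* Hypothetical guard: don't try to resume an RNC that is being torn
     * down or has already returned to its unowned initial state. */
    if (sci_remote_node_context_is_being_destroyed(&idev->rnc))
        return SCI_FAILURE_INVALID_STATE;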
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 2def1e3960f6..7a0431c73493 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
@@ -92,11 +92,11 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, | |||
92 | if (idx == 0) { | 92 | if (idx == 0) { |
93 | offset = (void *) &ireq->tc->sgl_pair_ab - | 93 | offset = (void *) &ireq->tc->sgl_pair_ab - |
94 | (void *) &ihost->task_context_table[0]; | 94 | (void *) &ihost->task_context_table[0]; |
95 | return ihost->task_context_dma + offset; | 95 | return ihost->tc_dma + offset; |
96 | } else if (idx == 1) { | 96 | } else if (idx == 1) { |
97 | offset = (void *) &ireq->tc->sgl_pair_cd - | 97 | offset = (void *) &ireq->tc->sgl_pair_cd - |
98 | (void *) &ihost->task_context_table[0]; | 98 | (void *) &ihost->task_context_table[0]; |
99 | return ihost->task_context_dma + offset; | 99 | return ihost->tc_dma + offset; |
100 | } | 100 | } |
101 | 101 | ||
102 | return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); | 102 | return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); |
@@ -730,7 +730,7 @@ static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *i | |||
730 | { | 730 | { |
731 | struct sas_task *task = isci_request_access_task(ireq); | 731 | struct sas_task *task = isci_request_access_task(ireq); |
732 | 732 | ||
733 | ireq->protocol = SCIC_SSP_PROTOCOL; | 733 | ireq->protocol = SAS_PROTOCOL_SSP; |
734 | 734 | ||
735 | scu_ssp_io_request_construct_task_context(ireq, | 735 | scu_ssp_io_request_construct_task_context(ireq, |
736 | task->data_dir, | 736 | task->data_dir, |
@@ -763,7 +763,7 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request * | |||
763 | bool copy = false; | 763 | bool copy = false; |
764 | struct sas_task *task = isci_request_access_task(ireq); | 764 | struct sas_task *task = isci_request_access_task(ireq); |
765 | 765 | ||
766 | ireq->protocol = SCIC_STP_PROTOCOL; | 766 | ireq->protocol = SAS_PROTOCOL_STP; |
767 | 767 | ||
768 | copy = (task->data_dir == DMA_NONE) ? false : true; | 768 | copy = (task->data_dir == DMA_NONE) ? false : true; |
769 | 769 | ||
@@ -863,6 +863,8 @@ sci_io_request_terminate(struct isci_request *ireq) | |||
863 | 863 | ||
864 | switch (state) { | 864 | switch (state) { |
865 | case SCI_REQ_CONSTRUCTED: | 865 | case SCI_REQ_CONSTRUCTED: |
866 | /* Set to make sure no HW terminate posting is done: */ | ||
867 | set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags); | ||
866 | ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; | 868 | ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; |
867 | ireq->sci_status = SCI_FAILURE_IO_TERMINATED; | 869 | ireq->sci_status = SCI_FAILURE_IO_TERMINATED; |
868 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 870 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
@@ -883,8 +885,7 @@ sci_io_request_terminate(struct isci_request *ireq) | |||
883 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: | 885 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: |
884 | case SCI_REQ_ATAPI_WAIT_D2H: | 886 | case SCI_REQ_ATAPI_WAIT_D2H: |
885 | case SCI_REQ_ATAPI_WAIT_TC_COMP: | 887 | case SCI_REQ_ATAPI_WAIT_TC_COMP: |
886 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); | 888 | /* Fall through and change state to ABORTING... */ |
887 | return SCI_SUCCESS; | ||
888 | case SCI_REQ_TASK_WAIT_TC_RESP: | 889 | case SCI_REQ_TASK_WAIT_TC_RESP: |
889 | /* The task frame was already confirmed to have been | 890 | /* The task frame was already confirmed to have been |
890 | * sent by the SCU HW. Since the state machine is | 891 | * sent by the SCU HW. Since the state machine is |
@@ -893,20 +894,21 @@ sci_io_request_terminate(struct isci_request *ireq) | |||
893 | * and don't wait for the task response. | 894 | * and don't wait for the task response. |
894 | */ | 895 | */ |
895 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); | 896 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); |
896 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 897 | /* Fall through and handle like ABORTING... */ |
897 | return SCI_SUCCESS; | ||
898 | case SCI_REQ_ABORTING: | 898 | case SCI_REQ_ABORTING: |
899 | /* If a request has a termination requested twice, return | 899 | if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) |
900 | * a failure indication, since HW confirmation of the first | 900 | set_bit(IREQ_PENDING_ABORT, &ireq->flags); |
901 | * abort is still outstanding. | 901 | else |
902 | clear_bit(IREQ_PENDING_ABORT, &ireq->flags); | ||
903 | /* If the request is only waiting on the remote device | ||
904 | * suspension, return SUCCESS so the caller will wait too. | ||
902 | */ | 905 | */ |
906 | return SCI_SUCCESS; | ||
903 | case SCI_REQ_COMPLETED: | 907 | case SCI_REQ_COMPLETED: |
904 | default: | 908 | default: |
905 | dev_warn(&ireq->owning_controller->pdev->dev, | 909 | dev_warn(&ireq->owning_controller->pdev->dev, |
906 | "%s: SCIC IO Request requested to abort while in wrong " | 910 | "%s: SCIC IO Request requested to abort while in wrong " |
907 | "state %d\n", | 911 | "state %d\n", __func__, ireq->sm.current_state_id); |
908 | __func__, | ||
909 | ireq->sm.current_state_id); | ||
910 | break; | 912 | break; |
911 | } | 913 | } |
912 | 914 | ||
@@ -1070,7 +1072,7 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
1070 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): | 1072 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): |
1071 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): | 1073 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): |
1072 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): | 1074 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): |
1073 | if (ireq->protocol == SCIC_STP_PROTOCOL) { | 1075 | if (ireq->protocol == SAS_PROTOCOL_STP) { |
1074 | ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 1076 | ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
1075 | SCU_COMPLETION_TL_STATUS_SHIFT; | 1077 | SCU_COMPLETION_TL_STATUS_SHIFT; |
1076 | ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; | 1078 | ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; |
@@ -2117,7 +2119,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
2117 | */ | 2119 | */ |
2118 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { | 2120 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { |
2119 | sci_remote_device_suspend(ireq->target_device, | 2121 | sci_remote_device_suspend(ireq->target_device, |
2120 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); | 2122 | SCI_SW_SUSPEND_NORMAL); |
2121 | 2123 | ||
2122 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 2124 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
2123 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | 2125 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
@@ -2138,13 +2140,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
2138 | /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR | 2140 | /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR |
2139 | * - this comes only for B0 | 2141 | * - this comes only for B0 |
2140 | */ | 2142 | */ |
2141 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): | ||
2142 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): | ||
2143 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): | ||
2144 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): | ||
2145 | sci_remote_device_suspend(ireq->target_device, | ||
2146 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); | ||
2147 | /* Fall through to the default case */ | ||
2148 | default: | 2143 | default: |
2149 | /* All other completion status cause the IO to be complete. */ | 2144 | /* All other completion status cause the IO to be complete. */ |
2150 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); | 2145 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
@@ -2262,15 +2257,151 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire | |||
2262 | return status; | 2257 | return status; |
2263 | } | 2258 | } |
2264 | 2259 | ||
2260 | static int sci_request_smp_completion_status_is_tx_suspend( | ||
2261 | unsigned int completion_status) | ||
2262 | { | ||
2263 | switch (completion_status) { | ||
2264 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | ||
2265 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | ||
2266 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | ||
2267 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | ||
2268 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | ||
2269 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | ||
2270 | return 1; | ||
2271 | } | ||
2272 | return 0; | ||
2273 | } | ||
2274 | |||
2275 | static int sci_request_smp_completion_status_is_tx_rx_suspend( | ||
2276 | unsigned int completion_status) | ||
2277 | { | ||
2278 | return 0; /* There are no Tx/Rx SMP suspend conditions. */ | ||
2279 | } | ||
2280 | |||
2281 | static int sci_request_ssp_completion_status_is_tx_suspend( | ||
2282 | unsigned int completion_status) | ||
2283 | { | ||
2284 | switch (completion_status) { | ||
2285 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: | ||
2286 | case SCU_TASK_DONE_LF_ERR: | ||
2287 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | ||
2288 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | ||
2289 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | ||
2290 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | ||
2291 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | ||
2292 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | ||
2293 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | ||
2294 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | ||
2295 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | ||
2296 | return 1; | ||
2297 | } | ||
2298 | return 0; | ||
2299 | } | ||
2300 | |||
2301 | static int sci_request_ssp_completion_status_is_tx_rx_suspend( | ||
2302 | unsigned int completion_status) | ||
2303 | { | ||
2304 | return 0; /* There are no Tx/Rx SSP suspend conditions. */ | ||
2305 | } | ||
2306 | |||
2307 | static int sci_request_stpsata_completion_status_is_tx_suspend( | ||
2308 | unsigned int completion_status) | ||
2309 | { | ||
2310 | switch (completion_status) { | ||
2311 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: | ||
2312 | case SCU_TASK_DONE_LL_R_ERR: | ||
2313 | case SCU_TASK_DONE_LL_PERR: | ||
2314 | case SCU_TASK_DONE_REG_ERR: | ||
2315 | case SCU_TASK_DONE_SDB_ERR: | ||
2316 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | ||
2317 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | ||
2318 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | ||
2319 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | ||
2320 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | ||
2321 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | ||
2322 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | ||
2323 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | ||
2324 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | ||
2325 | return 1; | ||
2326 | } | ||
2327 | return 0; | ||
2328 | } | ||
2329 | |||
2330 | |||
2331 | static int sci_request_stpsata_completion_status_is_tx_rx_suspend( | ||
2332 | unsigned int completion_status) | ||
2333 | { | ||
2334 | switch (completion_status) { | ||
2335 | case SCU_TASK_DONE_LF_ERR: | ||
2336 | case SCU_TASK_DONE_LL_SY_TERM: | ||
2337 | case SCU_TASK_DONE_LL_LF_TERM: | ||
2338 | case SCU_TASK_DONE_BREAK_RCVD: | ||
2339 | case SCU_TASK_DONE_INV_FIS_LEN: | ||
2340 | case SCU_TASK_DONE_UNEXP_FIS: | ||
2341 | case SCU_TASK_DONE_UNEXP_SDBFIS: | ||
2342 | case SCU_TASK_DONE_MAX_PLD_ERR: | ||
2343 | return 1; | ||
2344 | } | ||
2345 | return 0; | ||
2346 | } | ||
2347 | |||
2348 | static void sci_request_handle_suspending_completions( | ||
2349 | struct isci_request *ireq, | ||
2350 | u32 completion_code) | ||
2351 | { | ||
2352 | int is_tx = 0; | ||
2353 | int is_tx_rx = 0; | ||
2354 | |||
2355 | switch (ireq->protocol) { | ||
2356 | case SAS_PROTOCOL_SMP: | ||
2357 | is_tx = sci_request_smp_completion_status_is_tx_suspend( | ||
2358 | completion_code); | ||
2359 | is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( | ||
2360 | completion_code); | ||
2361 | break; | ||
2362 | case SAS_PROTOCOL_SSP: | ||
2363 | is_tx = sci_request_ssp_completion_status_is_tx_suspend( | ||
2364 | completion_code); | ||
2365 | is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( | ||
2366 | completion_code); | ||
2367 | break; | ||
2368 | case SAS_PROTOCOL_STP: | ||
2369 | is_tx = sci_request_stpsata_completion_status_is_tx_suspend( | ||
2370 | completion_code); | ||
2371 | is_tx_rx = | ||
2372 | sci_request_stpsata_completion_status_is_tx_rx_suspend( | ||
2373 | completion_code); | ||
2374 | break; | ||
2375 | default: | ||
2376 | dev_warn(&ireq->isci_host->pdev->dev, | ||
2377 | "%s: request %p has no valid protocol\n", | ||
2378 | __func__, ireq); | ||
2379 | break; | ||
2380 | } | ||
2381 | if (is_tx || is_tx_rx) { | ||
2382 | BUG_ON(is_tx && is_tx_rx); | ||
2383 | |||
2384 | sci_remote_node_context_suspend( | ||
2385 | &ireq->target_device->rnc, | ||
2386 | SCI_HW_SUSPEND, | ||
2387 | (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX | ||
2388 | : SCU_EVENT_TL_RNC_SUSPEND_TX); | ||
2389 | } | ||
2390 | } | ||
2391 | |||
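The classifier above funnels protocol-specific completion codes into at most one of two buckets; the BUG_ON documents that a single status can never predict both a TX and a TX/RX suspension, so the suspend type handed to the RNC is unambiguous. A standalone model of that dispatch invariant:

    #include <assert.h>
    #include <stdio.h>

    /* Toy version of the invariant asserted above: a completion status
     * may predict a TX suspension or a TX/RX suspension, never both. */
    static void handle(int is_tx, int is_tx_rx)
    {
        if (is_tx || is_tx_rx) {
            assert(!(is_tx && is_tx_rx));
            printf("suspend type: %s\n", is_tx_rx ? "TX/RX" : "TX");
        }
    }

    int main(void)
    {
        handle(1, 0);
        handle(0, 1);
        handle(0, 0); /* no suspension predicted */
        return 0;
    }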
2265 | enum sci_status | 2392 | enum sci_status |
2266 | sci_io_request_tc_completion(struct isci_request *ireq, | 2393 | sci_io_request_tc_completion(struct isci_request *ireq, |
2267 | u32 completion_code) | 2394 | u32 completion_code) |
2268 | { | 2395 | { |
2269 | enum sci_base_request_states state; | 2396 | enum sci_base_request_states state; |
2270 | struct isci_host *ihost = ireq->owning_controller; | 2397 | struct isci_host *ihost = ireq->owning_controller; |
2271 | 2398 | ||
2272 | state = ireq->sm.current_state_id; | 2399 | state = ireq->sm.current_state_id; |
2273 | 2400 | ||
2401 | /* Decode those completions that signal upcoming suspension events. */ | ||
2402 | sci_request_handle_suspending_completions( | ||
2403 | ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); | ||
2404 | |||
2274 | switch (state) { | 2405 | switch (state) { |
2275 | case SCI_REQ_STARTED: | 2406 | case SCI_REQ_STARTED: |
2276 | return request_started_state_tc_event(ireq, completion_code); | 2407 | return request_started_state_tc_event(ireq, completion_code); |
@@ -2362,9 +2493,6 @@ static void isci_request_process_response_iu( | |||
2362 | * @request: This parameter is the completed isci_request object. | 2493 | * @request: This parameter is the completed isci_request object. |
2363 | * @response_ptr: This parameter specifies the service response for the I/O. | 2494 | * @response_ptr: This parameter specifies the service response for the I/O. |
2364 | * @status_ptr: This parameter specifies the exec status for the I/O. | 2495 | * @status_ptr: This parameter specifies the exec status for the I/O. |
2365 | * @complete_to_host_ptr: This parameter specifies the action to be taken by | ||
2366 | * the LLDD with respect to completing this request or forcing an abort | ||
2367 | * condition on the I/O. | ||
2368 | * @open_rej_reason: This parameter specifies the encoded reason for the | 2496 | * @open_rej_reason: This parameter specifies the encoded reason for the |
2369 | * abandon-class reject. | 2497 | * abandon-class reject. |
2370 | * | 2498 | * |
@@ -2375,14 +2503,12 @@ static void isci_request_set_open_reject_status( | |||
2375 | struct sas_task *task, | 2503 | struct sas_task *task, |
2376 | enum service_response *response_ptr, | 2504 | enum service_response *response_ptr, |
2377 | enum exec_status *status_ptr, | 2505 | enum exec_status *status_ptr, |
2378 | enum isci_completion_selection *complete_to_host_ptr, | ||
2379 | enum sas_open_rej_reason open_rej_reason) | 2506 | enum sas_open_rej_reason open_rej_reason) |
2380 | { | 2507 | { |
2381 | /* Task in the target is done. */ | 2508 | /* Task in the target is done. */ |
2382 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2509 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2383 | *response_ptr = SAS_TASK_UNDELIVERED; | 2510 | *response_ptr = SAS_TASK_UNDELIVERED; |
2384 | *status_ptr = SAS_OPEN_REJECT; | 2511 | *status_ptr = SAS_OPEN_REJECT; |
2385 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2386 | task->task_status.open_rej_reason = open_rej_reason; | 2512 | task->task_status.open_rej_reason = open_rej_reason; |
2387 | } | 2513 | } |
2388 | 2514 | ||
@@ -2392,9 +2518,6 @@ static void isci_request_set_open_reject_status( | |||
2392 | * @request: This parameter is the completed isci_request object. | 2518 | * @request: This parameter is the completed isci_request object. |
2393 | * @response_ptr: This parameter specifies the service response for the I/O. | 2519 | * @response_ptr: This parameter specifies the service response for the I/O. |
2394 | * @status_ptr: This parameter specifies the exec status for the I/O. | 2520 | * @status_ptr: This parameter specifies the exec status for the I/O. |
2395 | * @complete_to_host_ptr: This parameter specifies the action to be taken by | ||
2396 | * the LLDD with respect to completing this request or forcing an abort | ||
2397 | * condition on the I/O. | ||
2398 | * | 2521 | * |
2399 | * none. | 2522 | * none. |
2400 | */ | 2523 | */ |
@@ -2403,8 +2526,7 @@ static void isci_request_handle_controller_specific_errors( | |||
2403 | struct isci_request *request, | 2526 | struct isci_request *request, |
2404 | struct sas_task *task, | 2527 | struct sas_task *task, |
2405 | enum service_response *response_ptr, | 2528 | enum service_response *response_ptr, |
2406 | enum exec_status *status_ptr, | 2529 | enum exec_status *status_ptr) |
2407 | enum isci_completion_selection *complete_to_host_ptr) | ||
2408 | { | 2530 | { |
2409 | unsigned int cstatus; | 2531 | unsigned int cstatus; |
2410 | 2532 | ||
@@ -2445,9 +2567,6 @@ static void isci_request_handle_controller_specific_errors( | |||
2445 | *status_ptr = SAS_ABORTED_TASK; | 2567 | *status_ptr = SAS_ABORTED_TASK; |
2446 | 2568 | ||
2447 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2569 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2448 | |||
2449 | *complete_to_host_ptr = | ||
2450 | isci_perform_normal_io_completion; | ||
2451 | } else { | 2570 | } else { |
2452 | /* Task in the target is not done. */ | 2571 | /* Task in the target is not done. */ |
2453 | *response_ptr = SAS_TASK_UNDELIVERED; | 2572 | *response_ptr = SAS_TASK_UNDELIVERED; |
@@ -2458,9 +2577,6 @@ static void isci_request_handle_controller_specific_errors( | |||
2458 | *status_ptr = SAM_STAT_TASK_ABORTED; | 2577 | *status_ptr = SAM_STAT_TASK_ABORTED; |
2459 | 2578 | ||
2460 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2579 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2461 | |||
2462 | *complete_to_host_ptr = | ||
2463 | isci_perform_error_io_completion; | ||
2464 | } | 2580 | } |
2465 | 2581 | ||
2466 | break; | 2582 | break; |
@@ -2489,8 +2605,6 @@ static void isci_request_handle_controller_specific_errors( | |||
2489 | *status_ptr = SAS_ABORTED_TASK; | 2605 | *status_ptr = SAS_ABORTED_TASK; |
2490 | 2606 | ||
2491 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2607 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2492 | |||
2493 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2494 | break; | 2608 | break; |
2495 | 2609 | ||
2496 | 2610 | ||
@@ -2501,7 +2615,7 @@ static void isci_request_handle_controller_specific_errors( | |||
2501 | 2615 | ||
2502 | isci_request_set_open_reject_status( | 2616 | isci_request_set_open_reject_status( |
2503 | request, task, response_ptr, status_ptr, | 2617 | request, task, response_ptr, status_ptr, |
2504 | complete_to_host_ptr, SAS_OREJ_WRONG_DEST); | 2618 | SAS_OREJ_WRONG_DEST); |
2505 | break; | 2619 | break; |
2506 | 2620 | ||
2507 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | 2621 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
@@ -2511,56 +2625,56 @@ static void isci_request_handle_controller_specific_errors( | |||
2511 | */ | 2625 | */ |
2512 | isci_request_set_open_reject_status( | 2626 | isci_request_set_open_reject_status( |
2513 | request, task, response_ptr, status_ptr, | 2627 | request, task, response_ptr, status_ptr, |
2514 | complete_to_host_ptr, SAS_OREJ_RESV_AB0); | 2628 | SAS_OREJ_RESV_AB0); |
2515 | break; | 2629 | break; |
2516 | 2630 | ||
2517 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | 2631 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2518 | 2632 | ||
2519 | isci_request_set_open_reject_status( | 2633 | isci_request_set_open_reject_status( |
2520 | request, task, response_ptr, status_ptr, | 2634 | request, task, response_ptr, status_ptr, |
2521 | complete_to_host_ptr, SAS_OREJ_RESV_AB1); | 2635 | SAS_OREJ_RESV_AB1); |
2522 | break; | 2636 | break; |
2523 | 2637 | ||
2524 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | 2638 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2525 | 2639 | ||
2526 | isci_request_set_open_reject_status( | 2640 | isci_request_set_open_reject_status( |
2527 | request, task, response_ptr, status_ptr, | 2641 | request, task, response_ptr, status_ptr, |
2528 | complete_to_host_ptr, SAS_OREJ_RESV_AB2); | 2642 | SAS_OREJ_RESV_AB2); |
2529 | break; | 2643 | break; |
2530 | 2644 | ||
2531 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | 2645 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2532 | 2646 | ||
2533 | isci_request_set_open_reject_status( | 2647 | isci_request_set_open_reject_status( |
2534 | request, task, response_ptr, status_ptr, | 2648 | request, task, response_ptr, status_ptr, |
2535 | complete_to_host_ptr, SAS_OREJ_RESV_AB3); | 2649 | SAS_OREJ_RESV_AB3); |
2536 | break; | 2650 | break; |
2537 | 2651 | ||
2538 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | 2652 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2539 | 2653 | ||
2540 | isci_request_set_open_reject_status( | 2654 | isci_request_set_open_reject_status( |
2541 | request, task, response_ptr, status_ptr, | 2655 | request, task, response_ptr, status_ptr, |
2542 | complete_to_host_ptr, SAS_OREJ_BAD_DEST); | 2656 | SAS_OREJ_BAD_DEST); |
2543 | break; | 2657 | break; |
2544 | 2658 | ||
2545 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | 2659 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: |
2546 | 2660 | ||
2547 | isci_request_set_open_reject_status( | 2661 | isci_request_set_open_reject_status( |
2548 | request, task, response_ptr, status_ptr, | 2662 | request, task, response_ptr, status_ptr, |
2549 | complete_to_host_ptr, SAS_OREJ_STP_NORES); | 2663 | SAS_OREJ_STP_NORES); |
2550 | break; | 2664 | break; |
2551 | 2665 | ||
2552 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | 2666 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: |
2553 | 2667 | ||
2554 | isci_request_set_open_reject_status( | 2668 | isci_request_set_open_reject_status( |
2555 | request, task, response_ptr, status_ptr, | 2669 | request, task, response_ptr, status_ptr, |
2556 | complete_to_host_ptr, SAS_OREJ_EPROTO); | 2670 | SAS_OREJ_EPROTO); |
2557 | break; | 2671 | break; |
2558 | 2672 | ||
2559 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | 2673 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: |
2560 | 2674 | ||
2561 | isci_request_set_open_reject_status( | 2675 | isci_request_set_open_reject_status( |
2562 | request, task, response_ptr, status_ptr, | 2676 | request, task, response_ptr, status_ptr, |
2563 | complete_to_host_ptr, SAS_OREJ_CONN_RATE); | 2677 | SAS_OREJ_CONN_RATE); |
2564 | break; | 2678 | break; |
2565 | 2679 | ||
2566 | case SCU_TASK_DONE_LL_R_ERR: | 2680 | case SCU_TASK_DONE_LL_R_ERR: |
@@ -2592,95 +2706,12 @@ static void isci_request_handle_controller_specific_errors( | |||
2592 | *response_ptr = SAS_TASK_UNDELIVERED; | 2706 | *response_ptr = SAS_TASK_UNDELIVERED; |
2593 | *status_ptr = SAM_STAT_TASK_ABORTED; | 2707 | *status_ptr = SAM_STAT_TASK_ABORTED; |
2594 | 2708 | ||
2595 | if (task->task_proto == SAS_PROTOCOL_SMP) { | 2709 | if (task->task_proto == SAS_PROTOCOL_SMP) |
2596 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2710 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2597 | 2711 | else | |
2598 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2599 | } else { | ||
2600 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2712 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2601 | |||
2602 | *complete_to_host_ptr = isci_perform_error_io_completion; | ||
2603 | } | ||
2604 | break; | ||
2605 | } | ||
2606 | } | ||
2607 | |||
2608 | /** | ||
2609 | * isci_task_save_for_upper_layer_completion() - This function saves the | ||
2610 | * request for later completion to the upper layer driver. | ||
2611 | * @host: This parameter is a pointer to the host on which the request | ||
2612 | * should be queued (either as an error or success). | ||
2613 | * @request: This parameter is the completed request. | ||
2614 | * @response: This parameter is the response code for the completed task. | ||
2615 | * @status: This parameter is the status code for the completed task. | ||
2616 | * | ||
2617 | * none. | ||
2618 | */ | ||
2619 | static void isci_task_save_for_upper_layer_completion( | ||
2620 | struct isci_host *host, | ||
2621 | struct isci_request *request, | ||
2622 | enum service_response response, | ||
2623 | enum exec_status status, | ||
2624 | enum isci_completion_selection task_notification_selection) | ||
2625 | { | ||
2626 | struct sas_task *task = isci_request_access_task(request); | ||
2627 | |||
2628 | task_notification_selection | ||
2629 | = isci_task_set_completion_status(task, response, status, | ||
2630 | task_notification_selection); | ||
2631 | |||
2632 | /* Tasks aborted specifically by a call to the lldd_abort_task | ||
2633 | * function should not be completed to the host in the regular path. | ||
2634 | */ | ||
2635 | switch (task_notification_selection) { | ||
2636 | |||
2637 | case isci_perform_normal_io_completion: | ||
2638 | /* Normal notification (task_done) */ | ||
2639 | |||
2640 | /* Add to the completed list. */ | ||
2641 | list_add(&request->completed_node, | ||
2642 | &host->requests_to_complete); | ||
2643 | |||
2644 | /* Take the request off the device's pending request list. */ | ||
2645 | list_del_init(&request->dev_node); | ||
2646 | break; | ||
2647 | |||
2648 | case isci_perform_aborted_io_completion: | ||
2649 | /* No notification to libsas because this request is | ||
2650 | * already in the abort path. | ||
2651 | */ | ||
2652 | /* Wake up whatever process was waiting for this | ||
2653 | * request to complete. | ||
2654 | */ | ||
2655 | WARN_ON(request->io_request_completion == NULL); | ||
2656 | |||
2657 | if (request->io_request_completion != NULL) { | ||
2658 | |||
2659 | /* Signal whoever is waiting that this | ||
2660 | * request is complete. | ||
2661 | */ | ||
2662 | complete(request->io_request_completion); | ||
2663 | } | ||
2664 | break; | ||
2665 | |||
2666 | case isci_perform_error_io_completion: | ||
2667 | /* Use sas_task_abort */ | ||
2668 | /* Add to the aborted list. */ | ||
2669 | list_add(&request->completed_node, | ||
2670 | &host->requests_to_errorback); | ||
2671 | break; | ||
2672 | |||
2673 | default: | ||
2674 | /* Add to the "error to libsas" list. */ | ||
2675 | list_add(&request->completed_node, | ||
2676 | &host->requests_to_errorback); | ||
2677 | break; | 2713 | break; |
2678 | } | 2714 | } |
2679 | dev_dbg(&host->pdev->dev, | ||
2680 | "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n", | ||
2681 | __func__, task_notification_selection, task, | ||
2682 | (task) ? task->task_status.resp : 0, response, | ||
2683 | (task) ? task->task_status.stat : 0, status); | ||
2684 | } | 2715 | } |
2685 | 2716 | ||
2686 | static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) | 2717 | static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) |
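The helper removed above deferred each completed request onto per-host lists for later processing; after this rework the completion result is written straight into the sas_task and libsas is notified in line, as the reworked hunks below show. A minimal sketch of that pattern, built only from libsas types; this illustrates the idiom rather than reproducing a literal driver function:

#include <scsi/libsas.h>

/* Sketch: hand a finished sas_task straight back to libsas. */
static void sketch_complete_task(struct sas_task *task,
                                 enum service_response response,
                                 enum exec_status status)
{
        unsigned long flags;

        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_status.resp = response;
        task->task_status.stat = status;
        task->task_state_flags |= SAS_TASK_STATE_DONE;
        task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
                                    SAS_TASK_STATE_PENDING);
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        /* Notify libsas outside the state lock. */
        task->task_done(task);
}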
@@ -2715,295 +2746,164 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2715 | struct isci_remote_device *idev = request->target_device; | 2746 | struct isci_remote_device *idev = request->target_device; |
2716 | enum service_response response = SAS_TASK_UNDELIVERED; | 2747 | enum service_response response = SAS_TASK_UNDELIVERED; |
2717 | enum exec_status status = SAS_ABORTED_TASK; | 2748 | enum exec_status status = SAS_ABORTED_TASK; |
2718 | enum isci_request_status request_status; | ||
2719 | enum isci_completion_selection complete_to_host | ||
2720 | = isci_perform_normal_io_completion; | ||
2721 | 2749 | ||
2722 | dev_dbg(&ihost->pdev->dev, | 2750 | dev_dbg(&ihost->pdev->dev, |
2723 | "%s: request = %p, task = %p,\n" | 2751 | "%s: request = %p, task = %p, " |
2724 | "task->data_dir = %d completion_status = 0x%x\n", | 2752 | "task->data_dir = %d completion_status = 0x%x\n", |
2725 | __func__, | 2753 | __func__, request, task, task->data_dir, completion_status); |
2726 | request, | ||
2727 | task, | ||
2728 | task->data_dir, | ||
2729 | completion_status); | ||
2730 | 2754 | ||
2731 | spin_lock(&request->state_lock); | 2755 | /* The request is done from an SCU HW perspective. */ |
2732 | request_status = request->status; | ||
2733 | 2756 | ||
2734 | /* Decode the request status. Note that if the request has been | 2757 | /* This is an active request being completed from the core. */ |
2735 | * aborted by a task management function, we don't care | 2758 | switch (completion_status) { |
2736 | * what the status is. | ||
2737 | */ | ||
2738 | switch (request_status) { | ||
2739 | |||
2740 | case aborted: | ||
2741 | /* "aborted" indicates that the request was aborted by a task | ||
2742 | * management function, since once a task management request is | ||
2743 | * performed by the device, the request only completes because | ||
2744 | * of the subsequent driver terminate. | ||
2745 | * | ||
2746 | * Aborted also means an external thread is explicitly managing | ||
2747 | * this request, so that we do not complete it up the stack. | ||
2748 | * | ||
2749 | * The target is still there (since the TMF was successful). | ||
2750 | */ | ||
2751 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2752 | response = SAS_TASK_COMPLETE; | ||
2753 | 2759 | ||
2754 | /* See if the device has been/is being stopped. Note | 2760 | case SCI_IO_FAILURE_RESPONSE_VALID: |
2755 | * that we ignore the quiesce state, since we are | 2761 | dev_dbg(&ihost->pdev->dev, |
2756 | * concerned about the actual device state. | 2762 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", |
2757 | */ | 2763 | __func__, request, task); |
2758 | if (!idev) | 2764 | |
2759 | status = SAS_DEVICE_UNKNOWN; | 2765 | if (sas_protocol_ata(task->task_proto)) { |
2760 | else | 2766 | isci_process_stp_response(task, &request->stp.rsp); |
2761 | status = SAS_ABORTED_TASK; | 2767 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { |
2768 | |||
2769 | /* crack the iu response buffer. */ | ||
2770 | resp_iu = &request->ssp.rsp; | ||
2771 | isci_request_process_response_iu(task, resp_iu, | ||
2772 | &ihost->pdev->dev); | ||
2773 | |||
2774 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { | ||
2775 | |||
2776 | dev_err(&ihost->pdev->dev, | ||
2777 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " | ||
2778 | "SAS_PROTOCOL_SMP protocol\n", | ||
2779 | __func__); | ||
2762 | 2780 | ||
2763 | complete_to_host = isci_perform_aborted_io_completion; | 2781 | } else |
2764 | /* This was an aborted request. */ | 2782 | dev_err(&ihost->pdev->dev, |
2783 | "%s: unknown protocol\n", __func__); | ||
2765 | 2784 | ||
2766 | spin_unlock(&request->state_lock); | 2785 | /* use the task status set in the task struct by the |
2786 | * isci_request_process_response_iu call. | ||
2787 | */ | ||
2788 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2789 | response = task->task_status.resp; | ||
2790 | status = task->task_status.stat; | ||
2767 | break; | 2791 | break; |
2768 | 2792 | ||
2769 | case aborting: | 2793 | case SCI_IO_SUCCESS: |
2770 | /* aborting means that the task management function tried and | 2794 | case SCI_IO_SUCCESS_IO_DONE_EARLY: |
2771 | * failed to abort the request. We need to note the request | 2795 | |
2772 | * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the | 2796 | response = SAS_TASK_COMPLETE; |
2773 | * target as down. | 2797 | status = SAM_STAT_GOOD; |
2774 | * | ||
2775 | * Aborting also means an external thread is explicitly managing | ||
2776 | * this request, so that we do not complete it up the stack. | ||
2777 | */ | ||
2778 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2798 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2779 | response = SAS_TASK_UNDELIVERED; | ||
2780 | 2799 | ||
2781 | if (!idev) | 2800 | if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { |
2782 | /* The device has been /is being stopped. Note that | ||
2783 | * we ignore the quiesce state, since we are | ||
2784 | * concerned about the actual device state. | ||
2785 | */ | ||
2786 | status = SAS_DEVICE_UNKNOWN; | ||
2787 | else | ||
2788 | status = SAS_PHY_DOWN; | ||
2789 | 2801 | ||
2790 | complete_to_host = isci_perform_aborted_io_completion; | 2802 | /* This was an SSP / STP / SATA transfer. |
2803 | * There is a possibility that less data than | ||
2804 | * the maximum was transferred. | ||
2805 | */ | ||
2806 | u32 transferred_length = sci_req_tx_bytes(request); | ||
2791 | 2807 | ||
2792 | /* This was an aborted request. */ | 2808 | task->task_status.residual |
2809 | = task->total_xfer_len - transferred_length; | ||
2810 | |||
2811 | /* If there were residual bytes, call this an | ||
2812 | * underrun. | ||
2813 | */ | ||
2814 | if (task->task_status.residual != 0) | ||
2815 | status = SAS_DATA_UNDERRUN; | ||
2793 | 2816 | ||
2794 | spin_unlock(&request->state_lock); | 2817 | dev_dbg(&ihost->pdev->dev, |
2818 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", | ||
2819 | __func__, status); | ||
2820 | |||
2821 | } else | ||
2822 | dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", | ||
2823 | __func__); | ||
2795 | break; | 2824 | break; |
2796 | 2825 | ||
2797 | case terminating: | 2826 | case SCI_IO_FAILURE_TERMINATED: |
2798 | 2827 | ||
2799 | /* This was a terminated request. This happens when | 2828 | dev_dbg(&ihost->pdev->dev,
2800 | * the I/O is being terminated because of an action on | 2829 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", |
2801 | * the device (reset, tear down, etc.), and the I/O needs | 2830 | __func__, request, task); |
2802 | * to be completed up the stack. | 2831 | |
2803 | */ | 2832 | /* The request was terminated explicitly. */ |
2804 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2833 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2805 | response = SAS_TASK_UNDELIVERED; | 2834 | response = SAS_TASK_UNDELIVERED; |
2806 | 2835 | ||
2807 | /* See if the device has been/is being stopped. Note | 2836 | /* See if the device has been/is being stopped. Note |
2808 | * that we ignore the quiesce state, since we are | 2837 | * that we ignore the quiesce state, since we are |
2809 | * concerned about the actual device state. | 2838 | * concerned about the actual device state. |
2810 | */ | 2839 | */ |
2811 | if (!idev) | 2840 | if (!idev) |
2812 | status = SAS_DEVICE_UNKNOWN; | 2841 | status = SAS_DEVICE_UNKNOWN; |
2813 | else | 2842 | else |
2814 | status = SAS_ABORTED_TASK; | 2843 | status = SAS_ABORTED_TASK; |
2815 | |||
2816 | complete_to_host = isci_perform_aborted_io_completion; | ||
2817 | |||
2818 | /* This was a terminated request. */ | ||
2819 | |||
2820 | spin_unlock(&request->state_lock); | ||
2821 | break; | 2844 | break; |
2822 | 2845 | ||
2823 | case dead: | 2846 | case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: |
2824 | /* This was a terminated request that timed-out during the | ||
2825 | * termination process. There is no task to complete to | ||
2826 | * libsas. | ||
2827 | */ | ||
2828 | complete_to_host = isci_perform_normal_io_completion; | ||
2829 | spin_unlock(&request->state_lock); | ||
2830 | break; | ||
2831 | 2847 | ||
2832 | default: | 2848 | isci_request_handle_controller_specific_errors(idev, request, |
2833 | 2849 | task, &response, | |
2834 | /* The request is done from an SCU HW perspective. */ | 2850 | &status); |
2835 | request->status = completed; | 2851 | break; |
2836 | |||
2837 | spin_unlock(&request->state_lock); | ||
2838 | |||
2839 | /* This is an active request being completed from the core. */ | ||
2840 | switch (completion_status) { | ||
2841 | |||
2842 | case SCI_IO_FAILURE_RESPONSE_VALID: | ||
2843 | dev_dbg(&ihost->pdev->dev, | ||
2844 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", | ||
2845 | __func__, | ||
2846 | request, | ||
2847 | task); | ||
2848 | |||
2849 | if (sas_protocol_ata(task->task_proto)) { | ||
2850 | isci_process_stp_response(task, &request->stp.rsp); | ||
2851 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { | ||
2852 | |||
2853 | /* crack the iu response buffer. */ | ||
2854 | resp_iu = &request->ssp.rsp; | ||
2855 | isci_request_process_response_iu(task, resp_iu, | ||
2856 | &ihost->pdev->dev); | ||
2857 | |||
2858 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { | ||
2859 | |||
2860 | dev_err(&ihost->pdev->dev, | ||
2861 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " | ||
2862 | "SAS_PROTOCOL_SMP protocol\n", | ||
2863 | __func__); | ||
2864 | |||
2865 | } else | ||
2866 | dev_err(&ihost->pdev->dev, | ||
2867 | "%s: unknown protocol\n", __func__); | ||
2868 | |||
2869 | /* use the task status set in the task struct by the | ||
2870 | * isci_request_process_response_iu call. | ||
2871 | */ | ||
2872 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2873 | response = task->task_status.resp; | ||
2874 | status = task->task_status.stat; | ||
2875 | break; | ||
2876 | 2852 | ||
2877 | case SCI_IO_SUCCESS: | 2853 | case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: |
2878 | case SCI_IO_SUCCESS_IO_DONE_EARLY: | 2854 | /* This is a special case, in that the I/O completion |
2855 | * is telling us that the device needs a reset. | ||
2856 | * In order for the device reset condition to be | ||
2857 | * noticed, the I/O has to be handled in the error | ||
2858 | * handler. Set the reset flag and cause the | ||
2859 | * SCSI error thread to be scheduled. | ||
2860 | */ | ||
2861 | spin_lock_irqsave(&task->task_state_lock, task_flags); | ||
2862 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | ||
2863 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
2879 | 2864 | ||
2880 | response = SAS_TASK_COMPLETE; | 2865 | /* Fail the I/O. */ |
2881 | status = SAM_STAT_GOOD; | 2866 | response = SAS_TASK_UNDELIVERED; |
2882 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2867 | status = SAM_STAT_TASK_ABORTED; |
2883 | 2868 | ||
2884 | if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { | 2869 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2870 | break; | ||
2885 | 2871 | ||
2886 | /* This was an SSP / STP / SATA transfer. | 2872 | case SCI_FAILURE_RETRY_REQUIRED: |
2887 | * There is a possibility that less data than | ||
2888 | * the maximum was transferred. | ||
2889 | */ | ||
2890 | u32 transferred_length = sci_req_tx_bytes(request); | ||
2891 | 2873 | ||
2892 | task->task_status.residual | 2874 | /* Fail the I/O so it can be retried. */ |
2893 | = task->total_xfer_len - transferred_length; | 2875 | response = SAS_TASK_UNDELIVERED; |
2876 | if (!idev) | ||
2877 | status = SAS_DEVICE_UNKNOWN; | ||
2878 | else | ||
2879 | status = SAS_ABORTED_TASK; | ||
2894 | 2880 | ||
2895 | /* If there were residual bytes, call this an | 2881 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2896 | * underrun. | 2882 | break; |
2897 | */ | ||
2898 | if (task->task_status.residual != 0) | ||
2899 | status = SAS_DATA_UNDERRUN; | ||
2900 | 2883 | ||
2901 | dev_dbg(&ihost->pdev->dev, | ||
2902 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", | ||
2903 | __func__, | ||
2904 | status); | ||
2905 | 2884 | ||
2906 | } else | 2885 | default: |
2907 | dev_dbg(&ihost->pdev->dev, | 2886 | /* Catch any otherwise unhandled error codes here. */ |
2908 | "%s: SCI_IO_SUCCESS\n", | 2887 | dev_dbg(&ihost->pdev->dev, |
2909 | __func__); | 2888 | "%s: invalid completion code: 0x%x - " |
2889 | "isci_request = %p\n", | ||
2890 | __func__, completion_status, request); | ||
2910 | 2891 | ||
2911 | break; | 2892 | response = SAS_TASK_UNDELIVERED; |
2912 | 2893 | ||
2913 | case SCI_IO_FAILURE_TERMINATED: | 2894 | /* See if the device has been/is being stopped. Note |
2914 | dev_dbg(&ihost->pdev->dev, | 2895 | * that we ignore the quiesce state, since we are |
2915 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", | 2896 | * concerned about the actual device state. |
2916 | __func__, | 2897 | */ |
2917 | request, | 2898 | if (!idev) |
2918 | task); | 2899 | status = SAS_DEVICE_UNKNOWN; |
2900 | else | ||
2901 | status = SAS_ABORTED_TASK; | ||
2919 | 2902 | ||
2920 | /* The request was terminated explicitly. No handling | 2903 | if (SAS_PROTOCOL_SMP == task->task_proto) |
2921 | * is needed in the SCSI error handler path. | ||
2922 | */ | ||
2923 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2904 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2924 | response = SAS_TASK_UNDELIVERED; | 2905 | else |
2925 | |||
2926 | /* See if the device has been/is being stopped. Note | ||
2927 | * that we ignore the quiesce state, since we are | ||
2928 | * concerned about the actual device state. | ||
2929 | */ | ||
2930 | if (!idev) | ||
2931 | status = SAS_DEVICE_UNKNOWN; | ||
2932 | else | ||
2933 | status = SAS_ABORTED_TASK; | ||
2934 | |||
2935 | complete_to_host = isci_perform_normal_io_completion; | ||
2936 | break; | ||
2937 | |||
2938 | case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: | ||
2939 | |||
2940 | isci_request_handle_controller_specific_errors( | ||
2941 | idev, request, task, &response, &status, | ||
2942 | &complete_to_host); | ||
2943 | |||
2944 | break; | ||
2945 | |||
2946 | case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: | ||
2947 | /* This is a special case, in that the I/O completion | ||
2948 | * is telling us that the device needs a reset. | ||
2949 | * In order for the device reset condition to be | ||
2950 | * noticed, the I/O has to be handled in the error | ||
2951 | * handler. Set the reset flag and cause the | ||
2952 | * SCSI error thread to be scheduled. | ||
2953 | */ | ||
2954 | spin_lock_irqsave(&task->task_state_lock, task_flags); | ||
2955 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | ||
2956 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
2957 | |||
2958 | /* Fail the I/O. */ | ||
2959 | response = SAS_TASK_UNDELIVERED; | ||
2960 | status = SAM_STAT_TASK_ABORTED; | ||
2961 | |||
2962 | complete_to_host = isci_perform_error_io_completion; | ||
2963 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2906 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2964 | break; | ||
2965 | |||
2966 | case SCI_FAILURE_RETRY_REQUIRED: | ||
2967 | |||
2968 | /* Fail the I/O so it can be retried. */ | ||
2969 | response = SAS_TASK_UNDELIVERED; | ||
2970 | if (!idev) | ||
2971 | status = SAS_DEVICE_UNKNOWN; | ||
2972 | else | ||
2973 | status = SAS_ABORTED_TASK; | ||
2974 | |||
2975 | complete_to_host = isci_perform_normal_io_completion; | ||
2976 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2977 | break; | ||
2978 | |||
2979 | |||
2980 | default: | ||
2981 | /* Catch any otherwise unhandled error codes here. */ | ||
2982 | dev_dbg(&ihost->pdev->dev, | ||
2983 | "%s: invalid completion code: 0x%x - " | ||
2984 | "isci_request = %p\n", | ||
2985 | __func__, completion_status, request); | ||
2986 | |||
2987 | response = SAS_TASK_UNDELIVERED; | ||
2988 | |||
2989 | /* See if the device has been/is being stopped. Note | ||
2990 | * that we ignore the quiesce state, since we are | ||
2991 | * concerned about the actual device state. | ||
2992 | */ | ||
2993 | if (!idev) | ||
2994 | status = SAS_DEVICE_UNKNOWN; | ||
2995 | else | ||
2996 | status = SAS_ABORTED_TASK; | ||
2997 | |||
2998 | if (SAS_PROTOCOL_SMP == task->task_proto) { | ||
2999 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
3000 | complete_to_host = isci_perform_normal_io_completion; | ||
3001 | } else { | ||
3002 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
3003 | complete_to_host = isci_perform_error_io_completion; | ||
3004 | } | ||
3005 | break; | ||
3006 | } | ||
3007 | break; | 2907 | break; |
3008 | } | 2908 | } |
3009 | 2909 | ||
@@ -3038,10 +2938,18 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
3038 | break; | 2938 | break; |
3039 | } | 2939 | } |
3040 | 2940 | ||
3041 | /* Put the completed request on the correct list */ | 2941 | spin_lock_irqsave(&task->task_state_lock, task_flags); |
3042 | isci_task_save_for_upper_layer_completion(ihost, request, response, | 2942 | |
3043 | status, complete_to_host | 2943 | task->task_status.resp = response; |
3044 | ); | 2944 | task->task_status.stat = status; |
2945 | |||
2946 | if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { | ||
2947 | /* Normal notification (task_done) */ | ||
2948 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
2949 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
2950 | SAS_TASK_STATE_PENDING); | ||
2951 | } | ||
2952 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
3045 | 2953 | ||
3046 | /* complete the io request to the core. */ | 2954 | /* complete the io request to the core. */ |
3047 | sci_controller_complete_io(ihost, request->target_device, request); | 2955 | sci_controller_complete_io(ihost, request->target_device, request); |
@@ -3051,6 +2959,8 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
3051 | * task to recognize the already completed case. | 2959 | * task to recognize the already completed case. |
3052 | */ | 2960 | */ |
3053 | set_bit(IREQ_TERMINATED, &request->flags); | 2961 | set_bit(IREQ_TERMINATED, &request->flags); |
2962 | |||
2963 | ireq_done(ihost, request, task); | ||
3054 | } | 2964 | } |
3055 | 2965 | ||
3056 | static void sci_request_started_state_enter(struct sci_base_state_machine *sm) | 2966 | static void sci_request_started_state_enter(struct sci_base_state_machine *sm) |
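The SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED arm above escalates to the SCSI error handler by flagging the task before failing the I/O back. The flagging step by itself, using only the libsas locking shown in the patch:

#include <scsi/libsas.h>

/* Sketch: mark a sas_task so the SCSI EH will reset the device once
 * the I/O is failed back (SAS_TASK_UNDELIVERED / SAM_STAT_TASK_ABORTED).
 */
static void sketch_flag_dev_reset(struct sas_task *task)
{
        unsigned long flags;

        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
        spin_unlock_irqrestore(&task->task_state_lock, flags);
}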
@@ -3169,7 +3079,7 @@ sci_general_request_construct(struct isci_host *ihost, | |||
3169 | sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); | 3079 | sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); |
3170 | 3080 | ||
3171 | ireq->target_device = idev; | 3081 | ireq->target_device = idev; |
3172 | ireq->protocol = SCIC_NO_PROTOCOL; | 3082 | ireq->protocol = SAS_PROTOCOL_NONE; |
3173 | ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; | 3083 | ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; |
3174 | 3084 | ||
3175 | ireq->sci_status = SCI_SUCCESS; | 3085 | ireq->sci_status = SCI_SUCCESS; |
@@ -3193,7 +3103,7 @@ sci_io_request_construct(struct isci_host *ihost, | |||
3193 | 3103 | ||
3194 | if (dev->dev_type == SAS_END_DEV) | 3104 | if (dev->dev_type == SAS_END_DEV) |
3195 | /* pass */; | 3105 | /* pass */; |
3196 | else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) | 3106 | else if (dev_is_sata(dev)) |
3197 | memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); | 3107 | memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); |
3198 | else if (dev_is_expander(dev)) | 3108 | else if (dev_is_expander(dev)) |
3199 | /* pass */; | 3109 | /* pass */; |
@@ -3215,10 +3125,15 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost, | |||
3215 | /* Build the common part of the request */ | 3125 | /* Build the common part of the request */ |
3216 | sci_general_request_construct(ihost, idev, ireq); | 3126 | sci_general_request_construct(ihost, idev, ireq); |
3217 | 3127 | ||
3218 | if (dev->dev_type == SAS_END_DEV || | 3128 | if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { |
3219 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | ||
3220 | set_bit(IREQ_TMF, &ireq->flags); | 3129 | set_bit(IREQ_TMF, &ireq->flags); |
3221 | memset(ireq->tc, 0, sizeof(struct scu_task_context)); | 3130 | memset(ireq->tc, 0, sizeof(struct scu_task_context)); |
3131 | |||
3132 | /* Set the protocol indicator. */ | ||
3133 | if (dev_is_sata(dev)) | ||
3134 | ireq->protocol = SAS_PROTOCOL_STP; | ||
3135 | else | ||
3136 | ireq->protocol = SAS_PROTOCOL_SSP; | ||
3222 | } else | 3137 | } else |
3223 | status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 3138 | status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3224 | 3139 | ||
@@ -3311,7 +3226,7 @@ sci_io_request_construct_smp(struct device *dev, | |||
3311 | if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) | 3226 | if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) |
3312 | return SCI_FAILURE; | 3227 | return SCI_FAILURE; |
3313 | 3228 | ||
3314 | ireq->protocol = SCIC_SMP_PROTOCOL; | 3229 | ireq->protocol = SAS_PROTOCOL_SMP; |
3315 | 3230 | ||
3316 | /* byte swap the smp request. */ | 3231 | /* byte swap the smp request. */ |
3317 | 3232 | ||
@@ -3496,9 +3411,6 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t | |||
3496 | ireq->io_request_completion = NULL; | 3411 | ireq->io_request_completion = NULL; |
3497 | ireq->flags = 0; | 3412 | ireq->flags = 0; |
3498 | ireq->num_sg_entries = 0; | 3413 | ireq->num_sg_entries = 0; |
3499 | INIT_LIST_HEAD(&ireq->completed_node); | ||
3500 | INIT_LIST_HEAD(&ireq->dev_node); | ||
3501 | isci_request_change_state(ireq, allocated); | ||
3502 | 3414 | ||
3503 | return ireq; | 3415 | return ireq; |
3504 | } | 3416 | } |
@@ -3582,26 +3494,15 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3582 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 3494 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
3583 | return status; | 3495 | return status; |
3584 | } | 3496 | } |
3585 | |||
3586 | /* Either I/O started OK, or the core has signaled that | 3497 | /* Either I/O started OK, or the core has signaled that |
3587 | * the device needs a target reset. | 3498 | * the device needs a target reset. |
3588 | * | ||
3589 | * In either case, hold onto the I/O for later. | ||
3590 | * | ||
3591 | * Update its status and add it to the list in the | ||
3592 | * remote device object. | ||
3593 | */ | 3499 | */ |
3594 | list_add(&ireq->dev_node, &idev->reqs_in_process); | 3500 | if (status != SCI_SUCCESS) { |
3595 | |||
3596 | if (status == SCI_SUCCESS) { | ||
3597 | isci_request_change_state(ireq, started); | ||
3598 | } else { | ||
3599 | /* The request did not really start in the | 3501 | /* The request did not really start in the |
3600 | * hardware, so clear the request handle | 3502 | * hardware, so clear the request handle |
3601 | * here so no terminations will be done. | 3503 | * here so no terminations will be done. |
3602 | */ | 3504 | */ |
3603 | set_bit(IREQ_TERMINATED, &ireq->flags); | 3505 | set_bit(IREQ_TERMINATED, &ireq->flags); |
3604 | isci_request_change_state(ireq, completed); | ||
3605 | } | 3506 | } |
3606 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 3507 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
3607 | 3508 | ||
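With the reqs_in_process bookkeeping gone, a failed start is recorded purely in the request's flags word: marking the request IREQ_TERMINATED up front guarantees the abort path never issues a terminate for an I/O the hardware never owned. A sketch of that disposition, assuming the driver's request.h supplies the types (the caller holds ihost->scic_lock, as above):

#include <linux/bitops.h>
#include "request.h"        /* struct isci_request, IREQ_* bits (assumed) */

/* Sketch: failed-start disposition. Nothing extra is needed on
 * success; the flags word alone carries the lifecycle now.
 */
static void sketch_post_start(struct isci_request *ireq, enum sci_status status)
{
        if (status != SCI_SUCCESS)
                set_bit(IREQ_TERMINATED, &ireq->flags);
}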
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 057f2378452d..aff95317fcf4 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -61,30 +61,6 @@ | |||
61 | #include "scu_task_context.h" | 61 | #include "scu_task_context.h" |
62 | 62 | ||
63 | /** | 63 | /** |
64 | * struct isci_request_status - This enum defines the possible states of an I/O | ||
65 | * request. | ||
66 | * | ||
67 | * | ||
68 | */ | ||
69 | enum isci_request_status { | ||
70 | unallocated = 0x00, | ||
71 | allocated = 0x01, | ||
72 | started = 0x02, | ||
73 | completed = 0x03, | ||
74 | aborting = 0x04, | ||
75 | aborted = 0x05, | ||
76 | terminating = 0x06, | ||
77 | dead = 0x07 | ||
78 | }; | ||
79 | |||
80 | enum sci_request_protocol { | ||
81 | SCIC_NO_PROTOCOL, | ||
82 | SCIC_SMP_PROTOCOL, | ||
83 | SCIC_SSP_PROTOCOL, | ||
84 | SCIC_STP_PROTOCOL | ||
85 | }; /* XXX remove me, use sas_task.{dev|task_proto} instead */; | ||
86 | |||
87 | /** | ||
88 | * isci_stp_request - extra request infrastructure to handle pio/atapi protocol | 64 | * isci_stp_request - extra request infrastructure to handle pio/atapi protocol |
89 | * @pio_len - number of bytes requested at PIO setup | 65 | * @pio_len - number of bytes requested at PIO setup |
90 | * @status - pio setup ending status value to tell us if we need | 66 | * @status - pio setup ending status value to tell us if we need |
@@ -104,11 +80,14 @@ struct isci_stp_request { | |||
104 | }; | 80 | }; |
105 | 81 | ||
106 | struct isci_request { | 82 | struct isci_request { |
107 | enum isci_request_status status; | ||
108 | #define IREQ_COMPLETE_IN_TARGET 0 | 83 | #define IREQ_COMPLETE_IN_TARGET 0 |
109 | #define IREQ_TERMINATED 1 | 84 | #define IREQ_TERMINATED 1 |
110 | #define IREQ_TMF 2 | 85 | #define IREQ_TMF 2 |
111 | #define IREQ_ACTIVE 3 | 86 | #define IREQ_ACTIVE 3 |
87 | #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */ | ||
88 | #define IREQ_TC_ABORT_POSTED 5 | ||
89 | #define IREQ_ABORT_PATH_ACTIVE 6 | ||
90 | #define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */ | ||
112 | unsigned long flags; | 91 | unsigned long flags; |
113 | /* XXX kill ttype and ttype_ptr, allocate full sas_task */ | 92 | /* XXX kill ttype and ttype_ptr, allocate full sas_task */ |
114 | union ttype_ptr_union { | 93 | union ttype_ptr_union { |
@@ -116,11 +95,6 @@ struct isci_request { | |||
116 | struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ | 95 | struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ |
117 | } ttype_ptr; | 96 | } ttype_ptr; |
118 | struct isci_host *isci_host; | 97 | struct isci_host *isci_host; |
119 | /* For use in the requests_to_{complete|abort} lists: */ | ||
120 | struct list_head completed_node; | ||
121 | /* For use in the reqs_in_process list: */ | ||
122 | struct list_head dev_node; | ||
123 | spinlock_t state_lock; | ||
124 | dma_addr_t request_daddr; | 98 | dma_addr_t request_daddr; |
125 | dma_addr_t zero_scatter_daddr; | 99 | dma_addr_t zero_scatter_daddr; |
126 | unsigned int num_sg_entries; | 100 | unsigned int num_sg_entries; |
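The spinlock-protected status enum is deleted further down in this same header; lifecycle state now lives entirely in the atomic flags word, extended above with four abort-path bits. A sketch of the idiom, where the mutual-exclusion use of IREQ_ABORT_PATH_ACTIVE is an illustration of how such a bit can work, not a claim about the final driver logic:

#include <linux/types.h>
#include <linux/bitops.h>
#include "request.h"        /* IREQ_ABORT_PATH_ACTIVE (assumed) */

/* Sketch: atomic bit ops replace the enum plus per-request spinlock.
 * test_and_set_bit() returns the old value, so only one caller
 * "wins" entry; clear_bit() releases ownership.
 */
static bool sketch_enter_abort_path(unsigned long *flags)
{
        return !test_and_set_bit(IREQ_ABORT_PATH_ACTIVE, flags);
}

static void sketch_leave_abort_path(unsigned long *flags)
{
        clear_bit(IREQ_ABORT_PATH_ACTIVE, flags);
}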
@@ -140,7 +114,7 @@ struct isci_request { | |||
140 | struct isci_host *owning_controller; | 114 | struct isci_host *owning_controller; |
141 | struct isci_remote_device *target_device; | 115 | struct isci_remote_device *target_device; |
142 | u16 io_tag; | 116 | u16 io_tag; |
143 | enum sci_request_protocol protocol; | 117 | enum sas_protocol protocol; |
144 | u32 scu_status; /* hardware result */ | 118 | u32 scu_status; /* hardware result */ |
145 | u32 sci_status; /* upper layer disposition */ | 119 | u32 sci_status; /* upper layer disposition */ |
146 | u32 post_context; | 120 | u32 post_context; |
@@ -309,92 +283,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) | |||
309 | return ireq->request_daddr + (requested_addr - base_addr); | 283 | return ireq->request_daddr + (requested_addr - base_addr); |
310 | } | 284 | } |
311 | 285 | ||
312 | /** | ||
313 | * isci_request_change_state() - This function sets the status of the request | ||
314 | * object. | ||
315 | * @request: This parameter points to the isci_request object | ||
316 | * @status: This parameter is the new status of the object | ||
317 | * | ||
318 | */ | ||
319 | static inline enum isci_request_status | ||
320 | isci_request_change_state(struct isci_request *isci_request, | ||
321 | enum isci_request_status status) | ||
322 | { | ||
323 | enum isci_request_status old_state; | ||
324 | unsigned long flags; | ||
325 | |||
326 | dev_dbg(&isci_request->isci_host->pdev->dev, | ||
327 | "%s: isci_request = %p, state = 0x%x\n", | ||
328 | __func__, | ||
329 | isci_request, | ||
330 | status); | ||
331 | |||
332 | BUG_ON(isci_request == NULL); | ||
333 | |||
334 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
335 | old_state = isci_request->status; | ||
336 | isci_request->status = status; | ||
337 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
338 | |||
339 | return old_state; | ||
340 | } | ||
341 | |||
342 | /** | ||
343 | * isci_request_change_started_to_newstate() - This function sets the status of | ||
344 | * the request object. | ||
345 | * @request: This parameter points to the isci_request object | ||
346 | * @status: This parameter is the new status of the object | ||
347 | * | ||
348 | * Returns the state previous to any change. | ||
349 | */ | ||
350 | static inline enum isci_request_status | ||
351 | isci_request_change_started_to_newstate(struct isci_request *isci_request, | ||
352 | struct completion *completion_ptr, | ||
353 | enum isci_request_status newstate) | ||
354 | { | ||
355 | enum isci_request_status old_state; | ||
356 | unsigned long flags; | ||
357 | |||
358 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
359 | |||
360 | old_state = isci_request->status; | ||
361 | |||
362 | if (old_state == started || old_state == aborting) { | ||
363 | BUG_ON(isci_request->io_request_completion != NULL); | ||
364 | |||
365 | isci_request->io_request_completion = completion_ptr; | ||
366 | isci_request->status = newstate; | ||
367 | } | ||
368 | |||
369 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
370 | |||
371 | dev_dbg(&isci_request->isci_host->pdev->dev, | ||
372 | "%s: isci_request = %p, old_state = 0x%x\n", | ||
373 | __func__, | ||
374 | isci_request, | ||
375 | old_state); | ||
376 | |||
377 | return old_state; | ||
378 | } | ||
379 | |||
380 | /** | ||
381 | * isci_request_change_started_to_aborted() - This function sets the status of | ||
382 | * the request object. | ||
383 | * @request: This parameter points to the isci_request object | ||
384 | * @completion_ptr: This parameter is saved as the kernel completion structure | ||
385 | * signalled when the old request completes. | ||
386 | * | ||
387 | * Returns the state previous to any change. | ||
388 | */ | ||
389 | static inline enum isci_request_status | ||
390 | isci_request_change_started_to_aborted(struct isci_request *isci_request, | ||
391 | struct completion *completion_ptr) | ||
392 | { | ||
393 | return isci_request_change_started_to_newstate(isci_request, | ||
394 | completion_ptr, | ||
395 | aborted); | ||
396 | } | ||
397 | |||
398 | #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) | 286 | #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) |
399 | 287 | ||
400 | #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) | 288 | #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) |
@@ -404,8 +292,6 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, | |||
404 | u16 tag); | 292 | u16 tag); |
405 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, | 293 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, |
406 | struct sas_task *task, u16 tag); | 294 | struct sas_task *task, u16 tag); |
407 | void isci_terminate_pending_requests(struct isci_host *ihost, | ||
408 | struct isci_remote_device *idev); | ||
409 | enum sci_status | 295 | enum sci_status |
410 | sci_task_request_construct(struct isci_host *ihost, | 296 | sci_task_request_construct(struct isci_host *ihost, |
411 | struct isci_remote_device *idev, | 297 | struct isci_remote_device *idev, |
@@ -421,5 +307,4 @@ static inline int isci_task_is_ncq_recovery(struct sas_task *task) | |||
421 | task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); | 307 | task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); |
422 | 308 | ||
423 | } | 309 | } |
424 | |||
425 | #endif /* !defined(_ISCI_REQUEST_H_) */ | 310 | #endif /* !defined(_ISCI_REQUEST_H_) */ |
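sci_io_request_get_dma_addr(), kept intact above, works because the whole request lives in one coherent DMA allocation: a field's bus address is the request's DMA base plus that field's offset from the request's virtual base. The arithmetic in isolation, with names mirroring the inline:

#include <linux/types.h>

/* Sketch: translate a CPU pointer inside the request allocation into
 * its bus address; daddr and base mirror ireq->request_daddr and the
 * request's virtual base address in the inline above.
 */
static dma_addr_t sketch_virt_to_dma(dma_addr_t daddr, void *base, void *virt)
{
        return daddr + ((char *)virt - (char *)base);
}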
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
index c8b329c695f9..071cb74a211c 100644
--- a/drivers/scsi/isci/scu_completion_codes.h
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -224,6 +224,7 @@ | |||
224 | * 32-bit value like we want, each immediate value must be cast to a u32. | 224 | * 32-bit value like we want, each immediate value must be cast to a u32. |
225 | */ | 225 | */ |
226 | #define SCU_TASK_DONE_GOOD ((u32)0x00) | 226 | #define SCU_TASK_DONE_GOOD ((u32)0x00) |
227 | #define SCU_TASK_DONE_TX_RAW_CMD_ERR ((u32)0x08) | ||
227 | #define SCU_TASK_DONE_CRC_ERR ((u32)0x14) | 228 | #define SCU_TASK_DONE_CRC_ERR ((u32)0x14) |
228 | #define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14) | 229 | #define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14) |
229 | #define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15) | 230 | #define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15) |
@@ -237,6 +238,7 @@ | |||
237 | #define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A) | 238 | #define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A) |
238 | #define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A) | 239 | #define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A) |
239 | #define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B) | 240 | #define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B) |
241 | #define SCU_TASK_DONE_BREAK_RCVD ((u32)0x1B) | ||
240 | #define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B) | 242 | #define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B) |
241 | #define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C) | 243 | #define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C) |
242 | #define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C) | 244 | #define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C) |
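Note that the new names deliberately alias existing raw values, as this header already does elsewhere (0x14 and 0x1A each carry two names, and 0x1B now carries three). A decoder therefore has to pick the interpretation from the request's context rather than from the value alone; in the sketch below the protocol test is an assumption made purely for illustration:

#include <linux/types.h>

/* Sketch: one raw SCU completion value, several meanings. */
static const char *sketch_name_0x1b(bool request_was_stp)
{
        return request_was_stp ? "SCU_TASK_DONE_BREAK_RCVD"
                               : "SCU_TASK_DONE_LL_ABORT_ERR";
}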
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 374254ede9d4..6bc74eb012c9 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -78,54 +78,25 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, | |||
78 | enum exec_status status) | 78 | enum exec_status status) |
79 | 79 | ||
80 | { | 80 | { |
81 | enum isci_completion_selection disposition; | 81 | unsigned long flags; |
82 | 82 | ||
83 | disposition = isci_perform_normal_io_completion; | 83 | /* Normal notification (task_done) */ |
84 | disposition = isci_task_set_completion_status(task, response, status, | 84 | dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", |
85 | disposition); | 85 | __func__, task, response, status); |
86 | 86 | ||
87 | /* Tasks aborted specifically by a call to the lldd_abort_task | 87 | spin_lock_irqsave(&task->task_state_lock, flags); |
88 | * function should not be completed to the host in the regular path. | ||
89 | */ | ||
90 | switch (disposition) { | ||
91 | case isci_perform_normal_io_completion: | ||
92 | /* Normal notification (task_done) */ | ||
93 | dev_dbg(&ihost->pdev->dev, | ||
94 | "%s: Normal - task = %p, response=%d, " | ||
95 | "status=%d\n", | ||
96 | __func__, task, response, status); | ||
97 | |||
98 | task->lldd_task = NULL; | ||
99 | task->task_done(task); | ||
100 | break; | ||
101 | |||
102 | case isci_perform_aborted_io_completion: | ||
103 | /* | ||
104 | * No notification because this request is already in the | ||
105 | * abort path. | ||
106 | */ | ||
107 | dev_dbg(&ihost->pdev->dev, | ||
108 | "%s: Aborted - task = %p, response=%d, " | ||
109 | "status=%d\n", | ||
110 | __func__, task, response, status); | ||
111 | break; | ||
112 | 88 | ||
113 | case isci_perform_error_io_completion: | 89 | task->task_status.resp = response; |
114 | /* Use sas_task_abort */ | 90 | task->task_status.stat = status; |
115 | dev_dbg(&ihost->pdev->dev, | ||
116 | "%s: Error - task = %p, response=%d, " | ||
117 | "status=%d\n", | ||
118 | __func__, task, response, status); | ||
119 | sas_task_abort(task); | ||
120 | break; | ||
121 | 91 | ||
122 | default: | 92 | /* Normal notification (task_done) */ |
123 | dev_dbg(&ihost->pdev->dev, | 93 | task->task_state_flags |= SAS_TASK_STATE_DONE; |
124 | "%s: isci task notification default case!", | 94 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | |
125 | __func__); | 95 | SAS_TASK_STATE_PENDING); |
126 | sas_task_abort(task); | 96 | task->lldd_task = NULL; |
127 | break; | 97 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
128 | } | 98 | |
99 | task->task_done(task); | ||
129 | } | 100 | } |
130 | 101 | ||
131 | #define for_each_sas_task(num, task) \ | 102 | #define for_each_sas_task(num, task) \ |
@@ -289,60 +260,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
289 | return ireq; | 260 | return ireq; |
290 | } | 261 | } |
291 | 262 | ||
292 | /** | ||
293 | * isci_request_mark_zombie() - This function must be called with scic_lock held. | ||
294 | */ | ||
295 | static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq) | ||
296 | { | ||
297 | struct completion *tmf_completion = NULL; | ||
298 | struct completion *req_completion; | ||
299 | |||
300 | /* Set the request state to "dead". */ | ||
301 | ireq->status = dead; | ||
302 | |||
303 | req_completion = ireq->io_request_completion; | ||
304 | ireq->io_request_completion = NULL; | ||
305 | |||
306 | if (test_bit(IREQ_TMF, &ireq->flags)) { | ||
307 | /* Break links with the TMF request. */ | ||
308 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | ||
309 | |||
310 | /* In the case where a task request is dying, | ||
311 | * the thread waiting on the completion will sit and | ||
312 | * time out unless we wake it now. Since the TMF | ||
313 | * has a default error status, complete it here | ||
314 | * to wake the waiting thread. | ||
315 | */ | ||
316 | if (tmf) { | ||
317 | tmf_completion = tmf->complete; | ||
318 | tmf->complete = NULL; | ||
319 | } | ||
320 | ireq->ttype_ptr.tmf_task_ptr = NULL; | ||
321 | dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n", | ||
322 | __func__, tmf->tmf_code, tmf->io_tag); | ||
323 | } else { | ||
324 | /* Break links with the sas_task - the callback is done | ||
325 | * elsewhere. | ||
326 | */ | ||
327 | struct sas_task *task = isci_request_access_task(ireq); | ||
328 | |||
329 | if (task) | ||
330 | task->lldd_task = NULL; | ||
331 | |||
332 | ireq->ttype_ptr.io_task_ptr = NULL; | ||
333 | } | ||
334 | |||
335 | dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n", | ||
336 | ireq->io_tag); | ||
337 | |||
338 | /* Don't force waiting threads to timeout. */ | ||
339 | if (req_completion) | ||
340 | complete(req_completion); | ||
341 | |||
342 | if (tmf_completion != NULL) | ||
343 | complete(tmf_completion); | ||
344 | } | ||
345 | |||
346 | static int isci_task_execute_tmf(struct isci_host *ihost, | 263 | static int isci_task_execute_tmf(struct isci_host *ihost, |
347 | struct isci_remote_device *idev, | 264 | struct isci_remote_device *idev, |
348 | struct isci_tmf *tmf, unsigned long timeout_ms) | 265 | struct isci_tmf *tmf, unsigned long timeout_ms) |
@@ -400,17 +317,11 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
400 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 317 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
401 | goto err_tci; | 318 | goto err_tci; |
402 | } | 319 | } |
403 | |||
404 | if (tmf->cb_state_func != NULL) | ||
405 | tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data); | ||
406 | |||
407 | isci_request_change_state(ireq, started); | ||
408 | |||
409 | /* add the request to the remote device request list. */ | ||
410 | list_add(&ireq->dev_node, &idev->reqs_in_process); | ||
411 | |||
412 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 320 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
413 | 321 | ||
322 | /* The RNC must be unsuspended before the TMF can get a response. */ | ||
323 | isci_remote_device_resume_from_abort(ihost, idev); | ||
324 | |||
414 | /* Wait for the TMF to complete, or a timeout. */ | 325 | /* Wait for the TMF to complete, or a timeout. */ |
415 | timeleft = wait_for_completion_timeout(&completion, | 326 | timeleft = wait_for_completion_timeout(&completion, |
416 | msecs_to_jiffies(timeout_ms)); | 327 | msecs_to_jiffies(timeout_ms)); |
@@ -419,32 +330,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
419 | /* The TMF did not complete - this could be because | 330 | /* The TMF did not complete - this could be because |
420 | * of an unplug. Terminate the TMF request now. | 331 | * of an unplug. Terminate the TMF request now. |
421 | */ | 332 | */ |
422 | spin_lock_irqsave(&ihost->scic_lock, flags); | 333 | isci_remote_device_suspend_terminate(ihost, idev, ireq); |
423 | |||
424 | if (tmf->cb_state_func != NULL) | ||
425 | tmf->cb_state_func(isci_tmf_timed_out, tmf, | ||
426 | tmf->cb_data); | ||
427 | |||
428 | sci_controller_terminate_request(ihost, idev, ireq); | ||
429 | |||
430 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
431 | |||
432 | timeleft = wait_for_completion_timeout( | ||
433 | &completion, | ||
434 | msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)); | ||
435 | |||
436 | if (!timeleft) { | ||
437 | /* Strange condition - the termination of the TMF | ||
438 | * request timed-out. | ||
439 | */ | ||
440 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
441 | |||
442 | /* If the TMF status has not changed, kill it. */ | ||
443 | if (tmf->status == SCI_FAILURE_TIMEOUT) | ||
444 | isci_request_mark_zombie(ihost, ireq); | ||
445 | |||
446 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
447 | } | ||
448 | } | 334 | } |
449 | 335 | ||
450 | isci_print_tmf(ihost, tmf); | 336 | isci_print_tmf(ihost, tmf); |
@@ -476,315 +362,21 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
476 | } | 362 | } |
477 | 363 | ||
478 | static void isci_task_build_tmf(struct isci_tmf *tmf, | 364 | static void isci_task_build_tmf(struct isci_tmf *tmf, |
479 | enum isci_tmf_function_codes code, | 365 | enum isci_tmf_function_codes code) |
480 | void (*tmf_sent_cb)(enum isci_tmf_cb_state, | ||
481 | struct isci_tmf *, | ||
482 | void *), | ||
483 | void *cb_data) | ||
484 | { | 366 | { |
485 | memset(tmf, 0, sizeof(*tmf)); | 367 | memset(tmf, 0, sizeof(*tmf)); |
486 | 368 | tmf->tmf_code = code; | |
487 | tmf->tmf_code = code; | ||
488 | tmf->cb_state_func = tmf_sent_cb; | ||
489 | tmf->cb_data = cb_data; | ||
490 | } | 369 | } |
491 | 370 | ||
492 | static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, | 371 | static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, |
493 | enum isci_tmf_function_codes code, | 372 | enum isci_tmf_function_codes code, |
494 | void (*tmf_sent_cb)(enum isci_tmf_cb_state, | ||
495 | struct isci_tmf *, | ||
496 | void *), | ||
497 | struct isci_request *old_request) | 373 | struct isci_request *old_request) |
498 | { | 374 | { |
499 | isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request); | 375 | isci_task_build_tmf(tmf, code); |
500 | tmf->io_tag = old_request->io_tag; | 376 | tmf->io_tag = old_request->io_tag; |
501 | } | 377 | } |
502 | 378 | ||
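With the callback plumbing removed, building and firing a TMF collapses to two calls; the LUN-reset path later in this patch uses exactly this shape. A usage sketch, assuming the driver's task.h declarations and restating the 2-second timeout defined at that call site:

#include "task.h"           /* isci_tmf, isci_task_*_tmf() (assumed) */

#define SKETCH_LU_RESET_TIMEOUT_MS 2000 /* mirrors ISCI_LU_RESET_TIMEOUT_MS below */

/* Sketch: issue a LUN-reset TMF with the slimmed-down helpers. */
static int sketch_lun_reset_tmf(struct isci_host *ihost,
                                struct isci_remote_device *idev)
{
        struct isci_tmf tmf;

        isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
        return isci_task_execute_tmf(ihost, idev, &tmf,
                                     SKETCH_LU_RESET_TIMEOUT_MS);
}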
503 | /** | 379 | /** |
504 | * isci_task_validate_request_to_abort() - This function checks the given I/O | ||
505 | * against the "started" state. If the request is still "started", its | ||
506 | * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD | ||
507 | * BEFORE CALLING THIS FUNCTION. | ||
508 | * @isci_request: This parameter specifies the request object to control. | ||
509 | * @isci_host: This parameter specifies the ISCI host object | ||
510 | * @isci_device: This is the device to which the request is pending. | ||
511 | * @aborted_io_completion: This is a completion structure that will be added to | ||
512 | * the request in case it is changed to aborting; this completion is | ||
513 | * triggered when the request is fully completed. | ||
514 | * | ||
515 | * Either "started" on successful change of the task status to "aborted", or | ||
516 | * "unallocated" if the task cannot be controlled. | ||
517 | */ | ||
518 | static enum isci_request_status isci_task_validate_request_to_abort( | ||
519 | struct isci_request *isci_request, | ||
520 | struct isci_host *isci_host, | ||
521 | struct isci_remote_device *isci_device, | ||
522 | struct completion *aborted_io_completion) | ||
523 | { | ||
524 | enum isci_request_status old_state = unallocated; | ||
525 | |||
526 | /* Only abort the task if it's in the | ||
527 | * device's request_in_process list | ||
528 | */ | ||
529 | if (isci_request && !list_empty(&isci_request->dev_node)) { | ||
530 | old_state = isci_request_change_started_to_aborted( | ||
531 | isci_request, aborted_io_completion); | ||
532 | |||
533 | } | ||
534 | |||
535 | return old_state; | ||
536 | } | ||
537 | |||
538 | static int isci_request_is_dealloc_managed(enum isci_request_status stat) | ||
539 | { | ||
540 | switch (stat) { | ||
541 | case aborted: | ||
542 | case aborting: | ||
543 | case terminating: | ||
544 | case completed: | ||
545 | case dead: | ||
546 | return true; | ||
547 | default: | ||
548 | return false; | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /** | ||
553 | * isci_terminate_request_core() - This function will terminate the given | ||
554 | * request, and wait for it to complete. This function must only be called | ||
555 | * from a thread that can wait. Note that the request is terminated and | ||
556 | * completed (back to the host, if started there). | ||
557 | * @ihost: This SCU. | ||
558 | * @idev: The target. | ||
559 | * @isci_request: The I/O request to be terminated. | ||
560 | * | ||
561 | */ | ||
562 | static void isci_terminate_request_core(struct isci_host *ihost, | ||
563 | struct isci_remote_device *idev, | ||
564 | struct isci_request *isci_request) | ||
565 | { | ||
566 | enum sci_status status = SCI_SUCCESS; | ||
567 | bool was_terminated = false; | ||
568 | bool needs_cleanup_handling = false; | ||
569 | unsigned long flags; | ||
570 | unsigned long termination_completed = 1; | ||
571 | struct completion *io_request_completion; | ||
572 | |||
573 | dev_dbg(&ihost->pdev->dev, | ||
574 | "%s: device = %p; request = %p\n", | ||
575 | __func__, idev, isci_request); | ||
576 | |||
577 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
578 | |||
579 | io_request_completion = isci_request->io_request_completion; | ||
580 | |||
581 | /* Note that we are not going to direct | ||
582 | * the target to abort the request. | ||
583 | */ | ||
584 | set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags); | ||
585 | |||
586 | /* Make sure the request wasn't just sitting around signalling | ||
587 | * device condition (if the request handle is NULL, then the | ||
588 | * request completed but needed additional handling here). | ||
589 | */ | ||
590 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { | ||
591 | was_terminated = true; | ||
592 | needs_cleanup_handling = true; | ||
593 | status = sci_controller_terminate_request(ihost, | ||
594 | idev, | ||
595 | isci_request); | ||
596 | } | ||
597 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
598 | |||
599 | /* | ||
600 | * The only time the request to terminate will | ||
601 | * fail is when the io request is completed and | ||
602 | * being aborted. | ||
603 | */ | ||
604 | if (status != SCI_SUCCESS) { | ||
605 | dev_dbg(&ihost->pdev->dev, | ||
606 | "%s: sci_controller_terminate_request" | ||
607 | " returned = 0x%x\n", | ||
608 | __func__, status); | ||
609 | |||
610 | isci_request->io_request_completion = NULL; | ||
611 | |||
612 | } else { | ||
613 | if (was_terminated) { | ||
614 | dev_dbg(&ihost->pdev->dev, | ||
615 | "%s: before completion wait (%p/%p)\n", | ||
616 | __func__, isci_request, io_request_completion); | ||
617 | |||
618 | /* Wait here for the request to complete. */ | ||
619 | termination_completed | ||
620 | = wait_for_completion_timeout( | ||
621 | io_request_completion, | ||
622 | msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)); | ||
623 | |||
624 | if (!termination_completed) { | ||
625 | |||
626 | /* The request to terminate has timed out. */ | ||
627 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
628 | |||
629 | /* Check for state changes. */ | ||
630 | if (!test_bit(IREQ_TERMINATED, | ||
631 | &isci_request->flags)) { | ||
632 | |||
633 | /* The best we can do is to have the | ||
634 | * request die a silent death if it | ||
635 | * ever really completes. | ||
636 | */ | ||
637 | isci_request_mark_zombie(ihost, | ||
638 | isci_request); | ||
639 | needs_cleanup_handling = true; | ||
640 | } else | ||
641 | termination_completed = 1; | ||
642 | |||
643 | spin_unlock_irqrestore(&ihost->scic_lock, | ||
644 | flags); | ||
645 | |||
646 | if (!termination_completed) { | ||
647 | |||
648 | dev_dbg(&ihost->pdev->dev, | ||
649 | "%s: *** Timeout waiting for " | ||
650 | "termination(%p/%p)\n", | ||
651 | __func__, io_request_completion, | ||
652 | isci_request); | ||
653 | |||
654 | /* The request can no longer be referenced | ||
655 | * safely since it may go away if the | ||
656 | * termination ever really does complete. | ||
657 | */ | ||
658 | isci_request = NULL; | ||
659 | } | ||
660 | } | ||
661 | if (termination_completed) | ||
662 | dev_dbg(&ihost->pdev->dev, | ||
663 | "%s: after completion wait (%p/%p)\n", | ||
664 | __func__, isci_request, io_request_completion); | ||
665 | } | ||
666 | |||
667 | if (termination_completed) { | ||
668 | |||
669 | isci_request->io_request_completion = NULL; | ||
670 | |||
671 | /* Peek at the status of the request. This will tell | ||
672 | * us if there was special handling on the request such that it | ||
673 | * needs to be detached and freed here. | ||
674 | */ | ||
675 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
676 | |||
677 | needs_cleanup_handling | ||
678 | = isci_request_is_dealloc_managed( | ||
679 | isci_request->status); | ||
680 | |||
681 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
682 | |||
683 | } | ||
684 | if (needs_cleanup_handling) { | ||
685 | |||
686 | dev_dbg(&ihost->pdev->dev, | ||
687 | "%s: cleanup isci_device=%p, request=%p\n", | ||
688 | __func__, idev, isci_request); | ||
689 | |||
690 | if (isci_request != NULL) { | ||
691 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
692 | isci_free_tag(ihost, isci_request->io_tag); | ||
693 | isci_request_change_state(isci_request, unallocated); | ||
694 | list_del_init(&isci_request->dev_node); | ||
695 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
696 | } | ||
697 | } | ||
698 | } | ||
699 | } | ||
700 | |||
701 | /** | ||
702 | * isci_terminate_pending_requests() - This function will change all of the | ||
703 | * requests on the given device to the "aborting" state, will terminate the | ||
704 | * requests, and wait for them to complete. This function must only be | ||
705 | * called from a thread that can wait. Note that the requests are all | ||
706 | * terminated and completed (back to the host, if started there). | ||
707 | * @isci_host: This parameter specifies SCU. | ||
708 | * @idev: This parameter specifies the target. | ||
709 | * | ||
710 | */ | ||
711 | void isci_terminate_pending_requests(struct isci_host *ihost, | ||
712 | struct isci_remote_device *idev) | ||
713 | { | ||
714 | struct completion request_completion; | ||
715 | enum isci_request_status old_state; | ||
716 | unsigned long flags; | ||
717 | LIST_HEAD(list); | ||
718 | |||
719 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
720 | list_splice_init(&idev->reqs_in_process, &list); | ||
721 | |||
722 | /* assumes that isci_terminate_request_core deletes from the list */ | ||
723 | while (!list_empty(&list)) { | ||
724 | struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node); | ||
725 | |||
726 | /* Change state to "terminating" if it is currently | ||
727 | * "started". | ||
728 | */ | ||
729 | old_state = isci_request_change_started_to_newstate(ireq, | ||
730 | &request_completion, | ||
731 | terminating); | ||
732 | switch (old_state) { | ||
733 | case started: | ||
734 | case completed: | ||
735 | case aborting: | ||
736 | break; | ||
737 | default: | ||
738 | /* termination in progress, or otherwise dispositioned. | ||
739 | * We know the request was on 'list' so should be safe | ||
740 | * to move it back to reqs_in_process | ||
741 | */ | ||
742 | list_move(&ireq->dev_node, &idev->reqs_in_process); | ||
743 | ireq = NULL; | ||
744 | break; | ||
745 | } | ||
746 | |||
747 | if (!ireq) | ||
748 | continue; | ||
749 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
750 | |||
751 | init_completion(&request_completion); | ||
752 | |||
753 | dev_dbg(&ihost->pdev->dev, | ||
754 | "%s: idev=%p request=%p; task=%p old_state=%d\n", | ||
755 | __func__, idev, ireq, | ||
756 | (!test_bit(IREQ_TMF, &ireq->flags) | ||
757 | ? isci_request_access_task(ireq) | ||
758 | : NULL), | ||
759 | old_state); | ||
760 | |||
761 | /* If the old_state is started: | ||
762 | * This request was not already being aborted. If it had been, | ||
763 | * then the aborting I/O (ie. the TMF request) would not be in | ||
764 | * the aborting state, and thus would be terminated here. Note | ||
765 | * that since the TMF completion's call to the kernel function | ||
766 | * "complete()" does not happen until the pending I/O request | ||
767 | * terminate fully completes, we do not have to implement a | ||
768 | * special wait here for already aborting requests - the | ||
769 | * termination of the TMF request will force the request | ||
770 | * to finish its already-started terminate. | ||
771 | * | ||
772 | * If old_state == completed: | ||
773 | * This request completed from the SCU hardware perspective | ||
774 | * and now just needs cleaning up in terms of freeing the | ||
775 | * request and potentially calling up to libsas. | ||
776 | * | ||
777 | * If old_state == aborting: | ||
778 | * This request has already gone through a TMF timeout, but may | ||
779 | * not have been terminated; needs cleaning up at least. | ||
780 | */ | ||
781 | isci_terminate_request_core(ihost, idev, ireq); | ||
782 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
783 | } | ||
784 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
785 | } | ||
786 | |||
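The list-walking terminator deleted above is replaced by a single per-device entry point that suspends the RNC before terminating; the reworked isci_task_lu_reset() below calls it with a NULL request, and from the "kill all TCs" comment there, NULL is read here as "terminate everything outstanding on the device", an inference rather than a documented contract. A sketch of the call shape:

#include "remote_device.h"  /* isci_remote_device_suspend_terminate() (assumed) */

/* Sketch: quiesce one device. Passing a NULL request is taken to
 * mean all of the device's outstanding requests (inferred from the
 * lu_reset hunk below).
 */
static int sketch_quiesce_device(struct isci_host *ihost,
                                 struct isci_remote_device *idev)
{
        if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
            != SCI_SUCCESS)
                return TMF_RESP_FUNC_FAILED;

        return TMF_RESP_FUNC_COMPLETE;
}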
787 | /** | ||
788 | * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain | 380 | * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
789 | * Template functions. | 381 | * Template functions. |
790 | * @lun: This parameter specifies the lun to be reset. | 382 | * @lun: This parameter specifies the lun to be reset. |
@@ -807,7 +399,7 @@ static int isci_task_send_lu_reset_sas( | |||
807 | * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or | 399 | * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or |
808 | * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). | 400 | * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). |
809 | */ | 401 | */ |
810 | isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL); | 402 | isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset); |
811 | 403 | ||
812 | #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ | 404 | #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ |
813 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); | 405 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); |
@@ -826,42 +418,44 @@ static int isci_task_send_lu_reset_sas( | |||
826 | 418 | ||
827 | int isci_task_lu_reset(struct domain_device *dev, u8 *lun) | 419 | int isci_task_lu_reset(struct domain_device *dev, u8 *lun) |
828 | { | 420 | { |
829 | struct isci_host *isci_host = dev_to_ihost(dev); | 421 | struct isci_host *ihost = dev_to_ihost(dev); |
830 | struct isci_remote_device *isci_device; | 422 | struct isci_remote_device *idev; |
831 | unsigned long flags; | 423 | unsigned long flags; |
832 | int ret; | 424 | int ret = TMF_RESP_FUNC_COMPLETE; |
833 | 425 | ||
834 | spin_lock_irqsave(&isci_host->scic_lock, flags); | 426 | spin_lock_irqsave(&ihost->scic_lock, flags); |
835 | isci_device = isci_lookup_device(dev); | 427 | idev = isci_get_device(dev->lldd_dev); |
836 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 428 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
837 | 429 | ||
838 | dev_dbg(&isci_host->pdev->dev, | 430 | dev_dbg(&ihost->pdev->dev, |
839 | "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", | 431 | "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", |
840 | __func__, dev, isci_host, isci_device); | 432 | __func__, dev, ihost, idev); |
841 | 433 | ||
842 | if (!isci_device) { | 434 | if (!idev) { |
843 | /* If the device is gone, stop the escalations. */ | 435 | /* If the device is gone, escalate to I_T_Nexus_Reset. */ |
844 | dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__); | 436 | dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__); |
845 | 437 | ||
846 | ret = TMF_RESP_FUNC_COMPLETE; | 438 | ret = TMF_RESP_FUNC_FAILED; |
847 | goto out; | 439 | goto out; |
848 | } | 440 | } |
849 | 441 | ||
850 | /* Send the task management part of the reset. */ | 442 | /* Suspend the RNC, kill all TCs */ |
851 | if (dev_is_sata(dev)) { | 443 | if (isci_remote_device_suspend_terminate(ihost, idev, NULL) |
852 | sas_ata_schedule_reset(dev); | 444 | != SCI_SUCCESS) { |
853 | ret = TMF_RESP_FUNC_COMPLETE; | 445 | /* The suspend/terminate only fails if isci_get_device fails */ |
854 | } else | 446 | ret = TMF_RESP_FUNC_FAILED; |
855 | ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun); | 447 | goto out; |
856 | 448 | } | |
857 | /* If the LUN reset worked, all the I/O can now be terminated. */ | 449 | /* All pending I/Os have been terminated and cleaned up. */ |
858 | if (ret == TMF_RESP_FUNC_COMPLETE) | 450 | if (!test_bit(IDEV_GONE, &idev->flags)) { |
859 | /* Terminate all I/O now. */ | 451 | if (dev_is_sata(dev)) |
860 | isci_terminate_pending_requests(isci_host, | 452 | sas_ata_schedule_reset(dev); |
861 | isci_device); | 453 | else |
862 | 454 | /* Send the task management part of the reset. */ | |
455 | ret = isci_task_send_lu_reset_sas(ihost, idev, lun); | ||
456 | } | ||
863 | out: | 457 | out: |
864 | isci_put_device(isci_device); | 458 | isci_put_device(idev); |
865 | return ret; | 459 | return ret; |
866 | } | 460 | } |
867 | 461 | ||
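Taken together, the rewritten isci_task_lu_reset() reduces to a fixed sequence: reference the device, quiesce and terminate, then reset only if the device is still present. A condensed view, assuming the helpers behave as described above:

	idev = isci_get_device(dev->lldd_dev);		/* take a device reference */
	if (!idev)
		return TMF_RESP_FUNC_FAILED;		/* escalate to I_T nexus reset */

	/* Quiesce the RNC and terminate all task contexts first. */
	if (isci_remote_device_suspend_terminate(ihost, idev, NULL) != SCI_SUCCESS) {
		isci_put_device(idev);
		return TMF_RESP_FUNC_FAILED;
	}

	if (!test_bit(IDEV_GONE, &idev->flags)) {
		if (dev_is_sata(dev))
			sas_ata_schedule_reset(dev);	/* libata owns SATA resets */
		else
			ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
	}
	isci_put_device(idev);				/* drop the reference */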
@@ -882,63 +476,6 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) | |||
882 | /* Task Management Functions. Must be called from process context. */ | 476 | /* Task Management Functions. Must be called from process context. */ |
883 | 477 | ||
884 | /** | 478 | /** |
885 | * isci_abort_task_process_cb() - This is a helper function for the abort task | ||
886 | * TMF command. It manages the request state with respect to the successful | ||
887 | * transmission / completion of the abort task request. | ||
888 | * @cb_state: This parameter specifies when this function was called - after | ||
889 | * the TMF request has been started and after it has timed-out. | ||
890 | * @tmf: This parameter specifies the TMF in progress. | ||
891 | * | ||
892 | * | ||
893 | */ | ||
894 | static void isci_abort_task_process_cb( | ||
895 | enum isci_tmf_cb_state cb_state, | ||
896 | struct isci_tmf *tmf, | ||
897 | void *cb_data) | ||
898 | { | ||
899 | struct isci_request *old_request; | ||
900 | |||
901 | old_request = (struct isci_request *)cb_data; | ||
902 | |||
903 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
904 | "%s: tmf=%p, old_request=%p\n", | ||
905 | __func__, tmf, old_request); | ||
906 | |||
907 | switch (cb_state) { | ||
908 | |||
909 | case isci_tmf_started: | ||
910 | /* The TMF has been started. Nothing to do here, since the | ||
911 | * request state was already set to "aborted" by the abort | ||
912 | * task function. | ||
913 | */ | ||
914 | if ((old_request->status != aborted) | ||
915 | && (old_request->status != completed)) | ||
916 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
917 | "%s: Bad request status (%d): tmf=%p, old_request=%p\n", | ||
918 | __func__, old_request->status, tmf, old_request); | ||
919 | break; | ||
920 | |||
921 | case isci_tmf_timed_out: | ||
922 | |||
923 | /* Set the task's state to "aborting", since the abort task | ||
924 | * function thread set it to "aborted" (above) in anticipation | ||
925 | * of the task management request working correctly. Since the | ||
926 | * timeout has now fired, the TMF request failed. We set the | ||
927 | * state such that the request completion will indicate the | ||
928 | * device is no longer present. | ||
929 | */ | ||
930 | isci_request_change_state(old_request, aborting); | ||
931 | break; | ||
932 | |||
933 | default: | ||
934 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
935 | "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n", | ||
936 | __func__, cb_state, tmf, old_request); | ||
937 | break; | ||
938 | } | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * isci_task_abort_task() - This function is one of the SAS Domain Template | 479 | * isci_task_abort_task() - This function is one of the SAS Domain Template |
943 | * functions. This function is called by libsas to abort a specified task. | 480 | * functions. This function is called by libsas to abort a specified task. |
944 | * @task: This parameter specifies the SAS task to abort. | 481 | * @task: This parameter specifies the SAS task to abort. |
@@ -947,22 +484,20 @@ static void isci_abort_task_process_cb( | |||
947 | */ | 484 | */ |
948 | int isci_task_abort_task(struct sas_task *task) | 485 | int isci_task_abort_task(struct sas_task *task) |
949 | { | 486 | { |
950 | struct isci_host *isci_host = dev_to_ihost(task->dev); | 487 | struct isci_host *ihost = dev_to_ihost(task->dev); |
951 | DECLARE_COMPLETION_ONSTACK(aborted_io_completion); | 488 | DECLARE_COMPLETION_ONSTACK(aborted_io_completion); |
952 | struct isci_request *old_request = NULL; | 489 | struct isci_request *old_request = NULL; |
953 | enum isci_request_status old_state; | 490 | struct isci_remote_device *idev = NULL; |
954 | struct isci_remote_device *isci_device = NULL; | ||
955 | struct isci_tmf tmf; | 491 | struct isci_tmf tmf; |
956 | int ret = TMF_RESP_FUNC_FAILED; | 492 | int ret = TMF_RESP_FUNC_FAILED; |
957 | unsigned long flags; | 493 | unsigned long flags; |
958 | int perform_termination = 0; | ||
959 | 494 | ||
960 | /* Get the isci_request reference from the task. Note that | 495 | /* Get the isci_request reference from the task. Note that |
961 | * this check does not depend on the pending request list | 496 | * this check does not depend on the pending request list |
962 | * in the device, because tasks driving resets may land here | 497 | * in the device, because tasks driving resets may land here |
963 | * after completion in the core. | 498 | * after completion in the core. |
964 | */ | 499 | */ |
965 | spin_lock_irqsave(&isci_host->scic_lock, flags); | 500 | spin_lock_irqsave(&ihost->scic_lock, flags); |
966 | spin_lock(&task->task_state_lock); | 501 | spin_lock(&task->task_state_lock); |
967 | 502 | ||
968 | old_request = task->lldd_task; | 503 | old_request = task->lldd_task; |
@@ -971,20 +506,29 @@ int isci_task_abort_task(struct sas_task *task) | |||
971 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && | 506 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && |
972 | (task->task_state_flags & SAS_TASK_AT_INITIATOR) && | 507 | (task->task_state_flags & SAS_TASK_AT_INITIATOR) && |
973 | old_request) | 508 | old_request) |
974 | isci_device = isci_lookup_device(task->dev); | 509 | idev = isci_get_device(task->dev->lldd_dev); |
975 | 510 | ||
976 | spin_unlock(&task->task_state_lock); | 511 | spin_unlock(&task->task_state_lock); |
977 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 512 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
978 | 513 | ||
979 | dev_dbg(&isci_host->pdev->dev, | 514 | dev_warn(&ihost->pdev->dev, |
980 | "%s: dev = %p, task = %p, old_request == %p\n", | 515 | "%s: dev = %p (%s%s), task = %p, old_request == %p\n", |
981 | __func__, isci_device, task, old_request); | 516 | __func__, idev, |
517 | (dev_is_sata(task->dev) ? "STP/SATA" | ||
518 | : ((dev_is_expander(task->dev)) | ||
519 | ? "SMP" | ||
520 | : "SSP")), | ||
521 | ((idev) ? ((test_bit(IDEV_GONE, &idev->flags)) | ||
522 | ? " IDEV_GONE" | ||
523 | : "") | ||
524 | : " <NULL>"), | ||
525 | task, old_request); | ||
982 | 526 | ||
983 | /* Device reset conditions signalled in task_state_flags are the | 527 | /* Device reset conditions signalled in task_state_flags are the |
984 | * responsibility of libsas to observe at the start of the error | 528 | * responsibility of libsas to observe at the start of the error |
985 | * handler thread. | 529 | * handler thread. |
986 | */ | 530 | */ |
987 | if (!isci_device || !old_request) { | 531 | if (!idev || !old_request) { |
988 | /* The request has already completed and there | 532 | /* The request has already completed and there |
989 | * is nothing to do here other than to set the task | 533 | * is nothing to do here other than to set the task |
990 | * done bit, and indicate that the task abort function | 534 | * done bit, and indicate that the task abort function |
@@ -998,108 +542,72 @@ int isci_task_abort_task(struct sas_task *task) | |||
998 | 542 | ||
999 | ret = TMF_RESP_FUNC_COMPLETE; | 543 | ret = TMF_RESP_FUNC_COMPLETE; |
1000 | 544 | ||
1001 | dev_dbg(&isci_host->pdev->dev, | 545 | dev_warn(&ihost->pdev->dev, |
1002 | "%s: abort task not needed for %p\n", | 546 | "%s: abort task not needed for %p\n", |
1003 | __func__, task); | 547 | __func__, task); |
1004 | goto out; | 548 | goto out; |
1005 | } | 549 | } |
1006 | 550 | /* Suspend the RNC, kill the TC */ | |
1007 | spin_lock_irqsave(&isci_host->scic_lock, flags); | 551 | if (isci_remote_device_suspend_terminate(ihost, idev, old_request) |
1008 | 552 | != SCI_SUCCESS) { | |
1009 | /* Check the request status and change to "aborted" if currently | 553 | dev_warn(&ihost->pdev->dev, |
1010 | * "starting"; if true then set the I/O kernel completion | 554 | "%s: isci_remote_device_reset_terminate(dev=%p, " |
1011 | * struct that will be triggered when the request completes. | 555 | "req=%p, task=%p) failed\n", |
1012 | */ | 556 | __func__, idev, old_request, task); |
1013 | old_state = isci_task_validate_request_to_abort( | 557 | ret = TMF_RESP_FUNC_FAILED; |
1014 | old_request, isci_host, isci_device, | ||
1015 | &aborted_io_completion); | ||
1016 | if ((old_state != started) && | ||
1017 | (old_state != completed) && | ||
1018 | (old_state != aborting)) { | ||
1019 | |||
1020 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | ||
1021 | |||
1022 | /* The request was already being handled by someone else (because | ||
1023 | * they got to set the state away from started). | ||
1024 | */ | ||
1025 | dev_dbg(&isci_host->pdev->dev, | ||
1026 | "%s: device = %p; old_request %p already being aborted\n", | ||
1027 | __func__, | ||
1028 | isci_device, old_request); | ||
1029 | ret = TMF_RESP_FUNC_COMPLETE; | ||
1030 | goto out; | 558 | goto out; |
1031 | } | 559 | } |
560 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
561 | |||
1032 | if (task->task_proto == SAS_PROTOCOL_SMP || | 562 | if (task->task_proto == SAS_PROTOCOL_SMP || |
1033 | sas_protocol_ata(task->task_proto) || | 563 | sas_protocol_ata(task->task_proto) || |
1034 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { | 564 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) || |
565 | test_bit(IDEV_GONE, &idev->flags)) { | ||
1035 | 566 | ||
1036 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 567 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1037 | 568 | ||
1038 | dev_dbg(&isci_host->pdev->dev, | 569 | /* No task to send, so explicitly resume the device here */ |
1039 | "%s: %s request" | 570 | isci_remote_device_resume_from_abort(ihost, idev); |
1040 | " or complete_in_target (%d), thus no TMF\n", | ||
1041 | __func__, | ||
1042 | ((task->task_proto == SAS_PROTOCOL_SMP) | ||
1043 | ? "SMP" | ||
1044 | : (sas_protocol_ata(task->task_proto) | ||
1045 | ? "SATA/STP" | ||
1046 | : "<other>") | ||
1047 | ), | ||
1048 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)); | ||
1049 | |||
1050 | if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { | ||
1051 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1052 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
1053 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
1054 | SAS_TASK_STATE_PENDING); | ||
1055 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1056 | ret = TMF_RESP_FUNC_COMPLETE; | ||
1057 | } else { | ||
1058 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1059 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
1060 | SAS_TASK_STATE_PENDING); | ||
1061 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1062 | } | ||
1063 | 571 | ||
1064 | /* STP and SMP devices are not sent a TMF, but the | 572 | dev_warn(&ihost->pdev->dev, |
1065 | * outstanding I/O request is terminated below. This is | 573 | "%s: %s request" |
1066 | * because SATA/STP and SMP discovery path timeouts directly | 574 | " or complete_in_target (%d), " |
1067 | * call the abort task interface for cleanup. | 575 | "or IDEV_GONE (%d), thus no TMF\n", |
1068 | */ | 576 | __func__, |
1069 | perform_termination = 1; | 577 | ((task->task_proto == SAS_PROTOCOL_SMP) |
578 | ? "SMP" | ||
579 | : (sas_protocol_ata(task->task_proto) | ||
580 | ? "SATA/STP" | ||
581 | : "<other>") | ||
582 | ), | ||
583 | test_bit(IREQ_COMPLETE_IN_TARGET, | ||
584 | &old_request->flags), | ||
585 | test_bit(IDEV_GONE, &idev->flags)); | ||
586 | |||
587 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
588 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
589 | SAS_TASK_STATE_PENDING); | ||
590 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
591 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1070 | 592 | ||
593 | ret = TMF_RESP_FUNC_COMPLETE; | ||
1071 | } else { | 594 | } else { |
1072 | /* Fill in the tmf structure */ | 595 | /* Fill in the tmf structure */ |
1073 | isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, | 596 | isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, |
1074 | isci_abort_task_process_cb, | ||
1075 | old_request); | 597 | old_request); |
1076 | 598 | ||
1077 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 599 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1078 | 600 | ||
601 | /* Send the task management request. */ | ||
1079 | #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ | 602 | #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ |
1080 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, | 603 | ret = isci_task_execute_tmf(ihost, idev, &tmf, |
1081 | ISCI_ABORT_TASK_TIMEOUT_MS); | 604 | ISCI_ABORT_TASK_TIMEOUT_MS); |
1082 | |||
1083 | if (ret == TMF_RESP_FUNC_COMPLETE) | ||
1084 | perform_termination = 1; | ||
1085 | else | ||
1086 | dev_dbg(&isci_host->pdev->dev, | ||
1087 | "%s: isci_task_send_tmf failed\n", __func__); | ||
1088 | } | 605 | } |
1089 | if (perform_termination) { | 606 | out: |
1090 | set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); | 607 | dev_warn(&ihost->pdev->dev, |
1091 | 608 | "%s: Done; dev = %p, task = %p , old_request == %p\n", | |
1092 | /* Clean up the request on our side, and wait for the aborted | 609 | __func__, idev, task, old_request); |
1093 | * I/O to complete. | 610 | isci_put_device(idev); |
1094 | */ | ||
1095 | isci_terminate_request_core(isci_host, isci_device, | ||
1096 | old_request); | ||
1097 | } | ||
1098 | |||
1099 | /* Make sure we do not leave a reference to aborted_io_completion */ | ||
1100 | old_request->io_request_completion = NULL; | ||
1101 | out: | ||
1102 | isci_put_device(isci_device); | ||
1103 | return ret; | 611 | return ret; |
1104 | } | 612 | } |
1105 | 613 | ||
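The abort path now resolves to three mutually exclusive outcomes. A condensed decision sketch (no_tmf_possible stands in for the SMP / ATA / complete-in-target / IDEV_GONE tests above and is not a real symbol):

	if (!idev || !old_request) {
		ret = TMF_RESP_FUNC_COMPLETE;	/* nothing left to abort */
	} else if (no_tmf_possible) {
		/* No TMF can be sent, so the device must be resumed by hand. */
		isci_remote_device_resume_from_abort(ihost, idev);
		ret = TMF_RESP_FUNC_COMPLETE;
	} else {
		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
					       old_request);
		ret = isci_task_execute_tmf(ihost, idev, &tmf,
					    ISCI_ABORT_TASK_TIMEOUT_MS);
	}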
@@ -1195,14 +703,11 @@ isci_task_request_complete(struct isci_host *ihost, | |||
1195 | { | 703 | { |
1196 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | 704 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); |
1197 | struct completion *tmf_complete = NULL; | 705 | struct completion *tmf_complete = NULL; |
1198 | struct completion *request_complete = ireq->io_request_completion; | ||
1199 | 706 | ||
1200 | dev_dbg(&ihost->pdev->dev, | 707 | dev_dbg(&ihost->pdev->dev, |
1201 | "%s: request = %p, status=%d\n", | 708 | "%s: request = %p, status=%d\n", |
1202 | __func__, ireq, completion_status); | 709 | __func__, ireq, completion_status); |
1203 | 710 | ||
1204 | isci_request_change_state(ireq, completed); | ||
1205 | |||
1206 | set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); | 711 | set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); |
1207 | 712 | ||
1208 | if (tmf) { | 713 | if (tmf) { |
@@ -1226,20 +731,11 @@ isci_task_request_complete(struct isci_host *ihost, | |||
1226 | */ | 731 | */ |
1227 | set_bit(IREQ_TERMINATED, &ireq->flags); | 732 | set_bit(IREQ_TERMINATED, &ireq->flags); |
1228 | 733 | ||
1229 | /* As soon as something is in the terminate path, deallocation is | 734 | if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) |
1230 | * managed there. Note that the final non-managed state of a task | 735 | wake_up_all(&ihost->eventq); |
1231 | * request is "completed". | ||
1232 | */ | ||
1233 | if ((ireq->status == completed) || | ||
1234 | !isci_request_is_dealloc_managed(ireq->status)) { | ||
1235 | isci_request_change_state(ireq, unallocated); | ||
1236 | isci_free_tag(ihost, ireq->io_tag); | ||
1237 | list_del_init(&ireq->dev_node); | ||
1238 | } | ||
1239 | 736 | ||
1240 | /* "request_complete" is set if the task was being terminated. */ | 737 | if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) |
1241 | if (request_complete) | 738 | isci_free_tag(ihost, ireq->io_tag); |
1242 | complete(request_complete); | ||
1243 | 739 | ||
1244 | /* The task management part completes last. */ | 740 | /* The task management part completes last. */ |
1245 | if (tmf_complete) | 741 | if (tmf_complete) |
@@ -1250,48 +746,38 @@ static int isci_reset_device(struct isci_host *ihost, | |||
1250 | struct domain_device *dev, | 746 | struct domain_device *dev, |
1251 | struct isci_remote_device *idev) | 747 | struct isci_remote_device *idev) |
1252 | { | 748 | { |
1253 | int rc; | 749 | int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1; |
1254 | unsigned long flags; | ||
1255 | enum sci_status status; | ||
1256 | struct sas_phy *phy = sas_get_local_phy(dev); | 750 | struct sas_phy *phy = sas_get_local_phy(dev); |
1257 | struct isci_port *iport = dev->port->lldd_port; | 751 | struct isci_port *iport = dev->port->lldd_port; |
1258 | 752 | ||
1259 | dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); | 753 | dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); |
1260 | 754 | ||
1261 | spin_lock_irqsave(&ihost->scic_lock, flags); | 755 | /* Suspend the RNC, terminate all outstanding TCs. */ |
1262 | status = sci_remote_device_reset(idev); | 756 | if (isci_remote_device_suspend_terminate(ihost, idev, NULL) |
1263 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 757 | != SCI_SUCCESS) { |
1264 | |||
1265 | if (status != SCI_SUCCESS) { | ||
1266 | dev_dbg(&ihost->pdev->dev, | ||
1267 | "%s: sci_remote_device_reset(%p) returned %d!\n", | ||
1268 | __func__, idev, status); | ||
1269 | rc = TMF_RESP_FUNC_FAILED; | 758 | rc = TMF_RESP_FUNC_FAILED; |
1270 | goto out; | 759 | goto out; |
1271 | } | 760 | } |
1272 | 761 | /* Note that since the termination for outstanding requests succeeded, | |
1273 | if (scsi_is_sas_phy_local(phy)) { | 762 | * this function will return success. This is because the resets will |
1274 | struct isci_phy *iphy = &ihost->phys[phy->number]; | 763 | * only fail if the device has been removed (ie. hotplug), and the |
1275 | 764 | * primary duty of this function is to clean up tasks, so that is the | |
1276 | rc = isci_port_perform_hard_reset(ihost, iport, iphy); | 765 | * relevant status. |
1277 | } else | 766 | */ |
1278 | rc = sas_phy_reset(phy, !dev_is_sata(dev)); | 767 | if (!test_bit(IDEV_GONE, &idev->flags)) { |
1279 | 768 | if (scsi_is_sas_phy_local(phy)) { | |
1280 | /* Terminate in-progress I/O now. */ | 769 | struct isci_phy *iphy = &ihost->phys[phy->number]; |
1281 | isci_remote_device_nuke_requests(ihost, idev); | 770 | |
1282 | 771 | reset_stat = isci_port_perform_hard_reset(ihost, iport, | |
1283 | /* Since all pending TCs have been cleaned, resume the RNC. */ | 772 | iphy); |
1284 | spin_lock_irqsave(&ihost->scic_lock, flags); | 773 | } else |
1285 | status = sci_remote_device_reset_complete(idev); | 774 | reset_stat = sas_phy_reset(phy, !dev_is_sata(dev)); |
1286 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
1287 | |||
1288 | if (status != SCI_SUCCESS) { | ||
1289 | dev_dbg(&ihost->pdev->dev, | ||
1290 | "%s: sci_remote_device_reset_complete(%p) " | ||
1291 | "returned %d!\n", __func__, idev, status); | ||
1292 | } | 775 | } |
776 | /* Explicitly resume the RNC here, since there was no task sent. */ | ||
777 | isci_remote_device_resume_from_abort(ihost, idev); | ||
1293 | 778 | ||
1294 | dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev); | 779 | dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n", |
780 | __func__, idev, reset_stat); | ||
1295 | out: | 781 | out: |
1296 | sas_put_local_phy(phy); | 782 | sas_put_local_phy(phy); |
1297 | return rc; | 783 | return rc; |
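In condensed form, the new isci_reset_device() always terminates first, resets only if the device is still present, and resumes unconditionally; a sketch under those assumptions:

	if (isci_remote_device_suspend_terminate(ihost, idev, NULL) != SCI_SUCCESS)
		return TMF_RESP_FUNC_FAILED;	/* device reference was gone */

	if (!test_bit(IDEV_GONE, &idev->flags))
		reset_stat = scsi_is_sas_phy_local(phy)
			? isci_port_perform_hard_reset(ihost, iport, iphy)
			: sas_phy_reset(phy, !dev_is_sata(dev));

	/* No task was sent, so the RNC must be resumed explicitly. */
	isci_remote_device_resume_from_abort(ihost, idev);
	return TMF_RESP_FUNC_COMPLETE;		/* task cleanup succeeded */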
@@ -1305,7 +791,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev) | |||
1305 | int ret; | 791 | int ret; |
1306 | 792 | ||
1307 | spin_lock_irqsave(&ihost->scic_lock, flags); | 793 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1308 | idev = isci_lookup_device(dev); | 794 | idev = isci_get_device(dev->lldd_dev); |
1309 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 795 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1310 | 796 | ||
1311 | if (!idev) { | 797 | if (!idev) { |
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index 7b6d0e32fd9b..9c06cbad1d26 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h | |||
@@ -63,19 +63,6 @@ | |||
63 | struct isci_request; | 63 | struct isci_request; |
64 | 64 | ||
65 | /** | 65 | /** |
66 | * enum isci_tmf_cb_state - This enum defines the possible states in which the | ||
67 | * TMF callback function is invoked during the TMF execution process. | ||
68 | * | ||
69 | * | ||
70 | */ | ||
71 | enum isci_tmf_cb_state { | ||
72 | |||
73 | isci_tmf_init_state = 0, | ||
74 | isci_tmf_started, | ||
75 | isci_tmf_timed_out | ||
76 | }; | ||
77 | |||
78 | /** | ||
79 | * enum isci_tmf_function_codes - This enum defines the possible preparations | 66 | * enum isci_tmf_function_codes - This enum defines the possible preparations |
80 | * of task management requests. | 67 | * of task management requests. |
81 | * | 68 | * |
@@ -87,6 +74,7 @@ enum isci_tmf_function_codes { | |||
87 | isci_tmf_ssp_task_abort = TMF_ABORT_TASK, | 74 | isci_tmf_ssp_task_abort = TMF_ABORT_TASK, |
88 | isci_tmf_ssp_lun_reset = TMF_LU_RESET, | 75 | isci_tmf_ssp_lun_reset = TMF_LU_RESET, |
89 | }; | 76 | }; |
77 | |||
90 | /** | 78 | /** |
91 | * struct isci_tmf - This class represents the task management object which | 79 | * struct isci_tmf - This class represents the task management object which |
92 | * acts as an interface to libsas for processing task management requests | 80 | * acts as an interface to libsas for processing task management requests |
@@ -106,15 +94,6 @@ struct isci_tmf { | |||
106 | u16 io_tag; | 94 | u16 io_tag; |
107 | enum isci_tmf_function_codes tmf_code; | 95 | enum isci_tmf_function_codes tmf_code; |
108 | int status; | 96 | int status; |
109 | |||
110 | /* The optional callback function allows the user process to | ||
111 | * track the TMF transmit / timeout conditions. | ||
112 | */ | ||
113 | void (*cb_state_func)( | ||
114 | enum isci_tmf_cb_state, | ||
115 | struct isci_tmf *, void *); | ||
116 | void *cb_data; | ||
117 | |||
118 | }; | 97 | }; |
119 | 98 | ||
120 | static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) | 99 | static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) |
@@ -208,113 +187,4 @@ int isci_queuecommand( | |||
208 | struct scsi_cmnd *scsi_cmd, | 187 | struct scsi_cmnd *scsi_cmd, |
209 | void (*donefunc)(struct scsi_cmnd *)); | 188 | void (*donefunc)(struct scsi_cmnd *)); |
210 | 189 | ||
211 | /** | ||
212 | * enum isci_completion_selection - This enum defines the possible actions to | ||
213 | * take with respect to a given request's notification back to libsas. | ||
214 | * | ||
215 | * | ||
216 | */ | ||
217 | enum isci_completion_selection { | ||
218 | |||
219 | isci_perform_normal_io_completion, /* Normal notify (task_done) */ | ||
220 | isci_perform_aborted_io_completion, /* No notification. */ | ||
221 | isci_perform_error_io_completion /* Use sas_task_abort */ | ||
222 | }; | ||
223 | |||
224 | /** | ||
225 | * isci_task_set_completion_status() - This function sets the completion status | ||
226 | * for the request. | ||
227 | * @task: This parameter is the completed request. | ||
228 | * @response: This parameter is the response code for the completed task. | ||
229 | * @status: This parameter is the status code for the completed task. | ||
230 | * | ||
231 | * @return The new notification mode for the request. | ||
232 | */ | ||
233 | static inline enum isci_completion_selection | ||
234 | isci_task_set_completion_status( | ||
235 | struct sas_task *task, | ||
236 | enum service_response response, | ||
237 | enum exec_status status, | ||
238 | enum isci_completion_selection task_notification_selection) | ||
239 | { | ||
240 | unsigned long flags; | ||
241 | |||
242 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
243 | |||
244 | /* If a device reset is being indicated, make sure the I/O | ||
245 | * is in the error path. | ||
246 | */ | ||
247 | if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { | ||
248 | /* Fail the I/O to make sure it goes into the error path. */ | ||
249 | response = SAS_TASK_UNDELIVERED; | ||
250 | status = SAM_STAT_TASK_ABORTED; | ||
251 | |||
252 | task_notification_selection = isci_perform_error_io_completion; | ||
253 | } | ||
254 | task->task_status.resp = response; | ||
255 | task->task_status.stat = status; | ||
256 | |||
257 | switch (task->task_proto) { | ||
258 | |||
259 | case SAS_PROTOCOL_SATA: | ||
260 | case SAS_PROTOCOL_STP: | ||
261 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: | ||
262 | |||
263 | if (task_notification_selection | ||
264 | == isci_perform_error_io_completion) { | ||
265 | * SATA/STP I/O has its own means of scheduling device | ||
266 | * error handling on the normal path. | ||
267 | */ | ||
268 | task_notification_selection | ||
269 | = isci_perform_normal_io_completion; | ||
270 | } | ||
271 | break; | ||
272 | default: | ||
273 | break; | ||
274 | } | ||
275 | |||
276 | switch (task_notification_selection) { | ||
277 | |||
278 | case isci_perform_error_io_completion: | ||
279 | |||
280 | if (task->task_proto == SAS_PROTOCOL_SMP) { | ||
281 | /* There is no error escalation in the SMP case. | ||
282 | * Convert to a normal completion to avoid the | ||
283 | * timeout in the discovery path and to let the | ||
284 | * next action take place quickly. | ||
285 | */ | ||
286 | task_notification_selection | ||
287 | = isci_perform_normal_io_completion; | ||
288 | |||
289 | /* Fall through to the normal case... */ | ||
290 | } else { | ||
291 | /* Use sas_task_abort */ | ||
292 | /* Leave SAS_TASK_STATE_DONE clear | ||
293 | * Leave SAS_TASK_AT_INITIATOR set. | ||
294 | */ | ||
295 | break; | ||
296 | } | ||
297 | |||
298 | case isci_perform_aborted_io_completion: | ||
299 | /* This path can occur with task-managed requests as well as | ||
300 | * requests terminated because of LUN or device resets. | ||
301 | */ | ||
302 | /* Fall through to the normal case... */ | ||
303 | case isci_perform_normal_io_completion: | ||
304 | /* Normal notification (task_done) */ | ||
305 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
306 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
307 | SAS_TASK_STATE_PENDING); | ||
308 | break; | ||
309 | default: | ||
310 | WARN_ONCE(1, "unknown task_notification_selection: %d\n", | ||
311 | task_notification_selection); | ||
312 | break; | ||
313 | } | ||
314 | |||
315 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
316 | |||
317 | return task_notification_selection; | ||
318 | |||
319 | } | ||
320 | #endif /* !defined(_SCI_TASK_H_) */ | 190 | #endif /* !defined(_SCI_TASK_H_) */ |
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index 16f88ab939c8..04a6d0d59a22 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c | |||
@@ -57,31 +57,19 @@ | |||
57 | #include "unsolicited_frame_control.h" | 57 | #include "unsolicited_frame_control.h" |
58 | #include "registers.h" | 58 | #include "registers.h" |
59 | 59 | ||
60 | int sci_unsolicited_frame_control_construct(struct isci_host *ihost) | 60 | void sci_unsolicited_frame_control_construct(struct isci_host *ihost) |
61 | { | 61 | { |
62 | struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; | 62 | struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; |
63 | struct sci_unsolicited_frame *uf; | 63 | struct sci_unsolicited_frame *uf; |
64 | u32 buf_len, header_len, i; | 64 | dma_addr_t dma = ihost->ufi_dma; |
65 | dma_addr_t dma; | 65 | void *virt = ihost->ufi_buf; |
66 | size_t size; | 66 | int i; |
67 | void *virt; | ||
68 | |||
69 | /* | ||
70 | * Prepare all of the memory sizes for the UF headers, UF address | ||
71 | * table, and UF buffers themselves. | ||
72 | */ | ||
73 | buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; | ||
74 | header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); | ||
75 | size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]); | ||
76 | 67 | ||
77 | /* | 68 | /* |
78 | * The Unsolicited Frame buffers are set at the start of the UF | 69 | * The Unsolicited Frame buffers are set at the start of the UF |
79 | * memory descriptor entry. The headers and address table will be | 70 | * memory descriptor entry. The headers and address table will be |
80 | * placed after the buffers. | 71 | * placed after the buffers. |
81 | */ | 72 | */ |
82 | virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL); | ||
83 | if (!virt) | ||
84 | return -ENOMEM; | ||
85 | 73 | ||
86 | /* | 74 | /* |
87 | * Program the location of the UF header table into the SCU. | 75 | * Program the location of the UF header table into the SCU. |
@@ -93,8 +81,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) | |||
93 | * headers, since we program the UF address table pointers to | 81 | * headers, since we program the UF address table pointers to |
94 | * NULL. | 82 | * NULL. |
95 | */ | 83 | */ |
96 | uf_control->headers.physical_address = dma + buf_len; | 84 | uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE; |
97 | uf_control->headers.array = virt + buf_len; | 85 | uf_control->headers.array = virt + SCI_UFI_BUF_SIZE; |
98 | 86 | ||
99 | /* | 87 | /* |
100 | * Program the location of the UF address table into the SCU. | 88 | * Program the location of the UF address table into the SCU. |
@@ -103,8 +91,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) | |||
103 | * byte boundary already due to above programming headers being on a | 91 | * byte boundary already due to above programming headers being on a |
104 | * 64-bit boundary and headers are 64 bytes in size. | 92 | * 64-bit boundary and headers are 64 bytes in size. |
105 | */ | 93 | */ |
106 | uf_control->address_table.physical_address = dma + buf_len + header_len; | 94 | uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; |
107 | uf_control->address_table.array = virt + buf_len + header_len; | 95 | uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; |
108 | uf_control->get = 0; | 96 | uf_control->get = 0; |
109 | 97 | ||
110 | /* | 98 | /* |
@@ -135,8 +123,6 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) | |||
135 | virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; | 123 | virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; |
136 | dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; | 124 | dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; |
137 | } | 125 | } |
138 | |||
139 | return 0; | ||
140 | } | 126 | } |
141 | 127 | ||
142 | enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, | 128 | enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, |
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 75d896686f5a..1bc551ec611f 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h | |||
@@ -257,9 +257,13 @@ struct sci_unsolicited_frame_control { | |||
257 | 257 | ||
258 | }; | 258 | }; |
259 | 259 | ||
260 | #define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE) | ||
261 | #define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header)) | ||
262 | #define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64)) | ||
263 | |||
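Since the construct routine no longer allocates, the host-init path is expected to carve out the whole region once, using SCI_UFI_TOTAL_SIZE, and stash it in ihost->ufi_buf / ihost->ufi_dma before calling the now-infallible constructor. A plausible caller, sketched (the actual allocation site is not shown in these hunks):

	ihost->ufi_buf = dmam_alloc_coherent(&ihost->pdev->dev, SCI_UFI_TOTAL_SIZE,
					     &ihost->ufi_dma, GFP_KERNEL);
	if (!ihost->ufi_buf)
		return -ENOMEM;

	/* Region layout: [frame buffers][frame headers][address table]. */
	sci_unsolicited_frame_control_construct(ihost);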
260 | struct isci_host; | 264 | struct isci_host; |
261 | 265 | ||
262 | int sci_unsolicited_frame_control_construct(struct isci_host *ihost); | 266 | void sci_unsolicited_frame_control_construct(struct isci_host *ihost); |
263 | 267 | ||
264 | enum sci_status sci_unsolicited_frame_control_get_header( | 268 | enum sci_status sci_unsolicited_frame_control_get_header( |
265 | struct sci_unsolicited_frame_control *uf_control, | 269 | struct sci_unsolicited_frame_control *uf_control, |
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index fb6610b249e1..c1402fb499ab 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -1742,17 +1742,19 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1742 | 1742 | ||
1743 | mfs = ntohs(flp->fl_csp.sp_bb_data) & | 1743 | mfs = ntohs(flp->fl_csp.sp_bb_data) & |
1744 | FC_SP_BB_DATA_MASK; | 1744 | FC_SP_BB_DATA_MASK; |
1745 | if (mfs >= FC_SP_MIN_MAX_PAYLOAD && | 1745 | |
1746 | mfs <= lport->mfs) { | 1746 | if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) { |
1747 | lport->mfs = mfs; | ||
1748 | fc_host_maxframe_size(lport->host) = mfs; | ||
1749 | } else { | ||
1750 | FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " | 1747 | FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " |
1751 | "lport->mfs:%hu\n", mfs, lport->mfs); | 1748 | "lport->mfs:%hu\n", mfs, lport->mfs); |
1752 | fc_lport_error(lport, fp); | 1749 | fc_lport_error(lport, fp); |
1753 | goto err; | 1750 | goto err; |
1754 | } | 1751 | } |
1755 | 1752 | ||
1753 | if (mfs <= lport->mfs) { | ||
1754 | lport->mfs = mfs; | ||
1755 | fc_host_maxframe_size(lport->host) = mfs; | ||
1756 | } | ||
1757 | |||
1756 | csp_flags = ntohs(flp->fl_csp.sp_features); | 1758 | csp_flags = ntohs(flp->fl_csp.sp_features); |
1757 | r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); | 1759 | r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); |
1758 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); | 1760 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); |
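The reordered FLOGI handling first rejects an out-of-spec mfs outright, and only then lowers the local maximum, never raising it. Condensed:

	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
	if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
		fc_lport_error(lport, fp);	/* reject bogus frame sizes */
		goto err;
	}
	if (mfs <= lport->mfs) {		/* only ever shrink the local mfs */
		lport->mfs = mfs;
		fc_host_maxframe_size(lport->host) = mfs;
	}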
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index bc0cecc6ad62..441d88ad99a7 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -546,11 +546,12 @@ static struct ata_port_info sata_port_info = { | |||
546 | .port_ops = &sas_sata_ops | 546 | .port_ops = &sas_sata_ops |
547 | }; | 547 | }; |
548 | 548 | ||
549 | int sas_ata_init_host_and_port(struct domain_device *found_dev) | 549 | int sas_ata_init(struct domain_device *found_dev) |
550 | { | 550 | { |
551 | struct sas_ha_struct *ha = found_dev->port->ha; | 551 | struct sas_ha_struct *ha = found_dev->port->ha; |
552 | struct Scsi_Host *shost = ha->core.shost; | 552 | struct Scsi_Host *shost = ha->core.shost; |
553 | struct ata_port *ap; | 553 | struct ata_port *ap; |
554 | int rc; | ||
554 | 555 | ||
555 | ata_host_init(&found_dev->sata_dev.ata_host, | 556 | ata_host_init(&found_dev->sata_dev.ata_host, |
556 | ha->dev, | 557 | ha->dev, |
@@ -567,8 +568,11 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev) | |||
567 | ap->private_data = found_dev; | 568 | ap->private_data = found_dev; |
568 | ap->cbl = ATA_CBL_SATA; | 569 | ap->cbl = ATA_CBL_SATA; |
569 | ap->scsi_host = shost; | 570 | ap->scsi_host = shost; |
570 | /* publish initialized ata port */ | 571 | rc = ata_sas_port_init(ap); |
571 | smp_wmb(); | 572 | if (rc) { |
573 | ata_sas_port_destroy(ap); | ||
574 | return rc; | ||
575 | } | ||
572 | found_dev->sata_dev.ap = ap; | 576 | found_dev->sata_dev.ap = ap; |
573 | 577 | ||
574 | return 0; | 578 | return 0; |
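The renamed sas_ata_init() now pairs port setup with explicit unwinding and publishes the port only after init succeeds, replacing the old smp_wmb() publication. In outline (the port-allocation step is assumed from context rather than shown in this hunk):

	ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host, &sata_port_info,
				shost);
	if (!ap)
		return -ENODEV;
	rc = ata_sas_port_init(ap);
	if (rc) {
		ata_sas_port_destroy(ap);	/* undo the allocation on failure */
		return rc;
	}
	found_dev->sata_dev.ap = ap;		/* publish only once fully initialized */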
@@ -648,18 +652,13 @@ static void sas_get_ata_command_set(struct domain_device *dev) | |||
648 | void sas_probe_sata(struct asd_sas_port *port) | 652 | void sas_probe_sata(struct asd_sas_port *port) |
649 | { | 653 | { |
650 | struct domain_device *dev, *n; | 654 | struct domain_device *dev, *n; |
651 | int err; | ||
652 | 655 | ||
653 | mutex_lock(&port->ha->disco_mutex); | 656 | mutex_lock(&port->ha->disco_mutex); |
654 | list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { | 657 | list_for_each_entry(dev, &port->disco_list, disco_list_node) { |
655 | if (!dev_is_sata(dev)) | 658 | if (!dev_is_sata(dev)) |
656 | continue; | 659 | continue; |
657 | 660 | ||
658 | err = sas_ata_init_host_and_port(dev); | 661 | ata_sas_async_probe(dev->sata_dev.ap); |
659 | if (err) | ||
660 | sas_fail_probe(dev, __func__, err); | ||
661 | else | ||
662 | ata_sas_async_port_init(dev->sata_dev.ap); | ||
663 | } | 662 | } |
664 | mutex_unlock(&port->ha->disco_mutex); | 663 | mutex_unlock(&port->ha->disco_mutex); |
665 | 664 | ||
@@ -718,18 +717,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie) | |||
718 | sas_put_device(dev); | 717 | sas_put_device(dev); |
719 | } | 718 | } |
720 | 719 | ||
721 | static bool sas_ata_dev_eh_valid(struct domain_device *dev) | ||
722 | { | ||
723 | struct ata_port *ap; | ||
724 | |||
725 | if (!dev_is_sata(dev)) | ||
726 | return false; | ||
727 | ap = dev->sata_dev.ap; | ||
728 | /* consume fully initialized ata ports */ | ||
729 | smp_rmb(); | ||
730 | return !!ap; | ||
731 | } | ||
732 | |||
733 | void sas_ata_strategy_handler(struct Scsi_Host *shost) | 720 | void sas_ata_strategy_handler(struct Scsi_Host *shost) |
734 | { | 721 | { |
735 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); | 722 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); |
@@ -753,7 +740,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost) | |||
753 | 740 | ||
754 | spin_lock(&port->dev_list_lock); | 741 | spin_lock(&port->dev_list_lock); |
755 | list_for_each_entry(dev, &port->dev_list, dev_list_node) { | 742 | list_for_each_entry(dev, &port->dev_list, dev_list_node) { |
756 | if (!sas_ata_dev_eh_valid(dev)) | 743 | if (!dev_is_sata(dev)) |
757 | continue; | 744 | continue; |
758 | async_schedule_domain(async_sas_ata_eh, dev, &async); | 745 | async_schedule_domain(async_sas_ata_eh, dev, &async); |
759 | } | 746 | } |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 364679675602..629a0865b130 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
@@ -72,6 +72,7 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
72 | struct asd_sas_phy *phy; | 72 | struct asd_sas_phy *phy; |
73 | struct sas_rphy *rphy; | 73 | struct sas_rphy *rphy; |
74 | struct domain_device *dev; | 74 | struct domain_device *dev; |
75 | int rc = -ENODEV; | ||
75 | 76 | ||
76 | dev = sas_alloc_device(); | 77 | dev = sas_alloc_device(); |
77 | if (!dev) | 78 | if (!dev) |
@@ -110,9 +111,16 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
110 | 111 | ||
111 | sas_init_dev(dev); | 112 | sas_init_dev(dev); |
112 | 113 | ||
114 | dev->port = port; | ||
113 | switch (dev->dev_type) { | 115 | switch (dev->dev_type) { |
114 | case SAS_END_DEV: | ||
115 | case SATA_DEV: | 116 | case SATA_DEV: |
117 | rc = sas_ata_init(dev); | ||
118 | if (rc) { | ||
119 | rphy = NULL; | ||
120 | break; | ||
121 | } | ||
122 | /* fall through */ | ||
123 | case SAS_END_DEV: | ||
116 | rphy = sas_end_device_alloc(port->port); | 124 | rphy = sas_end_device_alloc(port->port); |
117 | break; | 125 | break; |
118 | case EDGE_DEV: | 126 | case EDGE_DEV: |
@@ -131,19 +139,14 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
131 | 139 | ||
132 | if (!rphy) { | 140 | if (!rphy) { |
133 | sas_put_device(dev); | 141 | sas_put_device(dev); |
134 | return -ENODEV; | 142 | return rc; |
135 | } | 143 | } |
136 | 144 | ||
137 | spin_lock_irq(&port->phy_list_lock); | ||
138 | list_for_each_entry(phy, &port->phy_list, port_phy_el) | ||
139 | sas_phy_set_target(phy, dev); | ||
140 | spin_unlock_irq(&port->phy_list_lock); | ||
141 | rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; | 145 | rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; |
142 | memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); | 146 | memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); |
143 | sas_fill_in_rphy(dev, rphy); | 147 | sas_fill_in_rphy(dev, rphy); |
144 | sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); | 148 | sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); |
145 | port->port_dev = dev; | 149 | port->port_dev = dev; |
146 | dev->port = port; | ||
147 | dev->linkrate = port->linkrate; | 150 | dev->linkrate = port->linkrate; |
148 | dev->min_linkrate = port->linkrate; | 151 | dev->min_linkrate = port->linkrate; |
149 | dev->max_linkrate = port->linkrate; | 152 | dev->max_linkrate = port->linkrate; |
@@ -155,6 +158,7 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
155 | sas_device_set_phy(dev, port->port); | 158 | sas_device_set_phy(dev, port->port); |
156 | 159 | ||
157 | dev->rphy = rphy; | 160 | dev->rphy = rphy; |
161 | get_device(&dev->rphy->dev); | ||
158 | 162 | ||
159 | if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) | 163 | if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) |
160 | list_add_tail(&dev->disco_list_node, &port->disco_list); | 164 | list_add_tail(&dev->disco_list_node, &port->disco_list); |
@@ -164,6 +168,11 @@ static int sas_get_port_device(struct asd_sas_port *port) | |||
164 | spin_unlock_irq(&port->dev_list_lock); | 168 | spin_unlock_irq(&port->dev_list_lock); |
165 | } | 169 | } |
166 | 170 | ||
171 | spin_lock_irq(&port->phy_list_lock); | ||
172 | list_for_each_entry(phy, &port->phy_list, port_phy_el) | ||
173 | sas_phy_set_target(phy, dev); | ||
174 | spin_unlock_irq(&port->phy_list_lock); | ||
175 | |||
167 | return 0; | 176 | return 0; |
168 | } | 177 | } |
169 | 178 | ||
@@ -205,8 +214,7 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev) | |||
205 | static void sas_probe_devices(struct work_struct *work) | 214 | static void sas_probe_devices(struct work_struct *work) |
206 | { | 215 | { |
207 | struct domain_device *dev, *n; | 216 | struct domain_device *dev, *n; |
208 | struct sas_discovery_event *ev = | 217 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
209 | container_of(work, struct sas_discovery_event, work); | ||
210 | struct asd_sas_port *port = ev->port; | 218 | struct asd_sas_port *port = ev->port; |
211 | 219 | ||
212 | clear_bit(DISCE_PROBE, &port->disc.pending); | 220 | clear_bit(DISCE_PROBE, &port->disc.pending); |
@@ -255,6 +263,9 @@ void sas_free_device(struct kref *kref) | |||
255 | { | 263 | { |
256 | struct domain_device *dev = container_of(kref, typeof(*dev), kref); | 264 | struct domain_device *dev = container_of(kref, typeof(*dev), kref); |
257 | 265 | ||
266 | put_device(&dev->rphy->dev); | ||
267 | dev->rphy = NULL; | ||
268 | |||
258 | if (dev->parent) | 269 | if (dev->parent) |
259 | sas_put_device(dev->parent); | 270 | sas_put_device(dev->parent); |
260 | 271 | ||
@@ -291,8 +302,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d | |||
291 | static void sas_destruct_devices(struct work_struct *work) | 302 | static void sas_destruct_devices(struct work_struct *work) |
292 | { | 303 | { |
293 | struct domain_device *dev, *n; | 304 | struct domain_device *dev, *n; |
294 | struct sas_discovery_event *ev = | 305 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
295 | container_of(work, struct sas_discovery_event, work); | ||
296 | struct asd_sas_port *port = ev->port; | 306 | struct asd_sas_port *port = ev->port; |
297 | 307 | ||
298 | clear_bit(DISCE_DESTRUCT, &port->disc.pending); | 308 | clear_bit(DISCE_DESTRUCT, &port->disc.pending); |
@@ -302,7 +312,6 @@ static void sas_destruct_devices(struct work_struct *work) | |||
302 | 312 | ||
303 | sas_remove_children(&dev->rphy->dev); | 313 | sas_remove_children(&dev->rphy->dev); |
304 | sas_rphy_delete(dev->rphy); | 314 | sas_rphy_delete(dev->rphy); |
305 | dev->rphy = NULL; | ||
306 | sas_unregister_common_dev(port, dev); | 315 | sas_unregister_common_dev(port, dev); |
307 | } | 316 | } |
308 | } | 317 | } |
@@ -314,11 +323,11 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev) | |||
314 | /* this rphy never saw sas_rphy_add */ | 323 | /* this rphy never saw sas_rphy_add */ |
315 | list_del_init(&dev->disco_list_node); | 324 | list_del_init(&dev->disco_list_node); |
316 | sas_rphy_free(dev->rphy); | 325 | sas_rphy_free(dev->rphy); |
317 | dev->rphy = NULL; | ||
318 | sas_unregister_common_dev(port, dev); | 326 | sas_unregister_common_dev(port, dev); |
327 | return; | ||
319 | } | 328 | } |
320 | 329 | ||
321 | if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) { | 330 | if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) { |
322 | sas_rphy_unlink(dev->rphy); | 331 | sas_rphy_unlink(dev->rphy); |
323 | list_move_tail(&dev->disco_list_node, &port->destroy_list); | 332 | list_move_tail(&dev->disco_list_node, &port->destroy_list); |
324 | sas_discover_event(dev->port, DISCE_DESTRUCT); | 333 | sas_discover_event(dev->port, DISCE_DESTRUCT); |
@@ -377,8 +386,7 @@ static void sas_discover_domain(struct work_struct *work) | |||
377 | { | 386 | { |
378 | struct domain_device *dev; | 387 | struct domain_device *dev; |
379 | int error = 0; | 388 | int error = 0; |
380 | struct sas_discovery_event *ev = | 389 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
381 | container_of(work, struct sas_discovery_event, work); | ||
382 | struct asd_sas_port *port = ev->port; | 390 | struct asd_sas_port *port = ev->port; |
383 | 391 | ||
384 | clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); | 392 | clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); |
@@ -419,8 +427,6 @@ static void sas_discover_domain(struct work_struct *work) | |||
419 | 427 | ||
420 | if (error) { | 428 | if (error) { |
421 | sas_rphy_free(dev->rphy); | 429 | sas_rphy_free(dev->rphy); |
422 | dev->rphy = NULL; | ||
423 | |||
424 | list_del_init(&dev->disco_list_node); | 430 | list_del_init(&dev->disco_list_node); |
425 | spin_lock_irq(&port->dev_list_lock); | 431 | spin_lock_irq(&port->dev_list_lock); |
426 | list_del_init(&dev->dev_list_node); | 432 | list_del_init(&dev->dev_list_node); |
@@ -437,8 +443,7 @@ static void sas_discover_domain(struct work_struct *work) | |||
437 | static void sas_revalidate_domain(struct work_struct *work) | 443 | static void sas_revalidate_domain(struct work_struct *work) |
438 | { | 444 | { |
439 | int res = 0; | 445 | int res = 0; |
440 | struct sas_discovery_event *ev = | 446 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
441 | container_of(work, struct sas_discovery_event, work); | ||
442 | struct asd_sas_port *port = ev->port; | 447 | struct asd_sas_port *port = ev->port; |
443 | struct sas_ha_struct *ha = port->ha; | 448 | struct sas_ha_struct *ha = port->ha; |
444 | 449 | ||
@@ -466,21 +471,25 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
466 | 471 | ||
467 | /* ---------- Events ---------- */ | 472 | /* ---------- Events ---------- */ |
468 | 473 | ||
469 | static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work) | 474 | static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw) |
470 | { | 475 | { |
471 | /* chained work is not subject to SAS_HA_DRAINING or SAS_HA_REGISTERED */ | 476 | /* chained work is not subject to SAS_HA_DRAINING or |
472 | scsi_queue_work(ha->core.shost, work); | 477 | * SAS_HA_REGISTERED, because it is either submitted in the |
478 | * workqueue, or known to be submitted from a context that is | ||
479 | * not racing against draining | ||
480 | */ | ||
481 | scsi_queue_work(ha->core.shost, &sw->work); | ||
473 | } | 482 | } |
474 | 483 | ||
475 | static void sas_chain_event(int event, unsigned long *pending, | 484 | static void sas_chain_event(int event, unsigned long *pending, |
476 | struct work_struct *work, | 485 | struct sas_work *sw, |
477 | struct sas_ha_struct *ha) | 486 | struct sas_ha_struct *ha) |
478 | { | 487 | { |
479 | if (!test_and_set_bit(event, pending)) { | 488 | if (!test_and_set_bit(event, pending)) { |
480 | unsigned long flags; | 489 | unsigned long flags; |
481 | 490 | ||
482 | spin_lock_irqsave(&ha->state_lock, flags); | 491 | spin_lock_irqsave(&ha->state_lock, flags); |
483 | sas_chain_work(ha, work); | 492 | sas_chain_work(ha, sw); |
484 | spin_unlock_irqrestore(&ha->state_lock, flags); | 493 | spin_unlock_irqrestore(&ha->state_lock, flags); |
485 | } | 494 | } |
486 | } | 495 | } |
@@ -519,7 +528,7 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) | |||
519 | 528 | ||
520 | disc->pending = 0; | 529 | disc->pending = 0; |
521 | for (i = 0; i < DISC_NUM_EVENTS; i++) { | 530 | for (i = 0; i < DISC_NUM_EVENTS; i++) { |
522 | INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); | 531 | INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]); |
523 | disc->disc_work[i].port = port; | 532 | disc->disc_work[i].port = port; |
524 | } | 533 | } |
525 | } | 534 | } |
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index 16639bbae629..4e4292d210c1 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c | |||
@@ -27,19 +27,21 @@ | |||
27 | #include "sas_internal.h" | 27 | #include "sas_internal.h" |
28 | #include "sas_dump.h" | 28 | #include "sas_dump.h" |
29 | 29 | ||
30 | void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work) | 30 | void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) |
31 | { | 31 | { |
32 | if (!test_bit(SAS_HA_REGISTERED, &ha->state)) | 32 | if (!test_bit(SAS_HA_REGISTERED, &ha->state)) |
33 | return; | 33 | return; |
34 | 34 | ||
35 | if (test_bit(SAS_HA_DRAINING, &ha->state)) | 35 | if (test_bit(SAS_HA_DRAINING, &ha->state)) { |
36 | list_add(&work->entry, &ha->defer_q); | 36 | /* add it to the defer list, if not already pending */ |
37 | else | 37 | if (list_empty(&sw->drain_node)) |
38 | scsi_queue_work(ha->core.shost, work); | 38 | list_add(&sw->drain_node, &ha->defer_q); |
39 | } else | ||
40 | scsi_queue_work(ha->core.shost, &sw->work); | ||
39 | } | 41 | } |
40 | 42 | ||
41 | static void sas_queue_event(int event, unsigned long *pending, | 43 | static void sas_queue_event(int event, unsigned long *pending, |
42 | struct work_struct *work, | 44 | struct sas_work *work, |
43 | struct sas_ha_struct *ha) | 45 | struct sas_ha_struct *ha) |
44 | { | 46 | { |
45 | if (!test_and_set_bit(event, pending)) { | 47 | if (!test_and_set_bit(event, pending)) { |
@@ -55,7 +57,7 @@ static void sas_queue_event(int event, unsigned long *pending, | |||
55 | void __sas_drain_work(struct sas_ha_struct *ha) | 57 | void __sas_drain_work(struct sas_ha_struct *ha) |
56 | { | 58 | { |
57 | struct workqueue_struct *wq = ha->core.shost->work_q; | 59 | struct workqueue_struct *wq = ha->core.shost->work_q; |
58 | struct work_struct *w, *_w; | 60 | struct sas_work *sw, *_sw; |
59 | 61 | ||
60 | set_bit(SAS_HA_DRAINING, &ha->state); | 62 | set_bit(SAS_HA_DRAINING, &ha->state); |
61 | /* flush submitters */ | 63 | /* flush submitters */ |
@@ -66,9 +68,9 @@ void __sas_drain_work(struct sas_ha_struct *ha) | |||
66 | 68 | ||
67 | spin_lock_irq(&ha->state_lock); | 69 | spin_lock_irq(&ha->state_lock); |
68 | clear_bit(SAS_HA_DRAINING, &ha->state); | 70 | clear_bit(SAS_HA_DRAINING, &ha->state); |
69 | list_for_each_entry_safe(w, _w, &ha->defer_q, entry) { | 71 | list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { |
70 | list_del_init(&w->entry); | 72 | list_del_init(&sw->drain_node); |
71 | sas_queue_work(ha, w); | 73 | sas_queue_work(ha, sw); |
72 | } | 74 | } |
73 | spin_unlock_irq(&ha->state_lock); | 75 | spin_unlock_irq(&ha->state_lock); |
74 | } | 76 | } |
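The hunks above rely on struct sas_work and INIT_SAS_WORK(), introduced elsewhere in this series. A plausible shape for them, inferred from how drain_node and work are used here (an assumption, not a quote of the header):

	struct sas_work {
		struct list_head drain_node;	/* links deferred items on ha->defer_q */
		struct work_struct work;
	};

	static inline void INIT_SAS_WORK(struct sas_work *sw,
					 void (*fn)(struct work_struct *))
	{
		INIT_WORK(&sw->work, fn);
		INIT_LIST_HEAD(&sw->drain_node); /* empty == not currently deferred */
	}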
@@ -151,7 +153,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha) | |||
151 | int i; | 153 | int i; |
152 | 154 | ||
153 | for (i = 0; i < HA_NUM_EVENTS; i++) { | 155 | for (i = 0; i < HA_NUM_EVENTS; i++) { |
154 | INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); | 156 | INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); |
155 | sas_ha->ha_events[i].ha = sas_ha; | 157 | sas_ha->ha_events[i].ha = sas_ha; |
156 | } | 158 | } |
157 | 159 | ||
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 05acd9e35fc4..caa0525d2523 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -202,6 +202,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
202 | u8 sas_addr[SAS_ADDR_SIZE]; | 202 | u8 sas_addr[SAS_ADDR_SIZE]; |
203 | struct smp_resp *resp = rsp; | 203 | struct smp_resp *resp = rsp; |
204 | struct discover_resp *dr = &resp->disc; | 204 | struct discover_resp *dr = &resp->disc; |
205 | struct sas_ha_struct *ha = dev->port->ha; | ||
205 | struct expander_device *ex = &dev->ex_dev; | 206 | struct expander_device *ex = &dev->ex_dev; |
206 | struct ex_phy *phy = &ex->ex_phy[phy_id]; | 207 | struct ex_phy *phy = &ex->ex_phy[phy_id]; |
207 | struct sas_rphy *rphy = dev->rphy; | 208 | struct sas_rphy *rphy = dev->rphy; |
@@ -209,6 +210,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
209 | char *type; | 210 | char *type; |
210 | 211 | ||
211 | if (new_phy) { | 212 | if (new_phy) { |
213 | if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) | ||
214 | return; | ||
212 | phy->phy = sas_phy_alloc(&rphy->dev, phy_id); | 215 | phy->phy = sas_phy_alloc(&rphy->dev, phy_id); |
213 | 216 | ||
214 | /* FIXME: error_handling */ | 217 | /* FIXME: error_handling */ |
@@ -233,6 +236,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
233 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); | 236 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); |
234 | 237 | ||
235 | phy->attached_dev_type = to_dev_type(dr); | 238 | phy->attached_dev_type = to_dev_type(dr); |
239 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) | ||
240 | goto out; | ||
236 | phy->phy_id = phy_id; | 241 | phy->phy_id = phy_id; |
237 | phy->linkrate = dr->linkrate; | 242 | phy->linkrate = dr->linkrate; |
238 | phy->attached_sata_host = dr->attached_sata_host; | 243 | phy->attached_sata_host = dr->attached_sata_host; |
@@ -240,7 +245,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
240 | phy->attached_sata_ps = dr->attached_sata_ps; | 245 | phy->attached_sata_ps = dr->attached_sata_ps; |
241 | phy->attached_iproto = dr->iproto << 1; | 246 | phy->attached_iproto = dr->iproto << 1; |
242 | phy->attached_tproto = dr->tproto << 1; | 247 | phy->attached_tproto = dr->tproto << 1; |
243 | memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); | 248 | /* help some expanders that fail to zero sas_address in the 'no |
249 | * device' case | ||
250 | */ | ||
251 | if (phy->attached_dev_type == NO_DEVICE || | ||
252 | phy->linkrate < SAS_LINK_RATE_1_5_GBPS) | ||
253 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | ||
254 | else | ||
255 | memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); | ||
244 | phy->attached_phy_id = dr->attached_phy_id; | 256 | phy->attached_phy_id = dr->attached_phy_id; |
245 | phy->phy_change_count = dr->change_count; | 257 | phy->phy_change_count = dr->change_count; |
246 | phy->routing_attr = dr->routing_attr; | 258 | phy->routing_attr = dr->routing_attr; |
@@ -266,6 +278,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
266 | return; | 278 | return; |
267 | } | 279 | } |
268 | 280 | ||
281 | out: | ||
269 | switch (phy->attached_dev_type) { | 282 | switch (phy->attached_dev_type) { |
270 | case SATA_PENDING: | 283 | case SATA_PENDING: |
271 | type = "stp pending"; | 284 | type = "stp pending"; |
@@ -304,7 +317,15 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
304 | else | 317 | else |
305 | return; | 318 | return; |
306 | 319 | ||
307 | SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", | 320 | /* if the attached device type changed and ata_eh is active, |
321 | * make sure we run revalidation when eh completes (see: | ||
322 | * sas_enable_revalidation) | ||
323 | */ | ||
324 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) | ||
325 | set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); | ||
326 | |||
327 | SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", | ||
328 | test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", | ||
308 | SAS_ADDR(dev->sas_addr), phy->phy_id, | 329 | SAS_ADDR(dev->sas_addr), phy->phy_id, |
309 | sas_route_char(dev, phy), phy->linkrate, | 330 | sas_route_char(dev, phy), phy->linkrate, |
310 | SAS_ADDR(phy->attached_sas_addr), type); | 331 | SAS_ADDR(phy->attached_sas_addr), type); |
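The DISCE_REVALIDATE_DOMAIN bit set here is consumed once ata_eh finishes. A hedged sketch of the counterpart the comment names: only sas_enable_revalidation and the two bits are given by the hunk; the loop body, the use of sas_discover_event(), and the omitted disco_mutex locking are assumptions:

static void sas_enable_revalidation(struct sas_ha_struct *ha)
{
	int i;

	clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];

		/* re-queue any revalidation deferred while eh was active */
		if (test_and_clear_bit(DISCE_REVALIDATE_DOMAIN,
				       &port->disc.pending))
			sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
	}
}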
@@ -776,13 +797,16 @@ static struct domain_device *sas_ex_discover_end_dev(
776 | if (res) | 797 | if (res) |
777 | goto out_free; | 798 | goto out_free; |
778 | 799 | ||
800 | sas_init_dev(child); | ||
801 | res = sas_ata_init(child); | ||
802 | if (res) | ||
803 | goto out_free; | ||
779 | rphy = sas_end_device_alloc(phy->port); | 804 | rphy = sas_end_device_alloc(phy->port); |
780 | if (unlikely(!rphy)) | 805 | if (!rphy) |
781 | goto out_free; | 806 | goto out_free; |
782 | 807 | ||
783 | sas_init_dev(child); | ||
784 | |||
785 | child->rphy = rphy; | 808 | child->rphy = rphy; |
809 | get_device(&rphy->dev); | ||
786 | 810 | ||
787 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); | 811 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); |
788 | 812 | ||
@@ -806,6 +830,7 @@ static struct domain_device *sas_ex_discover_end_dev(
806 | sas_init_dev(child); | 830 | sas_init_dev(child); |
807 | 831 | ||
808 | child->rphy = rphy; | 832 | child->rphy = rphy; |
833 | get_device(&rphy->dev); | ||
809 | sas_fill_in_rphy(child, rphy); | 834 | sas_fill_in_rphy(child, rphy); |
810 | 835 | ||
811 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); | 836 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); |
@@ -830,8 +855,6 @@ static struct domain_device *sas_ex_discover_end_dev(
830 | 855 | ||
831 | out_list_del: | 856 | out_list_del: |
832 | sas_rphy_free(child->rphy); | 857 | sas_rphy_free(child->rphy); |
833 | child->rphy = NULL; | ||
834 | |||
835 | list_del(&child->disco_list_node); | 858 | list_del(&child->disco_list_node); |
836 | spin_lock_irq(&parent->port->dev_list_lock); | 859 | spin_lock_irq(&parent->port->dev_list_lock); |
837 | list_del(&child->dev_list_node); | 860 | list_del(&child->dev_list_node); |
@@ -911,6 +934,7 @@ static struct domain_device *sas_ex_discover_expander(
911 | } | 934 | } |
912 | port = parent->port; | 935 | port = parent->port; |
913 | child->rphy = rphy; | 936 | child->rphy = rphy; |
937 | get_device(&rphy->dev); | ||
914 | edev = rphy_to_expander_device(rphy); | 938 | edev = rphy_to_expander_device(rphy); |
915 | child->dev_type = phy->attached_dev_type; | 939 | child->dev_type = phy->attached_dev_type; |
916 | kref_get(&parent->kref); | 940 | kref_get(&parent->kref); |
@@ -934,6 +958,7 @@ static struct domain_device *sas_ex_discover_expander(
934 | 958 | ||
935 | res = sas_discover_expander(child); | 959 | res = sas_discover_expander(child); |
936 | if (res) { | 960 | if (res) { |
961 | sas_rphy_delete(rphy); | ||
937 | spin_lock_irq(&parent->port->dev_list_lock); | 962 | spin_lock_irq(&parent->port->dev_list_lock); |
938 | list_del(&child->dev_list_node); | 963 | list_del(&child->dev_list_node); |
939 | spin_unlock_irq(&parent->port->dev_list_lock); | 964 | spin_unlock_irq(&parent->port->dev_list_lock); |
@@ -1718,9 +1743,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1718 | int phy_change_count = 0; | 1743 | int phy_change_count = 0; |
1719 | 1744 | ||
1720 | res = sas_get_phy_change_count(dev, i, &phy_change_count); | 1745 | res = sas_get_phy_change_count(dev, i, &phy_change_count); |
1721 | if (res) | 1746 | switch (res) { |
1722 | goto out; | 1747 | case SMP_RESP_PHY_VACANT: |
1723 | else if (phy_change_count != ex->ex_phy[i].phy_change_count) { | 1748 | case SMP_RESP_NO_PHY: |
1749 | continue; | ||
1750 | case SMP_RESP_FUNC_ACC: | ||
1751 | break; | ||
1752 | default: | ||
1753 | return res; | ||
1754 | } | ||
1755 | |||
1756 | if (phy_change_count != ex->ex_phy[i].phy_change_count) { | ||
1724 | if (update) | 1757 | if (update) |
1725 | ex->ex_phy[i].phy_change_count = | 1758 | ex->ex_phy[i].phy_change_count = |
1726 | phy_change_count; | 1759 | phy_change_count; |
@@ -1728,8 +1761,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1728 | return 0; | 1761 | return 0; |
1729 | } | 1762 | } |
1730 | } | 1763 | } |
1731 | out: | 1764 | return 0; |
1732 | return res; | ||
1733 | } | 1765 | } |
1734 | 1766 | ||
1735 | static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) | 1767 | static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) |
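Treating PHY VACANT and NO PHY as skippable rather than fatal lets the broadcast scan step over unimplemented phys on self-configuring expanders instead of aborting discovery. The codes compared in the switch are standard SMP function results; the values below are an excerpt as defined in include/scsi/sas.h at the time of this series:

	SMP_RESP_FUNC_ACC   = 0x00,	/* function accepted */
	SMP_RESP_NO_PHY     = 0x10,	/* phy does not exist */
	SMP_RESP_PHY_VACANT = 0x16,	/* phy vacant */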
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 120bff64be30..10cb5ae30977 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -94,8 +94,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
94 | 94 | ||
95 | void sas_hae_reset(struct work_struct *work) | 95 | void sas_hae_reset(struct work_struct *work) |
96 | { | 96 | { |
97 | struct sas_ha_event *ev = | 97 | struct sas_ha_event *ev = to_sas_ha_event(work); |
98 | container_of(work, struct sas_ha_event, work); | ||
99 | struct sas_ha_struct *ha = ev->ha; | 98 | struct sas_ha_struct *ha = ev->ha; |
100 | 99 | ||
101 | clear_bit(HAE_RESET, &ha->pending); | 100 | clear_bit(HAE_RESET, &ha->pending); |
@@ -369,14 +368,14 @@ static void sas_phy_release(struct sas_phy *phy)
369 | 368 | ||
370 | static void phy_reset_work(struct work_struct *work) | 369 | static void phy_reset_work(struct work_struct *work) |
371 | { | 370 | { |
372 | struct sas_phy_data *d = container_of(work, typeof(*d), reset_work); | 371 | struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work); |
373 | 372 | ||
374 | d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset); | 373 | d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset); |
375 | } | 374 | } |
376 | 375 | ||
377 | static void phy_enable_work(struct work_struct *work) | 376 | static void phy_enable_work(struct work_struct *work) |
378 | { | 377 | { |
379 | struct sas_phy_data *d = container_of(work, typeof(*d), enable_work); | 378 | struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work); |
380 | 379 | ||
381 | d->enable_result = sas_phy_enable(d->phy, d->enable); | 380 | d->enable_result = sas_phy_enable(d->phy, d->enable); |
382 | } | 381 | } |
@@ -389,8 +388,8 @@ static int sas_phy_setup(struct sas_phy *phy)
389 | return -ENOMEM; | 388 | return -ENOMEM; |
390 | 389 | ||
391 | mutex_init(&d->event_lock); | 390 | mutex_init(&d->event_lock); |
392 | INIT_WORK(&d->reset_work, phy_reset_work); | 391 | INIT_SAS_WORK(&d->reset_work, phy_reset_work); |
393 | INIT_WORK(&d->enable_work, phy_enable_work); | 392 | INIT_SAS_WORK(&d->enable_work, phy_enable_work); |
394 | d->phy = phy; | 393 | d->phy = phy; |
395 | phy->hostdata = d; | 394 | phy->hostdata = d; |
396 | 395 | ||
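Because reset_work and enable_work now embed their work_struct one level down, every container_of() in this file hops through the nested member; the to_sas_ha_event() helper used in sas_hae_reset() reduces to the same pattern. A sketch, assuming struct sas_ha_event keeps its sas_work member named work:

static inline struct sas_ha_event *to_sas_ha_event(struct work_struct *work)
{
	/* from the inner work_struct to the embedding event */
	return container_of(work, struct sas_ha_event, work.work);
}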
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index f05c63879949..507e4cf12e56 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -45,10 +45,10 @@ struct sas_phy_data {
45 | struct mutex event_lock; | 45 | struct mutex event_lock; |
46 | int hard_reset; | 46 | int hard_reset; |
47 | int reset_result; | 47 | int reset_result; |
48 | struct work_struct reset_work; | 48 | struct sas_work reset_work; |
49 | int enable; | 49 | int enable; |
50 | int enable_result; | 50 | int enable_result; |
51 | struct work_struct enable_work; | 51 | struct sas_work enable_work; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | void sas_scsi_recover_host(struct Scsi_Host *shost); | 54 | void sas_scsi_recover_host(struct Scsi_Host *shost); |
@@ -80,7 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
80 | void sas_porte_link_reset_err(struct work_struct *work); | 80 | void sas_porte_link_reset_err(struct work_struct *work); |
81 | void sas_porte_timer_event(struct work_struct *work); | 81 | void sas_porte_timer_event(struct work_struct *work); |
82 | void sas_porte_hard_reset(struct work_struct *work); | 82 | void sas_porte_hard_reset(struct work_struct *work); |
83 | void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work); | 83 | void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw); |
84 | 84 | ||
85 | int sas_notify_lldd_dev_found(struct domain_device *); | 85 | int sas_notify_lldd_dev_found(struct domain_device *); |
86 | void sas_notify_lldd_dev_gone(struct domain_device *); | 86 | void sas_notify_lldd_dev_gone(struct domain_device *); |
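The new sas_queue_work() signature pairs with the drain handling at the top of this patch: while the HA is draining, a sas_work is parked on ha->defer_q via its drain_node instead of being queued. A hedged reconstruction; the SAS_HA_DRAINING/SAS_HA_REGISTERED state bits and the scsi_queue_work() call are assumptions consistent with the hunks above:

static void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
		return;

	if (test_bit(SAS_HA_DRAINING, &ha->state)) {
		/* park it; the drain loop requeues everything on defer_q */
		if (list_empty(&sw->drain_node))
			list_add(&sw->drain_node, &ha->defer_q);
	} else
		scsi_queue_work(ha->core.shost, &sw->work);
}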
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index dcfd4a9105c5..521422e857ab 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -32,8 +32,7 @@
32 | 32 | ||
33 | static void sas_phye_loss_of_signal(struct work_struct *work) | 33 | static void sas_phye_loss_of_signal(struct work_struct *work) |
34 | { | 34 | { |
35 | struct asd_sas_event *ev = | 35 | struct asd_sas_event *ev = to_asd_sas_event(work); |
36 | container_of(work, struct asd_sas_event, work); | ||
37 | struct asd_sas_phy *phy = ev->phy; | 36 | struct asd_sas_phy *phy = ev->phy; |
38 | 37 | ||
39 | clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending); | 38 | clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending); |
@@ -43,8 +42,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
43 | 42 | ||
44 | static void sas_phye_oob_done(struct work_struct *work) | 43 | static void sas_phye_oob_done(struct work_struct *work) |
45 | { | 44 | { |
46 | struct asd_sas_event *ev = | 45 | struct asd_sas_event *ev = to_asd_sas_event(work); |
47 | container_of(work, struct asd_sas_event, work); | ||
48 | struct asd_sas_phy *phy = ev->phy; | 46 | struct asd_sas_phy *phy = ev->phy; |
49 | 47 | ||
50 | clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending); | 48 | clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending); |
@@ -53,8 +51,7 @@ static void sas_phye_oob_done(struct work_struct *work)
53 | 51 | ||
54 | static void sas_phye_oob_error(struct work_struct *work) | 52 | static void sas_phye_oob_error(struct work_struct *work) |
55 | { | 53 | { |
56 | struct asd_sas_event *ev = | 54 | struct asd_sas_event *ev = to_asd_sas_event(work); |
57 | container_of(work, struct asd_sas_event, work); | ||
58 | struct asd_sas_phy *phy = ev->phy; | 55 | struct asd_sas_phy *phy = ev->phy; |
59 | struct sas_ha_struct *sas_ha = phy->ha; | 56 | struct sas_ha_struct *sas_ha = phy->ha; |
60 | struct asd_sas_port *port = phy->port; | 57 | struct asd_sas_port *port = phy->port; |
@@ -85,8 +82,7 @@ static void sas_phye_oob_error(struct work_struct *work)
85 | 82 | ||
86 | static void sas_phye_spinup_hold(struct work_struct *work) | 83 | static void sas_phye_spinup_hold(struct work_struct *work) |
87 | { | 84 | { |
88 | struct asd_sas_event *ev = | 85 | struct asd_sas_event *ev = to_asd_sas_event(work); |
89 | container_of(work, struct asd_sas_event, work); | ||
90 | struct asd_sas_phy *phy = ev->phy; | 86 | struct asd_sas_phy *phy = ev->phy; |
91 | struct sas_ha_struct *sas_ha = phy->ha; | 87 | struct sas_ha_struct *sas_ha = phy->ha; |
92 | struct sas_internal *i = | 88 | struct sas_internal *i = |
@@ -127,14 +123,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
127 | phy->error = 0; | 123 | phy->error = 0; |
128 | INIT_LIST_HEAD(&phy->port_phy_el); | 124 | INIT_LIST_HEAD(&phy->port_phy_el); |
129 | for (k = 0; k < PORT_NUM_EVENTS; k++) { | 125 | for (k = 0; k < PORT_NUM_EVENTS; k++) { |
130 | INIT_WORK(&phy->port_events[k].work, | 126 | INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]); |
131 | sas_port_event_fns[k]); | ||
132 | phy->port_events[k].phy = phy; | 127 | phy->port_events[k].phy = phy; |
133 | } | 128 | } |
134 | 129 | ||
135 | for (k = 0; k < PHY_NUM_EVENTS; k++) { | 130 | for (k = 0; k < PHY_NUM_EVENTS; k++) { |
136 | INIT_WORK(&phy->phy_events[k].work, | 131 | INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]); |
137 | sas_phy_event_fns[k]); | ||
138 | phy->phy_events[k].phy = phy; | 132 | phy->phy_events[k].phy = phy; |
139 | } | 133 | } |
140 | 134 | ||
@@ -144,8 +138,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
144 | spin_lock_init(&phy->sas_prim_lock); | 138 | spin_lock_init(&phy->sas_prim_lock); |
145 | phy->frame_rcvd_size = 0; | 139 | phy->frame_rcvd_size = 0; |
146 | 140 | ||
147 | phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, | 141 | phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i); |
148 | i); | ||
149 | if (!phy->phy) | 142 | if (!phy->phy) |
150 | return -ENOMEM; | 143 | return -ENOMEM; |
151 | 144 | ||
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index eb19c016d500..e884a8c58a0c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -123,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
123 | spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); | 123 | spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); |
124 | 124 | ||
125 | if (!port->port) { | 125 | if (!port->port) { |
126 | port->port = sas_port_alloc(phy->phy->dev.parent, phy->id); | 126 | port->port = sas_port_alloc(phy->phy->dev.parent, port->id); |
127 | BUG_ON(!port->port); | 127 | BUG_ON(!port->port); |
128 | sas_port_add(port->port); | 128 | sas_port_add(port->port); |
129 | } | 129 | } |
@@ -208,8 +208,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
208 | 208 | ||
209 | void sas_porte_bytes_dmaed(struct work_struct *work) | 209 | void sas_porte_bytes_dmaed(struct work_struct *work) |
210 | { | 210 | { |
211 | struct asd_sas_event *ev = | 211 | struct asd_sas_event *ev = to_asd_sas_event(work); |
212 | container_of(work, struct asd_sas_event, work); | ||
213 | struct asd_sas_phy *phy = ev->phy; | 212 | struct asd_sas_phy *phy = ev->phy; |
214 | 213 | ||
215 | clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending); | 214 | clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending); |
@@ -219,8 +218,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
219 | 218 | ||
220 | void sas_porte_broadcast_rcvd(struct work_struct *work) | 219 | void sas_porte_broadcast_rcvd(struct work_struct *work) |
221 | { | 220 | { |
222 | struct asd_sas_event *ev = | 221 | struct asd_sas_event *ev = to_asd_sas_event(work); |
223 | container_of(work, struct asd_sas_event, work); | ||
224 | struct asd_sas_phy *phy = ev->phy; | 222 | struct asd_sas_phy *phy = ev->phy; |
225 | unsigned long flags; | 223 | unsigned long flags; |
226 | u32 prim; | 224 | u32 prim; |
@@ -237,8 +235,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
237 | 235 | ||
238 | void sas_porte_link_reset_err(struct work_struct *work) | 236 | void sas_porte_link_reset_err(struct work_struct *work) |
239 | { | 237 | { |
240 | struct asd_sas_event *ev = | 238 | struct asd_sas_event *ev = to_asd_sas_event(work); |
241 | container_of(work, struct asd_sas_event, work); | ||
242 | struct asd_sas_phy *phy = ev->phy; | 239 | struct asd_sas_phy *phy = ev->phy; |
243 | 240 | ||
244 | clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending); | 241 | clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending); |
@@ -248,8 +245,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
248 | 245 | ||
249 | void sas_porte_timer_event(struct work_struct *work) | 246 | void sas_porte_timer_event(struct work_struct *work) |
250 | { | 247 | { |
251 | struct asd_sas_event *ev = | 248 | struct asd_sas_event *ev = to_asd_sas_event(work); |
252 | container_of(work, struct asd_sas_event, work); | ||
253 | struct asd_sas_phy *phy = ev->phy; | 249 | struct asd_sas_phy *phy = ev->phy; |
254 | 250 | ||
255 | clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending); | 251 | clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending); |
@@ -259,8 +255,7 @@ void sas_porte_timer_event(struct work_struct *work)
259 | 255 | ||
260 | void sas_porte_hard_reset(struct work_struct *work) | 256 | void sas_porte_hard_reset(struct work_struct *work) |
261 | { | 257 | { |
262 | struct asd_sas_event *ev = | 258 | struct asd_sas_event *ev = to_asd_sas_event(work); |
263 | container_of(work, struct asd_sas_event, work); | ||
264 | struct asd_sas_phy *phy = ev->phy; | 259 | struct asd_sas_phy *phy = ev->phy; |
265 | 260 | ||
266 | clear_bit(PORTE_HARD_RESET, &phy->port_events_pending); | 261 | clear_bit(PORTE_HARD_RESET, &phy->port_events_pending); |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f74cc0602f3b..bc3cc6d91117 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1367,6 +1367,9 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1367 | struct qla_hw_data *ha = vha->hw; | 1367 | struct qla_hw_data *ha = vha->hw; |
1368 | int rval = 0; | 1368 | int rval = 0; |
1369 | 1369 | ||
1370 | if (ha->flags.isp82xx_reset_hdlr_active) | ||
1371 | return -EBUSY; | ||
1372 | |||
1370 | rval = qla2x00_optrom_setup(bsg_job, vha, 0); | 1373 | rval = qla2x00_optrom_setup(bsg_job, vha, 0); |
1371 | if (rval) | 1374 | if (rval) |
1372 | return rval; | 1375 | return rval; |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 897731b93df2..62324a1d5573 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,7 @@
15 | * | Mailbox commands | 0x113e | 0x112c-0x112e | | 15 | * | Mailbox commands | 0x113e | 0x112c-0x112e | |
16 | * | | | 0x113a | | 16 | * | | | 0x113a | |
17 | * | Device Discovery | 0x2086 | 0x2020-0x2022 | | 17 | * | Device Discovery | 0x2086 | 0x2020-0x2022 | |
18 | * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 | | 18 | * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | |
19 | * | | | 0x302d-0x302e | | 19 | * | | | 0x302d-0x302e | |
20 | * | DPC Thread | 0x401c | | | 20 | * | DPC Thread | 0x401c | | |
21 | * | Async Events | 0x505d | 0x502b-0x502f | | 21 | * | Async Events | 0x505d | 0x502b-0x502f | |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f79844ce7122..ce42288049b5 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1715,13 +1715,24 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1715 | res = DID_ERROR << 16; | 1715 | res = DID_ERROR << 16; |
1716 | break; | 1716 | break; |
1717 | } | 1717 | } |
1718 | } else { | 1718 | } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && |
1719 | lscsi_status != SAM_STAT_BUSY) { | ||
1720 | /* | ||
1721 | * A SCSI status of TASK SET FULL or BUSY means the task was | ||
1722 | * not completed, so this is not a dropped-frame case. | ||
1723 | */ | ||
1724 | |||
1719 | ql_dbg(ql_dbg_io, fcport->vha, 0x301f, | 1725 | ql_dbg(ql_dbg_io, fcport->vha, 0x301f, |
1720 | "Dropped frame(s) detected (0x%x " | 1726 | "Dropped frame(s) detected (0x%x " |
1721 | "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); | 1727 | "of 0x%x bytes).\n", resid, |
1728 | scsi_bufflen(cp)); | ||
1722 | 1729 | ||
1723 | res = DID_ERROR << 16 | lscsi_status; | 1730 | res = DID_ERROR << 16 | lscsi_status; |
1724 | goto check_scsi_status; | 1731 | goto check_scsi_status; |
1732 | } else { | ||
1733 | ql_dbg(ql_dbg_io, fcport->vha, 0x3030, | ||
1734 | "scsi_status: 0x%x, lscsi_status: 0x%x\n", | ||
1735 | scsi_status, lscsi_status); | ||
1725 | } | 1736 | } |
1726 | 1737 | ||
1727 | res = DID_OK << 16 | lscsi_status; | 1738 | res = DID_OK << 16 | lscsi_status; |
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index f0528539bbbc..de722a933438 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3125,6 +3125,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3125 | ql_log(ql_log_info, vha, 0x00b7, | 3125 | ql_log(ql_log_info, vha, 0x00b7, |
3126 | "HW State: COLD/RE-INIT.\n"); | 3126 | "HW State: COLD/RE-INIT.\n"); |
3127 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); | 3127 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); |
3128 | qla82xx_set_rst_ready(ha); | ||
3128 | if (ql2xmdenable) { | 3129 | if (ql2xmdenable) { |
3129 | if (qla82xx_md_collect(vha)) | 3130 | if (qla82xx_md_collect(vha)) |
3130 | ql_log(ql_log_warn, vha, 0xb02c, | 3131 | ql_log(ql_log_warn, vha, 0xb02c, |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a2f999273a5f..7db803377c64 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3577,9 +3577,25 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3577 | continue; | 3577 | continue; |
3578 | /* Attempt a retry. */ | 3578 | /* Attempt a retry. */ |
3579 | status = 1; | 3579 | status = 1; |
3580 | } else | 3580 | } else { |
3581 | status = qla2x00_fabric_login(vha, | 3581 | status = qla2x00_fabric_login(vha, |
3582 | fcport, &next_loopid); | 3582 | fcport, &next_loopid); |
3583 | if (status == QLA_SUCCESS) { | ||
3584 | int status2; | ||
3585 | uint8_t opts; | ||
3586 | |||
3587 | opts = 0; | ||
3588 | if (fcport->flags & | ||
3589 | FCF_FCP2_DEVICE) | ||
3590 | opts |= BIT_1; | ||
3591 | status2 = | ||
3592 | qla2x00_get_port_database( | ||
3593 | vha, fcport, | ||
3594 | opts); | ||
3595 | if (status2 != QLA_SUCCESS) | ||
3596 | status = 1; | ||
3597 | } | ||
3598 | } | ||
3583 | } else | 3599 | } else |
3584 | status = qla2x00_local_device_login(vha, | 3600 | status = qla2x00_local_device_login(vha, |
3585 | fcport); | 3601 | fcport); |
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3c13c0a6be63..a683e766d1ae 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1017,6 +1017,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1017 | !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) | 1017 | !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) |
1018 | return; | 1018 | return; |
1019 | 1019 | ||
1020 | if (ha->flags.isp82xx_reset_hdlr_active) | ||
1021 | return; | ||
1022 | |||
1020 | ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, | 1023 | ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, |
1021 | ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); | 1024 | ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); |
1022 | if (hdr.version == __constant_cpu_to_le16(0xffff)) | 1025 | if (hdr.version == __constant_cpu_to_le16(0xffff)) |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 29d780c38040..f5fdb16bec9b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.07.13-k" | 10 | #define QLA2XXX_VERSION "8.04.00.03-k" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 4 |
14 | #define QLA_DRIVER_PATCH_VER 7 | 14 | #define QLA_DRIVER_PATCH_VER 0 |
15 | #define QLA_DRIVER_BETA_VER 3 | 15 | #define QLA_DRIVER_BETA_VER 3 |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dbe43924d7ae..62ddfd31d4ce 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1638,7 +1638,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1638 | request_fn_proc *request_fn) | 1638 | request_fn_proc *request_fn) |
1639 | { | 1639 | { |
1640 | struct request_queue *q; | 1640 | struct request_queue *q; |
1641 | struct device *dev = shost->shost_gendev.parent; | 1641 | struct device *dev = shost->dma_dev; |
1642 | 1642 | ||
1643 | q = blk_init_queue(request_fn, NULL); | 1643 | q = blk_init_queue(request_fn, NULL); |
1644 | if (!q) | 1644 | if (!q) |
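shost->dma_dev is recorded at registration time, so the bounce limit is computed against the device that actually masters DMA rather than whatever sits above the host in the device tree. Most HBAs never see a difference: the plain scsi_add_host() wrapper in include/scsi/scsi_host.h passes the same device for both roles, and only callers of scsi_add_host_with_dma() (e.g. hosts parented to a non-DMA device such as a netdev) split them:

static inline int scsi_add_host(struct Scsi_Host *host, struct device *dev)
{
	/* parent and DMA device coincide for ordinary HBAs */
	return scsi_add_host_with_dma(host, dev, dev);
}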
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index efccd72c4a3e..1b3843117268 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -175,7 +175,8 @@ static void virtscsi_complete_free(void *buf)
175 | 175 | ||
176 | if (cmd->comp) | 176 | if (cmd->comp) |
177 | complete_all(cmd->comp); | 177 | complete_all(cmd->comp); |
178 | mempool_free(cmd, virtscsi_cmd_pool); | 178 | else |
179 | mempool_free(cmd, virtscsi_cmd_pool); | ||
179 | } | 180 | } |
180 | 181 | ||
181 | static void virtscsi_ctrl_done(struct virtqueue *vq) | 182 | static void virtscsi_ctrl_done(struct virtqueue *vq) |
@@ -311,21 +312,22 @@ out:
311 | static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) | 312 | static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) |
312 | { | 313 | { |
313 | DECLARE_COMPLETION_ONSTACK(comp); | 314 | DECLARE_COMPLETION_ONSTACK(comp); |
314 | int ret; | 315 | int ret = FAILED; |
315 | 316 | ||
316 | cmd->comp = &comp; | 317 | cmd->comp = &comp; |
317 | ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, | 318 | if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, |
318 | sizeof cmd->req.tmf, sizeof cmd->resp.tmf, | 319 | sizeof cmd->req.tmf, sizeof cmd->resp.tmf, |
319 | GFP_NOIO); | 320 | GFP_NOIO) < 0) |
320 | if (ret < 0) | 321 | goto out; |
321 | return FAILED; | ||
322 | 322 | ||
323 | wait_for_completion(&comp); | 323 | wait_for_completion(&comp); |
324 | if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK && | 324 | if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || |
325 | cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) | 325 | cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) |
326 | return FAILED; | 326 | ret = SUCCESS; |
327 | 327 | ||
328 | return SUCCESS; | 328 | out: |
329 | mempool_free(cmd, virtscsi_cmd_pool); | ||
330 | return ret; | ||
329 | } | 331 | } |
330 | 332 | ||
331 | static int virtscsi_device_reset(struct scsi_cmnd *sc) | 333 | static int virtscsi_device_reset(struct scsi_cmnd *sc) |
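Taken with the virtscsi_complete_free() hunk above, ownership of a TMF command now transfers to the waiter: the completion callback only signals, and virtscsi_tmf() frees the command on every exit path, win or lose. The context line above shows the next caller; a hedged sketch of its body (the LUN encoding and subtype are assumptions modeled on the virtio-scsi spec):

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	/* virtscsi_tmf() waits for the response and frees cmd */
	return virtscsi_tmf(vscsi, cmd);
}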