 drivers/scsi/isci/host.c                |   8
 drivers/scsi/isci/host.h                |  20
 drivers/scsi/isci/isci.h                |   5
 drivers/scsi/isci/phy.c                 |  33
 drivers/scsi/isci/phy.h                 |  17
 drivers/scsi/isci/port.c                |   4
 drivers/scsi/isci/port.h                |  17
 drivers/scsi/isci/port_config.c         |  14
 drivers/scsi/isci/remote_device.c       |  43
 drivers/scsi/isci/remote_device.h       |  97
 drivers/scsi/isci/remote_node_context.c |   2
 drivers/scsi/isci/remote_node_context.h |   3
 drivers/scsi/isci/request.c             | 328
 drivers/scsi/isci/request.h             |  73
 drivers/scsi/isci/task.c                |   2
 15 files changed, 188 insertions(+), 478 deletions(-)
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index f31f64e4b713..88e731333532 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2627,7 +2627,7 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost,
                 return status;
 
         set_bit(IREQ_ACTIVE, &ireq->flags);
-        sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
+        sci_controller_post_request(ihost, ireq->post_context);
         return SCI_SUCCESS;
 }
 
@@ -2707,7 +2707,7 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
         }
 
         set_bit(IREQ_ACTIVE, &ireq->flags);
-        sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
+        sci_controller_post_request(ihost, ireq->post_context);
         return SCI_SUCCESS;
 }
 
@@ -2747,9 +2747,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
                 return SCI_SUCCESS;
         case SCI_SUCCESS:
                 set_bit(IREQ_ACTIVE, &ireq->flags);
-
-                sci_controller_post_request(ihost,
-                                            sci_request_get_post_context(ireq));
+                sci_controller_post_request(ihost, ireq->post_context);
                 break;
         default:
                 break;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index d87f21de1807..a72d2be2445d 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -172,6 +172,7 @@ struct isci_host {
         /* XXX kill */
         bool phy_startup_timer_pending;
         u32 next_phy_to_start;
+        /* XXX convert to unsigned long and use bitops */
         u8 invalid_phy_mask;
 
         /* TODO attempt dynamic interrupt coalescing scheme */
@@ -359,13 +360,8 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
         return dev->port->ha->lldd_ha;
 }
 
-/**
- * sci_controller_get_protocol_engine_group() -
- *
- * This macro returns the protocol engine group for this controller object.
- * Presently we only support protocol engine group 0 so just return that
- */
-#define sci_controller_get_protocol_engine_group(controller) 0
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
 
 /* see sci_controller_io_tag_allocate|free for how seq and tci are built */
 #define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
@@ -386,16 +382,6 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
 }
 
 /**
- * sci_controller_set_invalid_phy() -
- *
- * This macro will set the bit in the invalid phy mask for this controller
- * object. This is used to control messages reported for invalid link up
- * notifications.
- */
-#define sci_controller_set_invalid_phy(controller, phy) \
-        ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))
-
-/**
  * sci_controller_clear_invalid_phy() -
  *
  * This macro will clear the bit in the invalid phy mask for this controller
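Aside on the tag helper kept as context above: ISCI_TAG packs a sequence number above a 12-bit task context index (TCI), and ISCI_TAG_TCI (used further down in request.c) recovers the low bits. The standalone sketch below only illustrates that packing; the ISCI_TAG_SEQ splitter and the 4-bit sequence width are assumptions made for the example, not taken from the patch.

/* Illustrative sketch, not part of the patch: ISCI_TAG packing per host.h. */
#include <stdio.h>

#define ISCI_TAG(seq, tci)  ((unsigned short)(((seq) << 12) | (tci)))
#define ISCI_TAG_SEQ(tag)   (((tag) >> 12) & 0xf)   /* assumed 4-bit sequence field */
#define ISCI_TAG_TCI(tag)   ((tag) & 0xfff)         /* 12-bit task context index */

int main(void)
{
        unsigned short tag = ISCI_TAG(3, 0x2a);

        printf("tag=0x%04x seq=%u tci=0x%03x\n", tag,
               (unsigned)ISCI_TAG_SEQ(tag), (unsigned)ISCI_TAG_TCI(tag));
        return 0;
}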
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 3afccfcb94e1..d1de63312e7f 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -73,11 +73,6 @@
 
 #define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
 
-enum sci_controller_mode {
-        SCI_MODE_SPEED,
-        SCI_MODE_SIZE /* deprecated */
-};
-
 #define SCI_MAX_PHYS (4UL)
 #define SCI_MAX_PORTS SCI_MAX_PHYS
 #define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 0df9f713f487..e56080af78f4 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -265,10 +265,11 @@ done:
  * port (i.e. it's contained in the dummy port). !NULL All other
  * values indicate a handle/pointer to the port containing the phy.
  */
-struct isci_port *phy_get_non_dummy_port(
-        struct isci_phy *iphy)
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
 {
-        if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT)
+        struct isci_port *iport = iphy->owning_port;
+
+        if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
                 return NULL;
 
         return iphy->owning_port;
@@ -858,10 +859,9 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
                 struct dev_to_host_fis *frame_header;
                 u32 *fis_frame_data;
 
-                result = sci_unsolicited_frame_control_get_header(
-                        &(sci_phy_get_controller(iphy)->uf_control),
-                        frame_index,
-                        (void **)&frame_header);
+                result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                  frame_index,
+                                                                  (void **)&frame_header);
 
                 if (result != SCI_SUCCESS)
                         return result;
@@ -1090,6 +1090,8 @@ static void scu_link_layer_tx_hard_reset(
 static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
 {
         struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+        struct isci_port *iport = iphy->owning_port;
+        struct isci_host *ihost = iport->owning_controller;
 
         /*
          * @todo We need to get to the controller to place this PE in a
@@ -1100,14 +1102,14 @@ static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
         scu_link_layer_stop_protocol_engine(iphy);
 
         if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
-                sci_controller_link_down(sci_phy_get_controller(iphy),
-                                         phy_get_non_dummy_port(iphy),
-                                         iphy);
+                sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
 }
 
 static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
 {
         struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+        struct isci_port *iport = iphy->owning_port;
+        struct isci_host *ihost = iport->owning_controller;
 
         scu_link_layer_stop_protocol_engine(iphy);
         scu_link_layer_start_oob(iphy);
@@ -1117,9 +1119,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
         iphy->bcn_received_while_port_unassigned = false;
 
         if (iphy->sm.previous_state_id == SCI_PHY_READY)
-                sci_controller_link_down(sci_phy_get_controller(iphy),
-                                         phy_get_non_dummy_port(iphy),
-                                         iphy);
+                sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
 
         sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
 }
@@ -1127,11 +1127,10 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
 static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
 {
         struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+        struct isci_port *iport = iphy->owning_port;
+        struct isci_host *ihost = iport->owning_controller;
 
-        sci_controller_link_up(sci_phy_get_controller(iphy),
-                               phy_get_non_dummy_port(iphy),
-                               iphy);
-
+        sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
 }
 
 static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 5d2c1b4906a3..67699c8e321c 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -440,23 +440,6 @@ enum sci_phy_states {
         SCI_PHY_FINAL,
 };
 
-/**
- * sci_phy_get_index() -
- *
- * This macro returns the phy index for the specified phy
- */
-#define sci_phy_get_index(phy) \
-        ((phy)->phy_index)
-
-/**
- * sci_phy_get_controller() - This macro returns the controller for this
- *    phy
- *
- *
- */
-#define sci_phy_get_controller(phy) \
-        (sci_port_get_controller((phy)->owning_port))
-
 void sci_phy_construct(
         struct isci_phy *iphy,
         struct isci_port *iport,
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 1822ed68409e..8f6f9b77e41a 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -654,7 +654,7 @@ static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy
 void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
                              bool do_notify_user)
 {
-        struct isci_host *ihost = sci_port_get_controller(iport);
+        struct isci_host *ihost = iport->owning_controller;
 
         iport->active_phy_mask &= ~(1 << iphy->phy_index);
 
@@ -678,7 +678,7 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
          * invalid link.
          */
         if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
-                sci_controller_set_invalid_phy(ihost, iphy);
+                ihost->invalid_phy_mask |= 1 << iphy->phy_index;
                 dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
         }
 }
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 4c4ab8126d9f..b50ecd4e8f9c 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -210,23 +210,6 @@ enum sci_port_states {
 
 };
 
-/**
- * sci_port_get_controller() -
- *
- * Helper macro to get the owning controller of this port
- */
-#define sci_port_get_controller(this_port) \
-        ((this_port)->owning_controller)
-
-/**
- * sci_port_get_index() -
- *
- * This macro returns the physical port index for this port object
- */
-#define sci_port_get_index(this_port) \
-        ((this_port)->physical_port_index)
-
-
 static inline void sci_port_decrement_request_count(struct isci_port *iport)
 {
         if (WARN_ONCE(iport->started_request_count == 0,
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index c8b16db6bbde..486b113c634a 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -367,10 +367,10 @@ static void sci_mpc_agent_link_up(struct isci_host *ihost,
         if (!iport)
                 return;
 
-        port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy));
+        port_agent->phy_ready_mask |= (1 << iphy->phy_index);
         sci_port_link_up(iport, iphy);
-        if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy))))
-                port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy));
+        if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+                port_agent->phy_configured_mask |= (1 << iphy->phy_index);
 }
 
 /**
@@ -404,10 +404,8 @@ static void sci_mpc_agent_link_down(
          * rebuilding the port with the phys that remain in the ready
          * state.
          */
-        port_agent->phy_ready_mask &=
-                ~(1 << sci_phy_get_index(iphy));
-        port_agent->phy_configured_mask &=
-                ~(1 << sci_phy_get_index(iphy));
+        port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+        port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
 
         /*
          * Check to see if there are more phys waiting to be
@@ -643,7 +641,7 @@ static void sci_apc_agent_link_down(
         struct isci_port *iport,
         struct isci_phy *iphy)
 {
-        port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy));
+        port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
 
         if (!iport)
                 return;
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 8c752abb4331..85e54f542075 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -456,7 +456,7 @@ static void sci_remote_device_start_request(struct isci_remote_device *idev,
                 sci_port_complete_io(iport, idev, ireq);
         else {
                 kref_get(&idev->kref);
-                sci_remote_device_increment_request_count(idev);
+                idev->started_request_count++;
         }
 }
 
@@ -636,7 +636,7 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
                          * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
                          */
                         sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
-                } else if (sci_remote_device_get_request_count(idev) == 0)
+                } else if (idev->started_request_count == 0)
                         sci_change_state(sm, SCI_STP_DEV_IDLE);
                 break;
         case SCI_SMP_DEV_CMD:
@@ -650,10 +650,10 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
                 if (status != SCI_SUCCESS)
                         break;
 
-                if (sci_remote_device_get_request_count(idev) == 0)
+                if (idev->started_request_count == 0)
                         sci_remote_node_context_destruct(&idev->rnc,
                                                          rnc_destruct_done,
                                                          idev);
                 break;
         }
 
@@ -761,26 +761,17 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
         return status;
 }
 
-/**
- *
- * @sci_dev:
- * @request:
- *
- * This method takes the request and bulids an appropriate SCU context for the
- * request and then requests the controller to post the request. none
- */
-void sci_remote_device_post_request(
-        struct isci_remote_device *idev,
-        u32 request)
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
 {
+        struct isci_port *iport = idev->owning_port;
         u32 context;
 
-        context = sci_remote_device_build_command_context(idev, request);
+        context = request |
+                  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                  idev->rnc.remote_node_index;
 
-        sci_controller_post_request(
-                sci_remote_device_get_controller(idev),
-                context
-        );
+        sci_controller_post_request(iport->owning_controller, context);
 }
 
 /* called once the remote node context has transisitioned to a
@@ -893,7 +884,7 @@ static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine
 static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
 {
         struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-        struct isci_host *ihost = sci_remote_device_get_controller(idev);
+        struct isci_host *ihost = idev->owning_port->owning_controller;
 
         isci_remote_device_not_ready(ihost, idev,
                                      SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
@@ -961,7 +952,7 @@ static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_stat
 static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
 {
         struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-        struct isci_host *ihost = sci_remote_device_get_controller(idev);
+        struct isci_host *ihost = idev->owning_port->owning_controller;
 
         BUG_ON(idev->working_request == NULL);
 
@@ -972,7 +963,7 @@ static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state
 static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
 {
         struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-        struct isci_host *ihost = sci_remote_device_get_controller(idev);
+        struct isci_host *ihost = idev->owning_port->owning_controller;
 
         if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
                 isci_remote_device_not_ready(ihost, idev,
@@ -982,7 +973,7 @@ static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base
 static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
 {
         struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-        struct isci_host *ihost = sci_remote_device_get_controller(idev);
+        struct isci_host *ihost = idev->owning_port->owning_controller;
 
         isci_remote_device_ready(ihost, idev);
 }
@@ -990,7 +981,7 @@ static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_stat
 static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
 {
         struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-        struct isci_host *ihost = sci_remote_device_get_controller(idev);
+        struct isci_host *ihost = idev->owning_port->owning_controller;
 
         BUG_ON(idev->working_request == NULL);
 
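For reference, sci_remote_device_post_request() above now open-codes the SCU post context: the request opcode OR'd with the protocol engine group, the logical port index, and the remote node index. The sketch below shows the same packing in a standalone form; the shift values are placeholders invented for the illustration, not the driver's real SCU_CONTEXT_COMMAND_* constants, which this patch does not show.

/* Illustrative sketch, not part of the patch: SCU post-context packing as in
 * sci_remote_device_post_request().  PEG_SHIFT/PORT_SHIFT are assumed values. */
#include <stdio.h>

enum { PEG_SHIFT = 28, PORT_SHIFT = 24 };

static unsigned int build_post_context(unsigned int request, unsigned int peg,
                                        unsigned int port, unsigned int rni)
{
        return request | (peg << PEG_SHIFT) | (port << PORT_SHIFT) | rni;
}

int main(void)
{
        /* protocol engine group 0 (ISCI_PEG), logical port 1, remote node index 0x11 */
        printf("context=0x%08x\n", build_post_context(0, 0, 1, 0x11));
        return 0;
}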
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index fa9a0e6cc309..57ccfc3d6ad3 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -305,91 +305,18 @@ static inline bool dev_is_expander(struct domain_device *dev)
         return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
 }
 
-/**
- * sci_remote_device_increment_request_count() -
- *
- * This macro incrments the request count for this device
- */
-#define sci_remote_device_increment_request_count(idev) \
-        ((idev)->started_request_count++)
-
-/**
- * sci_remote_device_decrement_request_count() -
- *
- * This macro decrements the request count for this device. This count will
- * never decrment past 0.
- */
-#define sci_remote_device_decrement_request_count(idev) \
-        ((idev)->started_request_count > 0 ? \
-        (idev)->started_request_count-- : 0)
-
-/**
- * sci_remote_device_get_request_count() -
- *
- * This is a helper macro to return the current device request count.
- */
-#define sci_remote_device_get_request_count(idev) \
-        ((idev)->started_request_count)
-
-/**
- * sci_remote_device_get_controller() -
- *
- * This macro returns the controller object that contains this device object
- */
-#define sci_remote_device_get_controller(idev) \
-        sci_port_get_controller(sci_remote_device_get_port(idev))
-
-/**
- * sci_remote_device_get_port() -
- *
- * This macro returns the owning port of this device
- */
-#define sci_remote_device_get_port(idev) \
-        ((idev)->owning_port)
-
-/**
- * sci_remote_device_get_controller_peg() -
- *
- * This macro returns the controllers protocol engine group
- */
-#define sci_remote_device_get_controller_peg(idev) \
-        (\
-                sci_controller_get_protocol_engine_group(\
-                        sci_port_get_controller(\
-                                sci_remote_device_get_port(idev) \
-                        ) \
-                ) \
-        )
-
-/**
- * sci_remote_device_get_index() -
- *
- * This macro returns the remote node index for this device object
- */
-#define sci_remote_device_get_index(idev) \
-        ((idev)->rnc.remote_node_index)
-
-/**
- * sci_remote_device_build_command_context() -
- *
- * This macro builds a remote device context for the SCU post request operation
- */
-#define sci_remote_device_build_command_context(device, command) \
-        ((command) \
-                | (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \
-                | ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \
-                | (sci_remote_device_get_index((device))) \
-        )
-
-/**
- * sci_remote_device_set_working_request() -
- *
- * This macro makes the working request assingment for the remote device
- * object. To clear the working request use this macro with a NULL request
- * object.
- */
-#define sci_remote_device_set_working_request(device, request) \
-        ((device)->working_request = (request))
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+        /* XXX delete this voodoo when converting to the top-level device
+         * reference count
+         */
+        if (WARN_ONCE(idev->started_request_count == 0,
+                      "%s: tried to decrement started_request_count past 0!?",
+                      __func__))
+                /* pass */;
+        else
+                idev->started_request_count--;
+}
 
 enum sci_status sci_remote_device_frame_handler(
         struct isci_remote_device *idev,
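The new inline above replaces the old saturating macro: the count is never decremented past zero, and an underflow attempt triggers a one-time warning via WARN_ONCE(). A rough userspace sketch of the same guard follows, assuming nothing beyond what the hunk shows and approximating WARN_ONCE() with a one-time fprintf().

/* Illustrative userspace sketch, not part of the patch. */
#include <stdio.h>

static unsigned int started_request_count;
static int warned;

static void decrement_request_count(void)
{
        if (started_request_count == 0) {
                if (!warned) {
                        fprintf(stderr,
                                "tried to decrement started_request_count past 0!?\n");
                        warned = 1;
                }
                return;                 /* saturate at zero instead of wrapping */
        }
        started_request_count--;
}

int main(void)
{
        decrement_request_count();      /* warns once */
        decrement_request_count();      /* silent */
        started_request_count = 1;
        decrement_request_count();
        printf("count=%u\n", started_request_count);
        return 0;
}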
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index c2dfd5a72181..748e8339d1ec 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -111,7 +111,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
         struct isci_host *ihost;
         __le64 sas_addr;
 
-        ihost = sci_remote_device_get_controller(idev);
+        ihost = idev->owning_port->owning_controller;
         rnc = sci_rnc_by_id(ihost, rni);
 
         memset(rnc, 0, sizeof(union scu_remote_node_context)
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index b475c5c26642..41580ad12520 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -204,9 +204,6 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
 bool sci_remote_node_context_is_ready(
         struct sci_remote_node_context *sci_rnc);
 
-#define sci_remote_node_context_get_remote_node_index(rcn) \
-        ((rnc)->remote_node_index)
-
 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
                                                       u32 event_code);
 enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index bcb3c08c19a7..7c500bb6a8e0 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
| @@ -211,22 +211,21 @@ static void scu_ssp_reqeust_construct_task_context( | |||
| 211 | struct isci_remote_device *idev; | 211 | struct isci_remote_device *idev; |
| 212 | struct isci_port *iport; | 212 | struct isci_port *iport; |
| 213 | 213 | ||
| 214 | idev = sci_request_get_device(ireq); | 214 | idev = ireq->target_device; |
| 215 | iport = sci_request_get_port(ireq); | 215 | iport = idev->owning_port; |
| 216 | 216 | ||
| 217 | /* Fill in the TC with the its required data */ | 217 | /* Fill in the TC with the its required data */ |
| 218 | task_context->abort = 0; | 218 | task_context->abort = 0; |
| 219 | task_context->priority = 0; | 219 | task_context->priority = 0; |
| 220 | task_context->initiator_request = 1; | 220 | task_context->initiator_request = 1; |
| 221 | task_context->connection_rate = idev->connection_rate; | 221 | task_context->connection_rate = idev->connection_rate; |
| 222 | task_context->protocol_engine_index = | 222 | task_context->protocol_engine_index = ISCI_PEG; |
| 223 | sci_controller_get_protocol_engine_group(controller); | 223 | task_context->logical_port_index = iport->physical_port_index; |
| 224 | task_context->logical_port_index = sci_port_get_index(iport); | ||
| 225 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; | 224 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; |
| 226 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 225 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
| 227 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; | 226 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; |
| 228 | 227 | ||
| 229 | task_context->remote_node_index = sci_remote_device_get_index(idev); | 228 | task_context->remote_node_index = idev->rnc.remote_node_index; |
| 230 | task_context->command_code = 0; | 229 | task_context->command_code = 0; |
| 231 | 230 | ||
| 232 | task_context->link_layer_control = 0; | 231 | task_context->link_layer_control = 0; |
| @@ -242,9 +241,8 @@ static void scu_ssp_reqeust_construct_task_context( | |||
| 242 | task_context->task_phase = 0x01; | 241 | task_context->task_phase = 0x01; |
| 243 | 242 | ||
| 244 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 243 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
| 245 | (sci_controller_get_protocol_engine_group(controller) << | 244 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
| 246 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 245 | (iport->physical_port_index << |
| 247 | (sci_port_get_index(iport) << | ||
| 248 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 246 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
| 249 | ISCI_TAG_TCI(ireq->io_tag)); | 247 | ISCI_TAG_TCI(ireq->io_tag)); |
| 250 | 248 | ||
| @@ -349,23 +347,21 @@ static void scu_sata_reqeust_construct_task_context( | |||
| 349 | struct isci_remote_device *idev; | 347 | struct isci_remote_device *idev; |
| 350 | struct isci_port *iport; | 348 | struct isci_port *iport; |
| 351 | 349 | ||
| 352 | idev = sci_request_get_device(ireq); | 350 | idev = ireq->target_device; |
| 353 | iport = sci_request_get_port(ireq); | 351 | iport = idev->owning_port; |
| 354 | 352 | ||
| 355 | /* Fill in the TC with the its required data */ | 353 | /* Fill in the TC with the its required data */ |
| 356 | task_context->abort = 0; | 354 | task_context->abort = 0; |
| 357 | task_context->priority = SCU_TASK_PRIORITY_NORMAL; | 355 | task_context->priority = SCU_TASK_PRIORITY_NORMAL; |
| 358 | task_context->initiator_request = 1; | 356 | task_context->initiator_request = 1; |
| 359 | task_context->connection_rate = idev->connection_rate; | 357 | task_context->connection_rate = idev->connection_rate; |
| 360 | task_context->protocol_engine_index = | 358 | task_context->protocol_engine_index = ISCI_PEG; |
| 361 | sci_controller_get_protocol_engine_group(controller); | 359 | task_context->logical_port_index = iport->physical_port_index; |
| 362 | task_context->logical_port_index = | ||
| 363 | sci_port_get_index(iport); | ||
| 364 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; | 360 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; |
| 365 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 361 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
| 366 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; | 362 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; |
| 367 | 363 | ||
| 368 | task_context->remote_node_index = sci_remote_device_get_index(idev); | 364 | task_context->remote_node_index = idev->rnc.remote_node_index; |
| 369 | task_context->command_code = 0; | 365 | task_context->command_code = 0; |
| 370 | 366 | ||
| 371 | task_context->link_layer_control = 0; | 367 | task_context->link_layer_control = 0; |
| @@ -385,11 +381,10 @@ static void scu_sata_reqeust_construct_task_context( | |||
| 385 | task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; | 381 | task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; |
| 386 | 382 | ||
| 387 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 383 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
| 388 | (sci_controller_get_protocol_engine_group(controller) << | 384 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
| 389 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 385 | (iport->physical_port_index << |
| 390 | (sci_port_get_index(iport) << | 386 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
| 391 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 387 | ISCI_TAG_TCI(ireq->io_tag)); |
| 392 | ISCI_TAG_TCI(ireq->io_tag)); | ||
| 393 | /* | 388 | /* |
| 394 | * Copy the physical address for the command buffer to the SCU Task | 389 | * Copy the physical address for the command buffer to the SCU Task |
| 395 | * Context. We must offset the command buffer by 4 bytes because the | 390 | * Context. We must offset the command buffer by 4 bytes because the |
| @@ -716,10 +711,8 @@ sci_io_request_terminate(struct isci_request *ireq) | |||
| 716 | 711 | ||
| 717 | switch (state) { | 712 | switch (state) { |
| 718 | case SCI_REQ_CONSTRUCTED: | 713 | case SCI_REQ_CONSTRUCTED: |
| 719 | sci_request_set_status(ireq, | 714 | ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; |
| 720 | SCU_TASK_DONE_TASK_ABORT, | 715 | ireq->sci_status = SCI_FAILURE_IO_TERMINATED; |
| 721 | SCI_FAILURE_IO_TERMINATED); | ||
| 722 | |||
| 723 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 716 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 724 | return SCI_SUCCESS; | 717 | return SCI_SUCCESS; |
| 725 | case SCI_REQ_STARTED: | 718 | case SCI_REQ_STARTED: |
| @@ -848,9 +841,8 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
| 848 | */ | 841 | */ |
| 849 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 842 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 850 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 843 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
| 851 | sci_request_set_status(ireq, | 844 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 852 | SCU_TASK_DONE_GOOD, | 845 | ireq->sci_status = SCI_SUCCESS; |
| 853 | SCI_SUCCESS); | ||
| 854 | break; | 846 | break; |
| 855 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { | 847 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { |
| 856 | /* There are times when the SCU hardware will return an early | 848 | /* There are times when the SCU hardware will return an early |
| @@ -868,13 +860,11 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
| 868 | word_cnt); | 860 | word_cnt); |
| 869 | 861 | ||
| 870 | if (resp->status == 0) { | 862 | if (resp->status == 0) { |
| 871 | sci_request_set_status(ireq, | 863 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 872 | SCU_TASK_DONE_GOOD, | 864 | ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; |
| 873 | SCI_SUCCESS_IO_DONE_EARLY); | ||
| 874 | } else { | 865 | } else { |
| 875 | sci_request_set_status(ireq, | 866 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 876 | SCU_TASK_DONE_CHECK_RESPONSE, | 867 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 877 | SCI_FAILURE_IO_RESPONSE_VALID); | ||
| 878 | } | 868 | } |
| 879 | break; | 869 | break; |
| 880 | } | 870 | } |
| @@ -885,9 +875,8 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
| 885 | &ireq->ssp.rsp, | 875 | &ireq->ssp.rsp, |
| 886 | word_cnt); | 876 | word_cnt); |
| 887 | 877 | ||
| 888 | sci_request_set_status(ireq, | 878 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 889 | SCU_TASK_DONE_CHECK_RESPONSE, | 879 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 890 | SCI_FAILURE_IO_RESPONSE_VALID); | ||
| 891 | break; | 880 | break; |
| 892 | } | 881 | } |
| 893 | 882 | ||
| @@ -900,13 +889,12 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
| 900 | datapres = resp_iu->datapres; | 889 | datapres = resp_iu->datapres; |
| 901 | 890 | ||
| 902 | if (datapres == 1 || datapres == 2) { | 891 | if (datapres == 1 || datapres == 2) { |
| 903 | sci_request_set_status(ireq, | 892 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 904 | SCU_TASK_DONE_CHECK_RESPONSE, | 893 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 905 | SCI_FAILURE_IO_RESPONSE_VALID); | 894 | } else { |
| 906 | } else | 895 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 907 | sci_request_set_status(ireq, | 896 | ireq->sci_status = SCI_SUCCESS; |
| 908 | SCU_TASK_DONE_GOOD, | 897 | } |
| 909 | SCI_SUCCESS); | ||
| 910 | break; | 898 | break; |
| 911 | /* only stp device gets suspended. */ | 899 | /* only stp device gets suspended. */ |
| 912 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): | 900 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): |
| @@ -921,15 +909,13 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
| 921 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): | 909 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): |
| 922 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): | 910 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): |
| 923 | if (ireq->protocol == SCIC_STP_PROTOCOL) { | 911 | if (ireq->protocol == SCIC_STP_PROTOCOL) { |
| 924 | sci_request_set_status(ireq, | 912 | ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
| 925 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 913 | SCU_COMPLETION_TL_STATUS_SHIFT; |
| 926 | SCU_COMPLETION_TL_STATUS_SHIFT, | 914 | ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; |
| 927 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); | ||
| 928 | } else { | 915 | } else { |
| 929 | sci_request_set_status(ireq, | 916 | ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
| 930 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 917 | SCU_COMPLETION_TL_STATUS_SHIFT; |
| 931 | SCU_COMPLETION_TL_STATUS_SHIFT, | 918 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 932 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 933 | } | 919 | } |
| 934 | break; | 920 | break; |
| 935 | 921 | ||
| @@ -944,10 +930,9 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
| 944 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): | 930 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): |
| 945 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): | 931 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): |
| 946 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): | 932 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): |
| 947 | sci_request_set_status(ireq, | 933 | ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
| 948 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 934 | SCU_COMPLETION_TL_STATUS_SHIFT; |
| 949 | SCU_COMPLETION_TL_STATUS_SHIFT, | 935 | ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; |
| 950 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); | ||
| 951 | break; | 936 | break; |
| 952 | 937 | ||
| 953 | /* neither ssp nor stp gets suspended. */ | 938 | /* neither ssp nor stp gets suspended. */ |
| @@ -967,11 +952,9 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
| 967 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): | 952 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): |
| 968 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): | 953 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): |
| 969 | default: | 954 | default: |
| 970 | sci_request_set_status( | 955 | ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
| 971 | ireq, | 956 | SCU_COMPLETION_TL_STATUS_SHIFT; |
| 972 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 957 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 973 | SCU_COMPLETION_TL_STATUS_SHIFT, | ||
| 974 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 975 | break; | 958 | break; |
| 976 | } | 959 | } |
| 977 | 960 | ||
| @@ -991,9 +974,8 @@ request_aborting_state_tc_event(struct isci_request *ireq, | |||
| 991 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 974 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 992 | case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): | 975 | case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): |
| 993 | case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): | 976 | case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): |
| 994 | sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, | 977 | ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; |
| 995 | SCI_FAILURE_IO_TERMINATED); | 978 | ireq->sci_status = SCI_FAILURE_IO_TERMINATED; |
| 996 | |||
| 997 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 979 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 998 | break; | 980 | break; |
| 999 | 981 | ||
| @@ -1012,9 +994,8 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq | |||
| 1012 | { | 994 | { |
| 1013 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 995 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 1014 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 996 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
| 1015 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 997 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1016 | SCI_SUCCESS); | 998 | ireq->sci_status = SCI_SUCCESS; |
| 1017 | |||
| 1018 | sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); | 999 | sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); |
| 1019 | break; | 1000 | break; |
| 1020 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): | 1001 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): |
| @@ -1036,10 +1017,8 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq | |||
| 1036 | * If a NAK was received, then it is up to the user to retry | 1017 | * If a NAK was received, then it is up to the user to retry |
| 1037 | * the request. | 1018 | * the request. |
| 1038 | */ | 1019 | */ |
| 1039 | sci_request_set_status(ireq, | 1020 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 1040 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1021 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1041 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 1042 | |||
| 1043 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1022 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1044 | break; | 1023 | break; |
| 1045 | } | 1024 | } |
| @@ -1057,12 +1036,10 @@ smp_request_await_response_tc_event(struct isci_request *ireq, | |||
| 1057 | * unexpected. but if the TC has success status, we | 1036 | * unexpected. but if the TC has success status, we |
| 1058 | * complete the IO anyway. | 1037 | * complete the IO anyway. |
| 1059 | */ | 1038 | */ |
| 1060 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1039 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1061 | SCI_SUCCESS); | 1040 | ireq->sci_status = SCI_SUCCESS; |
| 1062 | |||
| 1063 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1041 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1064 | break; | 1042 | break; |
| 1065 | |||
| 1066 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): | 1043 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): |
| 1067 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): | 1044 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): |
| 1068 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): | 1045 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): |
| @@ -1074,20 +1051,16 @@ smp_request_await_response_tc_event(struct isci_request *ireq, | |||
| 1074 | * these SMP_XXX_XX_ERR status. For these type of error, | 1051 | * these SMP_XXX_XX_ERR status. For these type of error, |
| 1075 | * we ask ihost user to retry the request. | 1052 | * we ask ihost user to retry the request. |
| 1076 | */ | 1053 | */ |
| 1077 | sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, | 1054 | ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; |
| 1078 | SCI_FAILURE_RETRY_REQUIRED); | 1055 | ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; |
| 1079 | |||
| 1080 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1056 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1081 | break; | 1057 | break; |
| 1082 | |||
| 1083 | default: | 1058 | default: |
| 1084 | /* All other completion status cause the IO to be complete. If a NAK | 1059 | /* All other completion status cause the IO to be complete. If a NAK |
| 1085 | * was received, then it is up to the user to retry the request | 1060 | * was received, then it is up to the user to retry the request |
| 1086 | */ | 1061 | */ |
| 1087 | sci_request_set_status(ireq, | 1062 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 1088 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1063 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1089 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 1090 | |||
| 1091 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1064 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1092 | break; | 1065 | break; |
| 1093 | } | 1066 | } |
| @@ -1101,9 +1074,8 @@ smp_request_await_tc_event(struct isci_request *ireq, | |||
| 1101 | { | 1074 | { |
| 1102 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1075 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 1103 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1076 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
| 1104 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1077 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1105 | SCI_SUCCESS); | 1078 | ireq->sci_status = SCI_SUCCESS; |
| 1106 | |||
| 1107 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1079 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1108 | break; | 1080 | break; |
| 1109 | default: | 1081 | default: |
| @@ -1111,10 +1083,8 @@ smp_request_await_tc_event(struct isci_request *ireq, | |||
| 1111 | * complete. If a NAK was received, then it is up to | 1083 | * complete. If a NAK was received, then it is up to |
| 1112 | * the user to retry the request. | 1084 | * the user to retry the request. |
| 1113 | */ | 1085 | */ |
| 1114 | sci_request_set_status(ireq, | 1086 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 1115 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1087 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1116 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 1117 | |||
| 1118 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1088 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1119 | break; | 1089 | break; |
| 1120 | } | 1090 | } |
| @@ -1171,9 +1141,8 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, | |||
| 1171 | { | 1141 | { |
| 1172 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1142 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 1173 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1143 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
| 1174 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1144 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1175 | SCI_SUCCESS); | 1145 | ireq->sci_status = SCI_SUCCESS; |
| 1176 | |||
| 1177 | sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); | 1146 | sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); |
| 1178 | break; | 1147 | break; |
| 1179 | 1148 | ||
| @@ -1182,10 +1151,8 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, | |||
| 1182 | * complete. If a NAK was received, then it is up to | 1151 | * complete. If a NAK was received, then it is up to |
| 1183 | * the user to retry the request. | 1152 | * the user to retry the request. |
| 1184 | */ | 1153 | */ |
| 1185 | sci_request_set_status(ireq, | 1154 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 1186 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1155 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1187 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 1188 | |||
| 1189 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1156 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1190 | break; | 1157 | break; |
| 1191 | } | 1158 | } |
| @@ -1363,10 +1330,8 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, | |||
| 1363 | 1330 | ||
| 1364 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1331 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 1365 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1332 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
| 1366 | sci_request_set_status(ireq, | 1333 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1367 | SCU_TASK_DONE_GOOD, | 1334 | ireq->sci_status = SCI_SUCCESS; |
| 1368 | SCI_SUCCESS); | ||
| 1369 | |||
| 1370 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); | 1335 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); |
| 1371 | break; | 1336 | break; |
| 1372 | 1337 | ||
| @@ -1375,10 +1340,8 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, | |||
| 1375 | * complete. If a NAK was received, then it is up to | 1340 | * complete. If a NAK was received, then it is up to |
| 1376 | * the user to retry the request. | 1341 | * the user to retry the request. |
| 1377 | */ | 1342 | */ |
| 1378 | sci_request_set_status(ireq, | 1343 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 1379 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1344 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1380 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 1381 | |||
| 1382 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1345 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1383 | break; | 1346 | break; |
| 1384 | } | 1347 | } |
| @@ -1426,11 +1389,8 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, | |||
| 1426 | * If a NAK was received, then it is up to the user to retry | 1389 | * If a NAK was received, then it is up to the user to retry |
| 1427 | * the request. | 1390 | * the request. |
| 1428 | */ | 1391 | */ |
| 1429 | sci_request_set_status( | 1392 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 1430 | ireq, | 1393 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1431 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | ||
| 1432 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 1433 | |||
| 1434 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1394 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1435 | break; | 1395 | break; |
| 1436 | } | 1396 | } |
| @@ -1438,15 +1398,6 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, | |||
| 1438 | return status; | 1398 | return status; |
| 1439 | } | 1399 | } |
| 1440 | 1400 | ||
| 1441 | static void sci_stp_request_udma_complete_request( | ||
| 1442 | struct isci_request *ireq, | ||
| 1443 | u32 scu_status, | ||
| 1444 | enum sci_status sci_status) | ||
| 1445 | { | ||
| 1446 | sci_request_set_status(ireq, scu_status, sci_status); | ||
| 1447 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | ||
| 1448 | } | ||
| 1449 | |||
| 1450 | static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, | 1401 | static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, |
| 1451 | u32 frame_index) | 1402 | u32 frame_index) |
| 1452 | { | 1403 | { |
| @@ -1512,13 +1463,12 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1512 | 1463 | ||
| 1513 | if (resp_iu->datapres == 0x01 || | 1464 | if (resp_iu->datapres == 0x01 || |
| 1514 | resp_iu->datapres == 0x02) { | 1465 | resp_iu->datapres == 0x02) { |
| 1515 | sci_request_set_status(ireq, | 1466 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 1516 | SCU_TASK_DONE_CHECK_RESPONSE, | 1467 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1517 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1468 | } else { |
| 1518 | } else | 1469 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1519 | sci_request_set_status(ireq, | 1470 | ireq->sci_status = SCI_SUCCESS; |
| 1520 | SCU_TASK_DONE_GOOD, | 1471 | } |
| 1521 | SCI_SUCCESS); | ||
| 1522 | } else { | 1472 | } else { |
| 1523 | /* not a response frame, why did it get forwarded? */ | 1473 | /* not a response frame, why did it get forwarded? */ |
| 1524 | dev_err(&ihost->pdev->dev, | 1474 | dev_err(&ihost->pdev->dev, |
| @@ -1567,9 +1517,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1567 | sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, | 1517 | sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, |
| 1568 | smp_resp, word_cnt); | 1518 | smp_resp, word_cnt); |
| 1569 | 1519 | ||
| 1570 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1520 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1571 | SCI_SUCCESS); | 1521 | ireq->sci_status = SCI_SUCCESS; |
| 1572 | |||
| 1573 | sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); | 1522 | sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); |
| 1574 | } else { | 1523 | } else { |
| 1575 | /* | 1524 | /* |
| @@ -1584,10 +1533,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1584 | frame_index, | 1533 | frame_index, |
| 1585 | rsp_hdr->frame_type); | 1534 | rsp_hdr->frame_type); |
| 1586 | 1535 | ||
| 1587 | sci_request_set_status(ireq, | 1536 | ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; |
| 1588 | SCU_TASK_DONE_SMP_FRM_TYPE_ERR, | 1537 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1589 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 1590 | |||
| 1591 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1538 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1592 | } | 1539 | } |
| 1593 | 1540 | ||
| @@ -1602,16 +1549,14 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1602 | 1549 | ||
| 1603 | case SCI_REQ_STP_UDMA_WAIT_D2H: | 1550 | case SCI_REQ_STP_UDMA_WAIT_D2H: |
| 1604 | /* Use the general frame handler to copy the response data */ | 1551 | /* Use the general frame handler to copy the response data */ |
| 1605 | status = sci_stp_request_udma_general_frame_handler(ireq, | 1552 | status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); |
| 1606 | frame_index); | ||
| 1607 | 1553 | ||
| 1608 | if (status != SCI_SUCCESS) | 1554 | if (status != SCI_SUCCESS) |
| 1609 | return status; | 1555 | return status; |
| 1610 | 1556 | ||
| 1611 | sci_stp_request_udma_complete_request(ireq, | 1557 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 1612 | SCU_TASK_DONE_CHECK_RESPONSE, | 1558 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 1613 | SCI_FAILURE_IO_RESPONSE_VALID); | 1559 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1614 | |||
| 1615 | return SCI_SUCCESS; | 1560 | return SCI_SUCCESS; |
| 1616 | 1561 | ||
| 1617 | case SCI_REQ_STP_NON_DATA_WAIT_D2H: { | 1562 | case SCI_REQ_STP_NON_DATA_WAIT_D2H: { |
| @@ -1645,8 +1590,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1645 | frame_buffer); | 1590 | frame_buffer); |
| 1646 | 1591 | ||
| 1647 | /* The command has completed with error */ | 1592 | /* The command has completed with error */ |
| 1648 | sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, | 1593 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 1649 | SCI_FAILURE_IO_RESPONSE_VALID); | 1594 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 1650 | break; | 1595 | break; |
| 1651 | 1596 | ||
| 1652 | default: | 1597 | default: |
| @@ -1655,8 +1600,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1655 | "violation occurred\n", __func__, stp_req, | 1600 | "violation occurred\n", __func__, stp_req, |
| 1656 | frame_index); | 1601 | frame_index); |
| 1657 | 1602 | ||
| 1658 | sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, | 1603 | ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; |
| 1659 | SCI_FAILURE_PROTOCOL_VIOLATION); | 1604 | ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; |
| 1660 | break; | 1605 | break; |
| 1661 | } | 1606 | } |
| 1662 | 1607 | ||
| @@ -1753,10 +1698,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1753 | frame_header, | 1698 | frame_header, |
| 1754 | frame_buffer); | 1699 | frame_buffer); |
| 1755 | 1700 | ||
| 1756 | sci_request_set_status(ireq, | 1701 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 1757 | SCU_TASK_DONE_CHECK_RESPONSE, | 1702 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 1758 | SCI_FAILURE_IO_RESPONSE_VALID); | ||
| 1759 | |||
| 1760 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1703 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1761 | break; | 1704 | break; |
| 1762 | 1705 | ||
| @@ -1800,10 +1743,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1800 | frame_index, | 1743 | frame_index, |
| 1801 | frame_header->fis_type); | 1744 | frame_header->fis_type); |
| 1802 | 1745 | ||
| 1803 | sci_request_set_status(ireq, | 1746 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1804 | SCU_TASK_DONE_GOOD, | 1747 | ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; |
| 1805 | SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); | ||
| 1806 | |||
| 1807 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1748 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1808 | 1749 | ||
| 1809 | /* Frame is decoded return it to the controller */ | 1750 | /* Frame is decoded return it to the controller */ |
| @@ -1833,10 +1774,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1833 | return status; | 1774 | return status; |
| 1834 | 1775 | ||
| 1835 | if ((stp_req->status & ATA_BUSY) == 0) { | 1776 | if ((stp_req->status & ATA_BUSY) == 0) { |
| 1836 | sci_request_set_status(ireq, | 1777 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 1837 | SCU_TASK_DONE_CHECK_RESPONSE, | 1778 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 1838 | SCI_FAILURE_IO_RESPONSE_VALID); | ||
| 1839 | |||
| 1840 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1779 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1841 | } else { | 1780 | } else { |
| 1842 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); | 1781 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); |
| @@ -1873,9 +1812,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1873 | frame_buffer); | 1812 | frame_buffer); |
| 1874 | 1813 | ||
| 1875 | /* The command has completed with error */ | 1814 | /* The command has completed with error */ |
| 1876 | sci_request_set_status(ireq, | 1815 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 1877 | SCU_TASK_DONE_CHECK_RESPONSE, | 1816 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 1878 | SCI_FAILURE_IO_RESPONSE_VALID); | ||
| 1879 | break; | 1817 | break; |
| 1880 | 1818 | ||
| 1881 | default: | 1819 | default: |
| @@ -1886,9 +1824,8 @@ sci_io_request_frame_handler(struct isci_request *ireq, | |||
| 1886 | stp_req, | 1824 | stp_req, |
| 1887 | frame_index); | 1825 | frame_index); |
| 1888 | 1826 | ||
| 1889 | sci_request_set_status(ireq, | 1827 | ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; |
| 1890 | SCU_TASK_DONE_UNEXP_FIS, | 1828 | ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; |
| 1891 | SCI_FAILURE_PROTOCOL_VIOLATION); | ||
| 1892 | break; | 1829 | break; |
| 1893 | } | 1830 | } |
| 1894 | 1831 | ||
| @@ -1927,9 +1864,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
| 1927 | 1864 | ||
| 1928 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1865 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 1929 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1866 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
| 1930 | sci_stp_request_udma_complete_request(ireq, | 1867 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1931 | SCU_TASK_DONE_GOOD, | 1868 | ireq->sci_status = SCI_SUCCESS; |
| 1932 | SCI_SUCCESS); | 1869 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1933 | break; | 1870 | break; |
| 1934 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): | 1871 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): |
| 1935 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): | 1872 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): |
| @@ -1941,9 +1878,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
| 1941 | sci_remote_device_suspend(ireq->target_device, | 1878 | sci_remote_device_suspend(ireq->target_device, |
| 1942 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); | 1879 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); |
| 1943 | 1880 | ||
| 1944 | sci_stp_request_udma_complete_request(ireq, | 1881 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
| 1945 | SCU_TASK_DONE_CHECK_RESPONSE, | 1882 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
| 1946 | SCI_FAILURE_IO_RESPONSE_VALID); | 1883 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1947 | } else { | 1884 | } else { |
| 1948 | /* If we have an error completion status for the | 1885 | /* If we have an error completion status for the |
| 1949 | * TC then we can expect a D2H register FIS from | 1886 | * TC then we can expect a D2H register FIS from |
| @@ -1970,9 +1907,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
| 1970 | /* Fall through to the default case */ | 1907 | /* Fall through to the default case */ |
| 1971 | default: | 1908 | default: |
| 1972 | /* All other completion statuses cause the IO to be completed. */ | 1909 | /* All other completion statuses cause the IO to be completed. */ |
| 1973 | sci_stp_request_udma_complete_request(ireq, | 1910 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 1974 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1911 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 1975 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1912 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 1976 | break; | 1913 | break; |
| 1977 | } | 1914 | } |
| 1978 | 1915 | ||
| @@ -1985,9 +1922,8 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, | |||
| 1985 | { | 1922 | { |
| 1986 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1923 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 1987 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1924 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
| 1988 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1925 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 1989 | SCI_SUCCESS); | 1926 | ireq->sci_status = SCI_SUCCESS; |
| 1990 | |||
| 1991 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); | 1927 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); |
| 1992 | break; | 1928 | break; |
| 1993 | 1929 | ||
| @@ -1997,10 +1933,8 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, | |||
| 1997 | * If a NAK was received, then it is up to the user to retry | 1933 | * If a NAK was received, then it is up to the user to retry |
| 1998 | * the request. | 1934 | * the request. |
| 1999 | */ | 1935 | */ |
| 2000 | sci_request_set_status(ireq, | 1936 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 2001 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1937 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 2002 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 2003 | |||
| 2004 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1938 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 2005 | break; | 1939 | break; |
| 2006 | } | 1940 | } |
| @@ -2014,9 +1948,8 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, | |||
| 2014 | { | 1948 | { |
| 2015 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1949 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
| 2016 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1950 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
| 2017 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1951 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
| 2018 | SCI_SUCCESS); | 1952 | ireq->sci_status = SCI_SUCCESS; |
| 2019 | |||
| 2020 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); | 1953 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); |
| 2021 | break; | 1954 | break; |
| 2022 | 1955 | ||
| @@ -2025,10 +1958,8 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, | |||
| 2025 | * a NAK was received, then it is up to the user to retry the | 1958 | * a NAK was received, then it is up to the user to retry the |
| 2026 | * request. | 1959 | * request. |
| 2027 | */ | 1960 | */ |
| 2028 | sci_request_set_status(ireq, | 1961 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
| 2029 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1962 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
| 2030 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | ||
| 2031 | |||
| 2032 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1963 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
| 2033 | break; | 1964 | break; |
| 2034 | } | 1965 | } |
| @@ -2504,7 +2435,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
| 2504 | completion_status); | 2435 | completion_status); |
| 2505 | 2436 | ||
| 2506 | spin_lock(&request->state_lock); | 2437 | spin_lock(&request->state_lock); |
| 2507 | request_status = isci_request_get_state(request); | 2438 | request_status = request->status; |
| 2508 | 2439 | ||
| 2509 | /* Decode the request status. Note that if the request has been | 2440 | /* Decode the request status. Note that if the request has been |
| 2510 | * aborted by a task management function, we don't care | 2441 | * aborted by a task management function, we don't care |
| @@ -2904,24 +2835,21 @@ static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct s | |||
| 2904 | { | 2835 | { |
| 2905 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2836 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
| 2906 | 2837 | ||
| 2907 | sci_remote_device_set_working_request(ireq->target_device, | 2838 | ireq->target_device->working_request = ireq; |
| 2908 | ireq); | ||
| 2909 | } | 2839 | } |
| 2910 | 2840 | ||
| 2911 | static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) | 2841 | static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) |
| 2912 | { | 2842 | { |
| 2913 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2843 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
| 2914 | 2844 | ||
| 2915 | sci_remote_device_set_working_request(ireq->target_device, | 2845 | ireq->target_device->working_request = ireq; |
| 2916 | ireq); | ||
| 2917 | } | 2846 | } |
| 2918 | 2847 | ||
| 2919 | static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) | 2848 | static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) |
| 2920 | { | 2849 | { |
| 2921 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2850 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
| 2922 | 2851 | ||
| 2923 | sci_remote_device_set_working_request(ireq->target_device, | 2852 | ireq->target_device->working_request = ireq; |
| 2924 | ireq); | ||
| 2925 | } | 2853 | } |
| 2926 | 2854 | ||
| 2927 | static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) | 2855 | static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) |
| @@ -3141,8 +3069,8 @@ sci_io_request_construct_smp(struct device *dev, | |||
| 3141 | 3069 | ||
| 3142 | task_context = ireq->tc; | 3070 | task_context = ireq->tc; |
| 3143 | 3071 | ||
| 3144 | idev = sci_request_get_device(ireq); | 3072 | idev = ireq->target_device; |
| 3145 | iport = sci_request_get_port(ireq); | 3073 | iport = idev->owning_port; |
| 3146 | 3074 | ||
| 3147 | /* | 3075 | /* |
| 3148 | * Fill in the TC with its required data | 3076 | * Fill in the TC with its required data |
| @@ -3151,9 +3079,8 @@ sci_io_request_construct_smp(struct device *dev, | |||
| 3151 | task_context->priority = 0; | 3079 | task_context->priority = 0; |
| 3152 | task_context->initiator_request = 1; | 3080 | task_context->initiator_request = 1; |
| 3153 | task_context->connection_rate = idev->connection_rate; | 3081 | task_context->connection_rate = idev->connection_rate; |
| 3154 | task_context->protocol_engine_index = | 3082 | task_context->protocol_engine_index = ISCI_PEG; |
| 3155 | sci_controller_get_protocol_engine_group(ihost); | 3083 | task_context->logical_port_index = iport->physical_port_index; |
| 3156 | task_context->logical_port_index = sci_port_get_index(iport); | ||
| 3157 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; | 3084 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; |
| 3158 | task_context->abort = 0; | 3085 | task_context->abort = 0; |
| 3159 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 3086 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
| @@ -3195,11 +3122,10 @@ sci_io_request_construct_smp(struct device *dev, | |||
| 3195 | task_context->task_phase = 0; | 3122 | task_context->task_phase = 0; |
| 3196 | 3123 | ||
| 3197 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 3124 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
| 3198 | (sci_controller_get_protocol_engine_group(ihost) << | 3125 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
| 3199 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 3126 | (iport->physical_port_index << |
| 3200 | (sci_port_get_index(iport) << | 3127 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
| 3201 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 3128 | ISCI_TAG_TCI(ireq->io_tag)); |
| 3202 | ISCI_TAG_TCI(ireq->io_tag)); | ||
| 3203 | /* | 3129 | /* |
| 3204 | * Copy the physical address for the command buffer to the SCU Task | 3130 | * Copy the physical address for the command buffer to the SCU Task |
| 3205 | * Context command buffer should not contain command header. | 3131 | * Context command buffer should not contain command header. |
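Most of the request.c hunks above apply the same open-coding pattern: the sci_request_set_status() macro and the sci_stp_request_udma_complete_request() helper are dropped in favour of writing ireq->scu_status and ireq->sci_status directly, with the state transition spelled out at each call site. A minimal before/after sketch of that shape (illustrative only, not the literal kernel code):

    /* before: the helper hid both the status update and the state change */
    sci_stp_request_udma_complete_request(ireq,
                                          SCU_TASK_DONE_CHECK_RESPONSE,
                                          SCI_FAILURE_IO_RESPONSE_VALID);

    /* after: both status fields and the transition are visible at the call site */
    ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
    ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
    sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

The SMP constructor follows suit: the protocol engine group is always the ISCI_PEG constant and the port index is read straight from the port, so the posted context reduces to

    ireq->post_context = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                         (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                         (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                         ISCI_TAG_TCI(ireq->io_tag);

as the last hunk above shows.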
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 08fcf98e70f4..597084027819 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h | |||
| @@ -300,58 +300,6 @@ enum sci_base_request_states { | |||
| 300 | SCI_REQ_FINAL, | 300 | SCI_REQ_FINAL, |
| 301 | }; | 301 | }; |
| 302 | 302 | ||
| 303 | /** | ||
| 304 | * sci_request_get_controller() - | ||
| 305 | * | ||
| 306 | * This macro will return the controller for this io request object | ||
| 307 | */ | ||
| 308 | #define sci_request_get_controller(ireq) \ | ||
| 309 | ((ireq)->owning_controller) | ||
| 310 | |||
| 311 | /** | ||
| 312 | * sci_request_get_device() - | ||
| 313 | * | ||
| 314 | * This macro will return the device for this io request object | ||
| 315 | */ | ||
| 316 | #define sci_request_get_device(ireq) \ | ||
| 317 | ((ireq)->target_device) | ||
| 318 | |||
| 319 | /** | ||
| 320 | * sci_request_get_port() - | ||
| 321 | * | ||
| 322 | * This macro will return the port for this io request object | ||
| 323 | */ | ||
| 324 | #define sci_request_get_port(ireq) \ | ||
| 325 | sci_remote_device_get_port(sci_request_get_device(ireq)) | ||
| 326 | |||
| 327 | /** | ||
| 328 | * sci_request_get_post_context() - | ||
| 329 | * | ||
| 330 | * This macro returns the constructed post context result for the io request. | ||
| 331 | */ | ||
| 332 | #define sci_request_get_post_context(ireq) \ | ||
| 333 | ((ireq)->post_context) | ||
| 334 | |||
| 335 | /** | ||
| 336 | * sci_request_get_task_context() - | ||
| 337 | * | ||
| 338 | * This is a helper macro to return the os handle for this request object. | ||
| 339 | */ | ||
| 340 | #define sci_request_get_task_context(request) \ | ||
| 341 | ((request)->task_context_buffer) | ||
| 342 | |||
| 343 | /** | ||
| 344 | * sci_request_set_status() - | ||
| 345 | * | ||
| 346 | * This macro will set the scu hardware status and sci request completion | ||
| 347 | * status for an io request. | ||
| 348 | */ | ||
| 349 | #define sci_request_set_status(request, scu_status_code, sci_status_code) \ | ||
| 350 | { \ | ||
| 351 | (request)->scu_status = (scu_status_code); \ | ||
| 352 | (request)->sci_status = (sci_status_code); \ | ||
| 353 | } | ||
| 354 | |||
| 355 | enum sci_status sci_request_start(struct isci_request *ireq); | 303 | enum sci_status sci_request_start(struct isci_request *ireq); |
| 356 | enum sci_status sci_io_request_terminate(struct isci_request *ireq); | 304 | enum sci_status sci_io_request_terminate(struct isci_request *ireq); |
| 357 | enum sci_status | 305 | enum sci_status |
| @@ -382,27 +330,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) | |||
| 382 | } | 330 | } |
| 383 | 331 | ||
| 384 | /** | 332 | /** |
| 385 | * This function gets the status of the request object. | ||
| 386 | * @request: This parameter points to the isci_request object | ||
| 387 | * | ||
| 388 | * status of the object as a isci_request_status enum. | ||
| 389 | */ | ||
| 390 | static inline enum isci_request_status | ||
| 391 | isci_request_get_state(struct isci_request *isci_request) | ||
| 392 | { | ||
| 393 | BUG_ON(isci_request == NULL); | ||
| 394 | |||
| 395 | /*probably a bad sign... */ | ||
| 396 | if (isci_request->status == unallocated) | ||
| 397 | dev_warn(&isci_request->isci_host->pdev->dev, | ||
| 398 | "%s: isci_request->status == unallocated\n", | ||
| 399 | __func__); | ||
| 400 | |||
| 401 | return isci_request->status; | ||
| 402 | } | ||
| 403 | |||
| 404 | |||
| 405 | /** | ||
| 406 | * isci_request_change_state() - This function sets the status of the request | 333 | * isci_request_change_state() - This function sets the status of the request |
| 407 | * object. | 334 | * object. |
| 408 | * @request: This parameter points to the isci_request object | 335 | * @request: This parameter points to the isci_request object |
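The header side mirrors those call sites: the one-line accessor macros (sci_request_get_controller(), _get_device(), _get_port(), _get_post_context(), _get_task_context()), the sci_request_set_status() statement macro, and the isci_request_get_state() inline are all deleted because a plain field access now says the same thing. Under the names used in the hunks above, a construction path reads roughly as follows (sketch only; the struct type names are inferred from the rest of the driver, not spelled out in this diff):

    struct isci_remote_device *idev = ireq->target_device; /* was sci_request_get_device(ireq) */
    struct isci_port *iport = idev->owning_port;           /* was sci_request_get_port(ireq) */
    u32 post_context = ireq->post_context;                 /* was sci_request_get_post_context(ireq) */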
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index d040aa2f3722..20112cd5b646 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
| @@ -654,7 +654,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, | |||
| 654 | * needs to be detached and freed here. | 654 | * needs to be detached and freed here. |
| 655 | */ | 655 | */ |
| 656 | spin_lock_irqsave(&isci_request->state_lock, flags); | 656 | spin_lock_irqsave(&isci_request->state_lock, flags); |
| 657 | request_status = isci_request_get_state(isci_request); | 657 | request_status = isci_request->status; |
| 658 | 658 | ||
| 659 | if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ | 659 | if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ |
| 660 | && ((request_status == aborted) | 660 | && ((request_status == aborted) |
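With isci_request_get_state() gone, both the completion path and the termination path read the status field directly while holding the request's state_lock, as the last two hunks show. Sketched from the task.c call site (the unlock is the usual spin_lock_irqsave() pairing and sits outside the hunk shown):

    unsigned long flags;
    enum isci_request_status request_status;

    spin_lock_irqsave(&isci_request->state_lock, flags);
    request_status = isci_request->status;  /* was isci_request_get_state(isci_request) */
    /* ... decode request_status: io_task vs. TMF, aborted, etc. ... */
    spin_unlock_irqrestore(&isci_request->state_lock, flags);

The only behaviour lost with the inline helper is its dev_warn() when the status is still unallocated; the direct read does not reproduce that check.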
