author     Dan Williams <dan.j.williams@intel.com>    2011-06-27 17:57:03 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2011-07-03 07:04:52 -0400
commit     5076a1a97e2fa61c847a5fdd4b1991faf7716da6
tree       251d207e75439da25d4d3a0353e0b853c8e79f2b /drivers/scsi/isci
parent     ba7cb22342a66505a831bb7e4541fef90e0193c9
isci: unify isci_request and scic_sds_request
They are one and the same object, so remove the distinction. The near-duplicate fields (owning_controller and isci_host) will be cleaned up after the scic_sds_controller/isci_host unification.

Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
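For orientation, here is a minimal sketch of what the unification means for callers (not part of the patch: the struct is abbreviated, and the ireq_tag_is_current() helper is hypothetical, written only to mirror the tag check in the first host.c hunk). Fields that were previously reached through the embedded ireq->sci member, or converted back with sci_req_to_ireq(), are now accessed directly on the single object:

	/* Abbreviated, illustrative view of the unified request object. */
	struct isci_request {
		u16 io_tag;                                    /* was ireq->sci.io_tag */
		struct scu_task_context *tc;                   /* was ireq->sci.tc     */
		struct scic_sds_controller *owning_controller;
		struct scic_sds_remote_device *target_device;
		unsigned long flags;                           /* IREQ_ACTIVE, ...     */
		/* ... host-side bookkeeping (dev_node, state_lock, ...) unchanged ... */
	};

	/* Hypothetical helper mirroring the validity check performed in
	 * scic_sds_controller_task_completion() after this patch: one pointer,
	 * no &ireq->sci hop and no sci_req_to_ireq() conversion.
	 */
	static bool ireq_tag_is_current(struct scic_sds_controller *scic,
					struct isci_request *ireq, u32 index)
	{
		return test_bit(IREQ_ACTIVE, &ireq->flags) &&
		       ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
		       ISCI_TAG_SEQ(ireq->io_tag) == scic->io_request_sequence[index];
	}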
Diffstat (limited to 'drivers/scsi/isci')
-rw-r--r--  drivers/scsi/isci/host.c                 |  98
-rw-r--r--  drivers/scsi/isci/host.h                 |  16
-rw-r--r--  drivers/scsi/isci/phy.c                  |   2
-rw-r--r--  drivers/scsi/isci/port.c                 |   4
-rw-r--r--  drivers/scsi/isci/port.h                 |   6
-rw-r--r--  drivers/scsi/isci/remote_device.c        |  92
-rw-r--r--  drivers/scsi/isci/remote_device.h        |   8
-rw-r--r--  drivers/scsi/isci/remote_node_context.c  |   4
-rw-r--r--  drivers/scsi/isci/remote_node_context.h  |   6
-rw-r--r--  drivers/scsi/isci/request.c              | 683
-rw-r--r--  drivers/scsi/isci/request.h              | 184
-rw-r--r--  drivers/scsi/isci/sata.c                 |   9
-rw-r--r--  drivers/scsi/isci/task.c                 |  17
13 files changed, 514 insertions(+), 615 deletions(-)
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 0884ae3253e5..d91cd6d82747 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -258,21 +258,20 @@ static void scic_sds_controller_task_completion(struct scic_sds_controller *scic
258 u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); 258 u32 index = SCU_GET_COMPLETION_INDEX(completion_entry);
259 struct isci_host *ihost = scic_to_ihost(scic); 259 struct isci_host *ihost = scic_to_ihost(scic);
260 struct isci_request *ireq = ihost->reqs[index]; 260 struct isci_request *ireq = ihost->reqs[index];
261 struct scic_sds_request *sci_req = &ireq->sci;
262 261
263 /* Make sure that we really want to process this IO request */ 262 /* Make sure that we really want to process this IO request */
264 if (test_bit(IREQ_ACTIVE, &ireq->flags) && 263 if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
265 sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && 264 ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
266 ISCI_TAG_SEQ(sci_req->io_tag) == scic->io_request_sequence[index]) 265 ISCI_TAG_SEQ(ireq->io_tag) == scic->io_request_sequence[index])
267 /* Yep this is a valid io request pass it along to the io request handler */ 266 /* Yep this is a valid io request pass it along to the io request handler */
268 scic_sds_io_request_tc_completion(sci_req, completion_entry); 267 scic_sds_io_request_tc_completion(ireq, completion_entry);
269} 268}
270 269
271static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic, 270static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
272 u32 completion_entry) 271 u32 completion_entry)
273{ 272{
274 u32 index; 273 u32 index;
275 struct scic_sds_request *io_request; 274 struct isci_request *ireq;
276 struct scic_sds_remote_device *device; 275 struct scic_sds_remote_device *device;
277 276
278 index = SCU_GET_COMPLETION_INDEX(completion_entry); 277 index = SCU_GET_COMPLETION_INDEX(completion_entry);
@@ -280,41 +279,27 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic
280 switch (scu_get_command_request_type(completion_entry)) { 279 switch (scu_get_command_request_type(completion_entry)) {
281 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: 280 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
282 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: 281 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
283 io_request = &scic_to_ihost(scic)->reqs[index]->sci; 282 ireq = scic_to_ihost(scic)->reqs[index];
284 dev_warn(scic_to_dev(scic), 283 dev_warn(scic_to_dev(scic), "%s: %x for io request %p\n",
285 "%s: SCIC SDS Completion type SDMA %x for io request " 284 __func__, completion_entry, ireq);
286 "%p\n",
287 __func__,
288 completion_entry,
289 io_request);
290 /* @todo For a post TC operation we need to fail the IO 285 /* @todo For a post TC operation we need to fail the IO
291 * request 286 * request
292 */ 287 */
293 break; 288 break;
294
295 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: 289 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
296 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: 290 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
297 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: 291 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
298 device = scic->device_table[index]; 292 device = scic->device_table[index];
299 dev_warn(scic_to_dev(scic), 293 dev_warn(scic_to_dev(scic), "%s: %x for device %p\n",
300 "%s: SCIC SDS Completion type SDMA %x for remote " 294 __func__, completion_entry, device);
301 "device %p\n",
302 __func__,
303 completion_entry,
304 device);
305 /* @todo For a port RNC operation we need to fail the 295 /* @todo For a port RNC operation we need to fail the
306 * device 296 * device
307 */ 297 */
308 break; 298 break;
309
310 default: 299 default:
311 dev_warn(scic_to_dev(scic), 300 dev_warn(scic_to_dev(scic), "%s: unknown completion type %x\n",
312 "%s: SCIC SDS Completion unknown SDMA completion " 301 __func__, completion_entry);
313 "type %x\n",
314 __func__,
315 completion_entry);
316 break; 302 break;
317
318 } 303 }
319} 304}
320 305
@@ -385,8 +370,8 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
385 u32 completion_entry) 370 u32 completion_entry)
386{ 371{
387 struct isci_host *ihost = scic_to_ihost(scic); 372 struct isci_host *ihost = scic_to_ihost(scic);
388 struct scic_sds_request *io_request;
389 struct scic_sds_remote_device *device; 373 struct scic_sds_remote_device *device;
374 struct isci_request *ireq;
390 struct scic_sds_phy *phy; 375 struct scic_sds_phy *phy;
391 u32 index; 376 u32 index;
392 377
@@ -418,17 +403,17 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
418 break; 403 break;
419 404
420 case SCU_EVENT_TYPE_TRANSPORT_ERROR: 405 case SCU_EVENT_TYPE_TRANSPORT_ERROR:
421 io_request = &ihost->reqs[index]->sci; 406 ireq = ihost->reqs[index];
422 scic_sds_io_request_event_handler(io_request, completion_entry); 407 scic_sds_io_request_event_handler(ireq, completion_entry);
423 break; 408 break;
424 409
425 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: 410 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
426 switch (scu_get_event_specifier(completion_entry)) { 411 switch (scu_get_event_specifier(completion_entry)) {
427 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: 412 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
428 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: 413 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
429 io_request = &ihost->reqs[index]->sci; 414 ireq = ihost->reqs[index];
430 if (io_request != NULL) 415 if (ireq != NULL)
431 scic_sds_io_request_event_handler(io_request, completion_entry); 416 scic_sds_io_request_event_handler(ireq, completion_entry);
432 else 417 else
433 dev_warn(scic_to_dev(scic), 418 dev_warn(scic_to_dev(scic),
434 "%s: SCIC Controller 0x%p received " 419 "%s: SCIC Controller 0x%p received "
@@ -1185,7 +1170,7 @@ static void isci_host_completion_routine(unsigned long data)
1185 } 1170 }
1186 1171
1187 spin_lock_irq(&isci_host->scic_lock); 1172 spin_lock_irq(&isci_host->scic_lock);
1188 isci_free_tag(isci_host, request->sci.io_tag); 1173 isci_free_tag(isci_host, request->io_tag);
1189 spin_unlock_irq(&isci_host->scic_lock); 1174 spin_unlock_irq(&isci_host->scic_lock);
1190 } 1175 }
1191 list_for_each_entry_safe(request, next_request, &errored_request_list, 1176 list_for_each_entry_safe(request, next_request, &errored_request_list,
@@ -1222,7 +1207,7 @@ static void isci_host_completion_routine(unsigned long data)
1222 * of pending requests. 1207 * of pending requests.
1223 */ 1208 */
1224 list_del_init(&request->dev_node); 1209 list_del_init(&request->dev_node);
1225 isci_free_tag(isci_host, request->sci.io_tag); 1210 isci_free_tag(isci_host, request->io_tag);
1226 spin_unlock_irq(&isci_host->scic_lock); 1211 spin_unlock_irq(&isci_host->scic_lock);
1227 } 1212 }
1228 } 1213 }
@@ -2486,8 +2471,8 @@ int isci_host_init(struct isci_host *isci_host)
2486 if (!ireq) 2471 if (!ireq)
2487 return -ENOMEM; 2472 return -ENOMEM;
2488 2473
2489 ireq->sci.tc = &isci_host->sci.task_context_table[i]; 2474 ireq->tc = &isci_host->sci.task_context_table[i];
2490 ireq->sci.owning_controller = &isci_host->sci; 2475 ireq->owning_controller = &isci_host->sci;
2491 spin_lock_init(&ireq->state_lock); 2476 spin_lock_init(&ireq->state_lock);
2492 ireq->request_daddr = dma; 2477 ireq->request_daddr = dma;
2493 ireq->isci_host = isci_host; 2478 ireq->isci_host = isci_host;
@@ -2600,7 +2585,7 @@ void scic_sds_controller_post_request(
2600 writel(request, &scic->smu_registers->post_context_port); 2585 writel(request, &scic->smu_registers->post_context_port);
2601} 2586}
2602 2587
2603struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag) 2588struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag)
2604{ 2589{
2605 u16 task_index; 2590 u16 task_index;
2606 u16 task_sequence; 2591 u16 task_sequence;
@@ -2614,7 +2599,7 @@ struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u
2614 task_sequence = ISCI_TAG_SEQ(io_tag); 2599 task_sequence = ISCI_TAG_SEQ(io_tag);
2615 2600
2616 if (task_sequence == scic->io_request_sequence[task_index]) 2601 if (task_sequence == scic->io_request_sequence[task_index])
2617 return &ireq->sci; 2602 return ireq;
2618 } 2603 }
2619 } 2604 }
2620 2605
@@ -2814,7 +2799,7 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2814 */ 2799 */
2815enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, 2800enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
2816 struct scic_sds_remote_device *rdev, 2801 struct scic_sds_remote_device *rdev,
2817 struct scic_sds_request *req) 2802 struct isci_request *ireq)
2818{ 2803{
2819 enum sci_status status; 2804 enum sci_status status;
2820 2805
@@ -2823,12 +2808,12 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
2823 return SCI_FAILURE_INVALID_STATE; 2808 return SCI_FAILURE_INVALID_STATE;
2824 } 2809 }
2825 2810
2826 status = scic_sds_remote_device_start_io(scic, rdev, req); 2811 status = scic_sds_remote_device_start_io(scic, rdev, ireq);
2827 if (status != SCI_SUCCESS) 2812 if (status != SCI_SUCCESS)
2828 return status; 2813 return status;
2829 2814
2830 set_bit(IREQ_ACTIVE, &sci_req_to_ireq(req)->flags); 2815 set_bit(IREQ_ACTIVE, &ireq->flags);
2831 scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req)); 2816 scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq));
2832 return SCI_SUCCESS; 2817 return SCI_SUCCESS;
2833} 2818}
2834 2819
@@ -2851,7 +2836,7 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
2851enum sci_status scic_controller_terminate_request( 2836enum sci_status scic_controller_terminate_request(
2852 struct scic_sds_controller *scic, 2837 struct scic_sds_controller *scic,
2853 struct scic_sds_remote_device *rdev, 2838 struct scic_sds_remote_device *rdev,
2854 struct scic_sds_request *req) 2839 struct isci_request *ireq)
2855{ 2840{
2856 enum sci_status status; 2841 enum sci_status status;
2857 2842
@@ -2861,7 +2846,7 @@ enum sci_status scic_controller_terminate_request(
2861 return SCI_FAILURE_INVALID_STATE; 2846 return SCI_FAILURE_INVALID_STATE;
2862 } 2847 }
2863 2848
2864 status = scic_sds_io_request_terminate(req); 2849 status = scic_sds_io_request_terminate(ireq);
2865 if (status != SCI_SUCCESS) 2850 if (status != SCI_SUCCESS)
2866 return status; 2851 return status;
2867 2852
@@ -2870,7 +2855,7 @@ enum sci_status scic_controller_terminate_request(
2870 * request sub-type. 2855 * request sub-type.
2871 */ 2856 */
2872 scic_sds_controller_post_request(scic, 2857 scic_sds_controller_post_request(scic,
2873 scic_sds_request_get_post_context(req) | 2858 scic_sds_request_get_post_context(ireq) |
2874 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); 2859 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2875 return SCI_SUCCESS; 2860 return SCI_SUCCESS;
2876} 2861}
@@ -2889,7 +2874,7 @@ enum sci_status scic_controller_terminate_request(
2889enum sci_status scic_controller_complete_io( 2874enum sci_status scic_controller_complete_io(
2890 struct scic_sds_controller *scic, 2875 struct scic_sds_controller *scic,
2891 struct scic_sds_remote_device *rdev, 2876 struct scic_sds_remote_device *rdev,
2892 struct scic_sds_request *request) 2877 struct isci_request *ireq)
2893{ 2878{
2894 enum sci_status status; 2879 enum sci_status status;
2895 u16 index; 2880 u16 index;
@@ -2899,12 +2884,12 @@ enum sci_status scic_controller_complete_io(
2899 /* XXX: Implement this function */ 2884 /* XXX: Implement this function */
2900 return SCI_FAILURE; 2885 return SCI_FAILURE;
2901 case SCIC_READY: 2886 case SCIC_READY:
2902 status = scic_sds_remote_device_complete_io(scic, rdev, request); 2887 status = scic_sds_remote_device_complete_io(scic, rdev, ireq);
2903 if (status != SCI_SUCCESS) 2888 if (status != SCI_SUCCESS)
2904 return status; 2889 return status;
2905 2890
2906 index = ISCI_TAG_TCI(request->io_tag); 2891 index = ISCI_TAG_TCI(ireq->io_tag);
2907 clear_bit(IREQ_ACTIVE, &sci_req_to_ireq(request)->flags); 2892 clear_bit(IREQ_ACTIVE, &ireq->flags);
2908 return SCI_SUCCESS; 2893 return SCI_SUCCESS;
2909 default: 2894 default:
2910 dev_warn(scic_to_dev(scic), "invalid state to complete I/O"); 2895 dev_warn(scic_to_dev(scic), "invalid state to complete I/O");
@@ -2913,17 +2898,17 @@ enum sci_status scic_controller_complete_io(
2913 2898
2914} 2899}
2915 2900
2916enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req) 2901enum sci_status scic_controller_continue_io(struct isci_request *ireq)
2917{ 2902{
2918 struct scic_sds_controller *scic = sci_req->owning_controller; 2903 struct scic_sds_controller *scic = ireq->owning_controller;
2919 2904
2920 if (scic->sm.current_state_id != SCIC_READY) { 2905 if (scic->sm.current_state_id != SCIC_READY) {
2921 dev_warn(scic_to_dev(scic), "invalid state to continue I/O"); 2906 dev_warn(scic_to_dev(scic), "invalid state to continue I/O");
2922 return SCI_FAILURE_INVALID_STATE; 2907 return SCI_FAILURE_INVALID_STATE;
2923 } 2908 }
2924 2909
2925 set_bit(IREQ_ACTIVE, &sci_req_to_ireq(sci_req)->flags); 2910 set_bit(IREQ_ACTIVE, &ireq->flags);
2926 scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req)); 2911 scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq));
2927 return SCI_SUCCESS; 2912 return SCI_SUCCESS;
2928} 2913}
2929 2914
@@ -2939,9 +2924,8 @@ enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
2939enum sci_task_status scic_controller_start_task( 2924enum sci_task_status scic_controller_start_task(
2940 struct scic_sds_controller *scic, 2925 struct scic_sds_controller *scic,
2941 struct scic_sds_remote_device *rdev, 2926 struct scic_sds_remote_device *rdev,
2942 struct scic_sds_request *req) 2927 struct isci_request *ireq)
2943{ 2928{
2944 struct isci_request *ireq = sci_req_to_ireq(req);
2945 enum sci_status status; 2929 enum sci_status status;
2946 2930
2947 if (scic->sm.current_state_id != SCIC_READY) { 2931 if (scic->sm.current_state_id != SCIC_READY) {
@@ -2952,7 +2936,7 @@ enum sci_task_status scic_controller_start_task(
2952 return SCI_TASK_FAILURE_INVALID_STATE; 2936 return SCI_TASK_FAILURE_INVALID_STATE;
2953 } 2937 }
2954 2938
2955 status = scic_sds_remote_device_start_task(scic, rdev, req); 2939 status = scic_sds_remote_device_start_task(scic, rdev, ireq);
2956 switch (status) { 2940 switch (status) {
2957 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: 2941 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2958 set_bit(IREQ_ACTIVE, &ireq->flags); 2942 set_bit(IREQ_ACTIVE, &ireq->flags);
@@ -2967,7 +2951,7 @@ enum sci_task_status scic_controller_start_task(
2967 set_bit(IREQ_ACTIVE, &ireq->flags); 2951 set_bit(IREQ_ACTIVE, &ireq->flags);
2968 2952
2969 scic_sds_controller_post_request(scic, 2953 scic_sds_controller_post_request(scic,
2970 scic_sds_request_get_post_context(req)); 2954 scic_sds_request_get_post_context(ireq));
2971 break; 2955 break;
2972 default: 2956 default:
2973 break; 2957 break;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 446fade19b3a..0b26d25c19a9 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -64,7 +64,7 @@
64#include "unsolicited_frame_control.h" 64#include "unsolicited_frame_control.h"
65#include "probe_roms.h" 65#include "probe_roms.h"
66 66
67struct scic_sds_request; 67struct isci_request;
68struct scu_task_context; 68struct scu_task_context;
69 69
70 70
@@ -601,7 +601,7 @@ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffe
601 struct scic_sds_controller *scic, 601 struct scic_sds_controller *scic,
602 u16 node_id); 602 u16 node_id);
603 603
604struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, 604struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic,
605 u16 io_tag); 605 u16 io_tag);
606 606
607void scic_sds_controller_power_control_queue_insert( 607void scic_sds_controller_power_control_queue_insert(
@@ -628,11 +628,11 @@ void scic_sds_controller_remote_device_stopped(
628 628
629void scic_sds_controller_copy_task_context( 629void scic_sds_controller_copy_task_context(
630 struct scic_sds_controller *scic, 630 struct scic_sds_controller *scic,
631 struct scic_sds_request *this_request); 631 struct isci_request *ireq);
632 632
633void scic_sds_controller_register_setup(struct scic_sds_controller *scic); 633void scic_sds_controller_register_setup(struct scic_sds_controller *scic);
634 634
635enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req); 635enum sci_status scic_controller_continue_io(struct isci_request *ireq);
636int isci_host_scan_finished(struct Scsi_Host *, unsigned long); 636int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
637void isci_host_scan_start(struct Scsi_Host *); 637void isci_host_scan_start(struct Scsi_Host *);
638u16 isci_alloc_tag(struct isci_host *ihost); 638u16 isci_alloc_tag(struct isci_host *ihost);
@@ -665,22 +665,22 @@ void scic_controller_disable_interrupts(
665enum sci_status scic_controller_start_io( 665enum sci_status scic_controller_start_io(
666 struct scic_sds_controller *scic, 666 struct scic_sds_controller *scic,
667 struct scic_sds_remote_device *remote_device, 667 struct scic_sds_remote_device *remote_device,
668 struct scic_sds_request *io_request); 668 struct isci_request *ireq);
669 669
670enum sci_task_status scic_controller_start_task( 670enum sci_task_status scic_controller_start_task(
671 struct scic_sds_controller *scic, 671 struct scic_sds_controller *scic,
672 struct scic_sds_remote_device *remote_device, 672 struct scic_sds_remote_device *remote_device,
673 struct scic_sds_request *task_request); 673 struct isci_request *ireq);
674 674
675enum sci_status scic_controller_terminate_request( 675enum sci_status scic_controller_terminate_request(
676 struct scic_sds_controller *scic, 676 struct scic_sds_controller *scic,
677 struct scic_sds_remote_device *remote_device, 677 struct scic_sds_remote_device *remote_device,
678 struct scic_sds_request *request); 678 struct isci_request *ireq);
679 679
680enum sci_status scic_controller_complete_io( 680enum sci_status scic_controller_complete_io(
681 struct scic_sds_controller *scic, 681 struct scic_sds_controller *scic,
682 struct scic_sds_remote_device *remote_device, 682 struct scic_sds_remote_device *remote_device,
683 struct scic_sds_request *io_request); 683 struct isci_request *ireq);
684 684
685void scic_sds_port_configuration_agent_construct( 685void scic_sds_port_configuration_agent_construct(
686 struct scic_sds_port_configuration_agent *port_agent); 686 struct scic_sds_port_configuration_agent *port_agent);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index c01d76210aa2..98d93aeea75d 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -983,7 +983,7 @@ enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy,
983 "%s: in wrong state: %d\n", __func__, state); 983 "%s: in wrong state: %d\n", __func__, state);
984 return SCI_FAILURE_INVALID_STATE; 984 return SCI_FAILURE_INVALID_STATE;
985 } 985 }
986 986
987} 987}
988 988
989static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) 989static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 0e84e29335dd..bd091549b4f7 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -1611,7 +1611,7 @@ enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port,
1611 1611
1612enum sci_status scic_sds_port_start_io(struct scic_sds_port *sci_port, 1612enum sci_status scic_sds_port_start_io(struct scic_sds_port *sci_port,
1613 struct scic_sds_remote_device *sci_dev, 1613 struct scic_sds_remote_device *sci_dev,
1614 struct scic_sds_request *sci_req) 1614 struct isci_request *ireq)
1615{ 1615{
1616 enum scic_sds_port_states state; 1616 enum scic_sds_port_states state;
1617 1617
@@ -1631,7 +1631,7 @@ enum sci_status scic_sds_port_start_io(struct scic_sds_port *sci_port,
1631 1631
1632enum sci_status scic_sds_port_complete_io(struct scic_sds_port *sci_port, 1632enum sci_status scic_sds_port_complete_io(struct scic_sds_port *sci_port,
1633 struct scic_sds_remote_device *sci_dev, 1633 struct scic_sds_remote_device *sci_dev,
1634 struct scic_sds_request *sci_req) 1634 struct isci_request *ireq)
1635{ 1635{
1636 enum scic_sds_port_states state; 1636 enum scic_sds_port_states state;
1637 1637
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index a44e541914f5..668f3a14cd70 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -354,17 +354,17 @@ enum sci_status scic_sds_port_link_up(struct scic_sds_port *sci_port,
354enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port, 354enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port,
355 struct scic_sds_phy *sci_phy); 355 struct scic_sds_phy *sci_phy);
356 356
357struct scic_sds_request; 357struct isci_request;
358struct scic_sds_remote_device; 358struct scic_sds_remote_device;
359enum sci_status scic_sds_port_start_io( 359enum sci_status scic_sds_port_start_io(
360 struct scic_sds_port *sci_port, 360 struct scic_sds_port *sci_port,
361 struct scic_sds_remote_device *sci_dev, 361 struct scic_sds_remote_device *sci_dev,
362 struct scic_sds_request *sci_req); 362 struct isci_request *ireq);
363 363
364enum sci_status scic_sds_port_complete_io( 364enum sci_status scic_sds_port_complete_io(
365 struct scic_sds_port *sci_port, 365 struct scic_sds_port *sci_port,
366 struct scic_sds_remote_device *sci_dev, 366 struct scic_sds_remote_device *sci_dev,
367 struct scic_sds_request *sci_req); 367 struct isci_request *ireq);
368 368
369enum sas_linkrate scic_sds_port_get_max_allowed_speed( 369enum sas_linkrate scic_sds_port_get_max_allowed_speed(
370 struct scic_sds_port *sci_port); 370 struct scic_sds_port *sci_port);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 5a86bb1e96df..c7cb0c54df57 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -94,7 +94,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
94 94
95 scic_controller_terminate_request(&ihost->sci, 95 scic_controller_terminate_request(&ihost->sci,
96 &idev->sci, 96 &idev->sci,
97 &ireq->sci); 97 ireq);
98 } 98 }
99 /* Fall through into the default case... */ 99 /* Fall through into the default case... */
100 default: 100 default:
@@ -142,14 +142,13 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds
142 142
143 for (i = 0; i < SCI_MAX_IO_REQUESTS && i < request_count; i++) { 143 for (i = 0; i < SCI_MAX_IO_REQUESTS && i < request_count; i++) {
144 struct isci_request *ireq = ihost->reqs[i]; 144 struct isci_request *ireq = ihost->reqs[i];
145 struct scic_sds_request *sci_req = &ireq->sci;
146 enum sci_status s; 145 enum sci_status s;
147 146
148 if (!test_bit(IREQ_ACTIVE, &ireq->flags) || 147 if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
149 sci_req->target_device != sci_dev) 148 ireq->target_device != sci_dev)
150 continue; 149 continue;
151 150
152 s = scic_controller_terminate_request(scic, sci_dev, sci_req); 151 s = scic_controller_terminate_request(scic, sci_dev, ireq);
153 if (s != SCI_SUCCESS) 152 if (s != SCI_SUCCESS)
154 status = s; 153 status = s;
155 } 154 }
@@ -299,7 +298,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi
299 case SCI_DEV_STOPPING: 298 case SCI_DEV_STOPPING:
300 case SCI_DEV_FAILED: 299 case SCI_DEV_FAILED:
301 case SCI_DEV_RESETTING: { 300 case SCI_DEV_RESETTING: {
302 struct scic_sds_request *sci_req; 301 struct isci_request *ireq;
303 struct ssp_frame_hdr hdr; 302 struct ssp_frame_hdr hdr;
304 void *frame_header; 303 void *frame_header;
305 ssize_t word_cnt; 304 ssize_t word_cnt;
@@ -313,10 +312,10 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi
313 word_cnt = sizeof(hdr) / sizeof(u32); 312 word_cnt = sizeof(hdr) / sizeof(u32);
314 sci_swab32_cpy(&hdr, frame_header, word_cnt); 313 sci_swab32_cpy(&hdr, frame_header, word_cnt);
315 314
316 sci_req = scic_request_by_tag(scic, be16_to_cpu(hdr.tag)); 315 ireq = scic_request_by_tag(scic, be16_to_cpu(hdr.tag));
317 if (sci_req && sci_req->target_device == sci_dev) { 316 if (ireq && ireq->target_device == sci_dev) {
318 /* The IO request is now in charge of releasing the frame */ 317 /* The IO request is now in charge of releasing the frame */
319 status = scic_sds_io_request_frame_handler(sci_req, frame_index); 318 status = scic_sds_io_request_frame_handler(ireq, frame_index);
320 } else { 319 } else {
321 /* We could not map this tag to a valid IO 320 /* We could not map this tag to a valid IO
322 * request Just toss the frame and continue 321 * request Just toss the frame and continue
@@ -448,14 +447,14 @@ enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_devi
448} 447}
449 448
450static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *sci_dev, 449static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *sci_dev,
451 struct scic_sds_request *sci_req, 450 struct isci_request *ireq,
452 enum sci_status status) 451 enum sci_status status)
453{ 452{
454 struct scic_sds_port *sci_port = sci_dev->owning_port; 453 struct scic_sds_port *sci_port = sci_dev->owning_port;
455 454
456 /* cleanup requests that failed after starting on the port */ 455 /* cleanup requests that failed after starting on the port */
457 if (status != SCI_SUCCESS) 456 if (status != SCI_SUCCESS)
458 scic_sds_port_complete_io(sci_port, sci_dev, sci_req); 457 scic_sds_port_complete_io(sci_port, sci_dev, ireq);
459 else { 458 else {
460 kref_get(&sci_dev_to_idev(sci_dev)->kref); 459 kref_get(&sci_dev_to_idev(sci_dev)->kref);
461 scic_sds_remote_device_increment_request_count(sci_dev); 460 scic_sds_remote_device_increment_request_count(sci_dev);
@@ -464,12 +463,11 @@ static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *
464 463
465enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic, 464enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic,
466 struct scic_sds_remote_device *sci_dev, 465 struct scic_sds_remote_device *sci_dev,
467 struct scic_sds_request *sci_req) 466 struct isci_request *ireq)
468{ 467{
469 struct sci_base_state_machine *sm = &sci_dev->sm; 468 struct sci_base_state_machine *sm = &sci_dev->sm;
470 enum scic_sds_remote_device_states state = sm->current_state_id; 469 enum scic_sds_remote_device_states state = sm->current_state_id;
471 struct scic_sds_port *sci_port = sci_dev->owning_port; 470 struct scic_sds_port *sci_port = sci_dev->owning_port;
472 struct isci_request *ireq = sci_req_to_ireq(sci_req);
473 enum sci_status status; 471 enum sci_status status;
474 472
475 switch (state) { 473 switch (state) {
@@ -491,15 +489,15 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
491 * successful it will start the request for the port object then 489 * successful it will start the request for the port object then
492 * increment its own request count. 490 * increment its own request count.
493 */ 491 */
494 status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); 492 status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
495 if (status != SCI_SUCCESS) 493 if (status != SCI_SUCCESS)
496 return status; 494 return status;
497 495
498 status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req); 496 status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq);
499 if (status != SCI_SUCCESS) 497 if (status != SCI_SUCCESS)
500 break; 498 break;
501 499
502 status = scic_sds_request_start(sci_req); 500 status = scic_sds_request_start(ireq);
503 break; 501 break;
504 case SCI_STP_DEV_IDLE: { 502 case SCI_STP_DEV_IDLE: {
505 /* handle the start io operation for a sata device that is in 503 /* handle the start io operation for a sata device that is in
@@ -513,22 +511,22 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
513 enum scic_sds_remote_device_states new_state; 511 enum scic_sds_remote_device_states new_state;
514 struct sas_task *task = isci_request_access_task(ireq); 512 struct sas_task *task = isci_request_access_task(ireq);
515 513
516 status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); 514 status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
517 if (status != SCI_SUCCESS) 515 if (status != SCI_SUCCESS)
518 return status; 516 return status;
519 517
520 status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req); 518 status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq);
521 if (status != SCI_SUCCESS) 519 if (status != SCI_SUCCESS)
522 break; 520 break;
523 521
524 status = scic_sds_request_start(sci_req); 522 status = scic_sds_request_start(ireq);
525 if (status != SCI_SUCCESS) 523 if (status != SCI_SUCCESS)
526 break; 524 break;
527 525
528 if (task->ata_task.use_ncq) 526 if (task->ata_task.use_ncq)
529 new_state = SCI_STP_DEV_NCQ; 527 new_state = SCI_STP_DEV_NCQ;
530 else { 528 else {
531 sci_dev->working_request = sci_req; 529 sci_dev->working_request = ireq;
532 new_state = SCI_STP_DEV_CMD; 530 new_state = SCI_STP_DEV_CMD;
533 } 531 }
534 sci_change_state(sm, new_state); 532 sci_change_state(sm, new_state);
@@ -538,15 +536,15 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
538 struct sas_task *task = isci_request_access_task(ireq); 536 struct sas_task *task = isci_request_access_task(ireq);
539 537
540 if (task->ata_task.use_ncq) { 538 if (task->ata_task.use_ncq) {
541 status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); 539 status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
542 if (status != SCI_SUCCESS) 540 if (status != SCI_SUCCESS)
543 return status; 541 return status;
544 542
545 status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req); 543 status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq);
546 if (status != SCI_SUCCESS) 544 if (status != SCI_SUCCESS)
547 break; 545 break;
548 546
549 status = scic_sds_request_start(sci_req); 547 status = scic_sds_request_start(ireq);
550 } else 548 } else
551 return SCI_FAILURE_INVALID_STATE; 549 return SCI_FAILURE_INVALID_STATE;
552 break; 550 break;
@@ -554,19 +552,19 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
554 case SCI_STP_DEV_AWAIT_RESET: 552 case SCI_STP_DEV_AWAIT_RESET:
555 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 553 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
556 case SCI_SMP_DEV_IDLE: 554 case SCI_SMP_DEV_IDLE:
557 status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); 555 status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
558 if (status != SCI_SUCCESS) 556 if (status != SCI_SUCCESS)
559 return status; 557 return status;
560 558
561 status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req); 559 status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq);
562 if (status != SCI_SUCCESS) 560 if (status != SCI_SUCCESS)
563 break; 561 break;
564 562
565 status = scic_sds_request_start(sci_req); 563 status = scic_sds_request_start(ireq);
566 if (status != SCI_SUCCESS) 564 if (status != SCI_SUCCESS)
567 break; 565 break;
568 566
569 sci_dev->working_request = sci_req; 567 sci_dev->working_request = ireq;
570 sci_change_state(&sci_dev->sm, SCI_SMP_DEV_CMD); 568 sci_change_state(&sci_dev->sm, SCI_SMP_DEV_CMD);
571 break; 569 break;
572 case SCI_STP_DEV_CMD: 570 case SCI_STP_DEV_CMD:
@@ -577,21 +575,21 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
577 return SCI_FAILURE_INVALID_STATE; 575 return SCI_FAILURE_INVALID_STATE;
578 } 576 }
579 577
580 scic_sds_remote_device_start_request(sci_dev, sci_req, status); 578 scic_sds_remote_device_start_request(sci_dev, ireq, status);
581 return status; 579 return status;
582} 580}
583 581
584static enum sci_status common_complete_io(struct scic_sds_port *sci_port, 582static enum sci_status common_complete_io(struct scic_sds_port *sci_port,
585 struct scic_sds_remote_device *sci_dev, 583 struct scic_sds_remote_device *sci_dev,
586 struct scic_sds_request *sci_req) 584 struct isci_request *ireq)
587{ 585{
588 enum sci_status status; 586 enum sci_status status;
589 587
590 status = scic_sds_request_complete(sci_req); 588 status = scic_sds_request_complete(ireq);
591 if (status != SCI_SUCCESS) 589 if (status != SCI_SUCCESS)
592 return status; 590 return status;
593 591
594 status = scic_sds_port_complete_io(sci_port, sci_dev, sci_req); 592 status = scic_sds_port_complete_io(sci_port, sci_dev, ireq);
595 if (status != SCI_SUCCESS) 593 if (status != SCI_SUCCESS)
596 return status; 594 return status;
597 595
@@ -601,7 +599,7 @@ static enum sci_status common_complete_io(struct scic_sds_port *sci_port,
601 599
602enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *scic, 600enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *scic,
603 struct scic_sds_remote_device *sci_dev, 601 struct scic_sds_remote_device *sci_dev,
604 struct scic_sds_request *sci_req) 602 struct isci_request *ireq)
605{ 603{
606 struct sci_base_state_machine *sm = &sci_dev->sm; 604 struct sci_base_state_machine *sm = &sci_dev->sm;
607 enum scic_sds_remote_device_states state = sm->current_state_id; 605 enum scic_sds_remote_device_states state = sm->current_state_id;
@@ -623,16 +621,16 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s
623 case SCI_DEV_READY: 621 case SCI_DEV_READY:
624 case SCI_STP_DEV_AWAIT_RESET: 622 case SCI_STP_DEV_AWAIT_RESET:
625 case SCI_DEV_RESETTING: 623 case SCI_DEV_RESETTING:
626 status = common_complete_io(sci_port, sci_dev, sci_req); 624 status = common_complete_io(sci_port, sci_dev, ireq);
627 break; 625 break;
628 case SCI_STP_DEV_CMD: 626 case SCI_STP_DEV_CMD:
629 case SCI_STP_DEV_NCQ: 627 case SCI_STP_DEV_NCQ:
630 case SCI_STP_DEV_NCQ_ERROR: 628 case SCI_STP_DEV_NCQ_ERROR:
631 status = common_complete_io(sci_port, sci_dev, sci_req); 629 status = common_complete_io(sci_port, sci_dev, ireq);
632 if (status != SCI_SUCCESS) 630 if (status != SCI_SUCCESS)
633 break; 631 break;
634 632
635 if (sci_req->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 633 if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
636 /* This request causes hardware error, device needs to be Lun Reset. 634 /* This request causes hardware error, device needs to be Lun Reset.
637 * So here we force the state machine to IDLE state so the rest IOs 635 * So here we force the state machine to IDLE state so the rest IOs
638 * can reach RNC state handler, these IOs will be completed by RNC with 636 * can reach RNC state handler, these IOs will be completed by RNC with
@@ -643,13 +641,13 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s
643 sci_change_state(sm, SCI_STP_DEV_IDLE); 641 sci_change_state(sm, SCI_STP_DEV_IDLE);
644 break; 642 break;
645 case SCI_SMP_DEV_CMD: 643 case SCI_SMP_DEV_CMD:
646 status = common_complete_io(sci_port, sci_dev, sci_req); 644 status = common_complete_io(sci_port, sci_dev, ireq);
647 if (status != SCI_SUCCESS) 645 if (status != SCI_SUCCESS)
648 break; 646 break;
649 sci_change_state(sm, SCI_SMP_DEV_IDLE); 647 sci_change_state(sm, SCI_SMP_DEV_IDLE);
650 break; 648 break;
651 case SCI_DEV_STOPPING: 649 case SCI_DEV_STOPPING:
652 status = common_complete_io(sci_port, sci_dev, sci_req); 650 status = common_complete_io(sci_port, sci_dev, ireq);
653 if (status != SCI_SUCCESS) 651 if (status != SCI_SUCCESS)
654 break; 652 break;
655 653
@@ -664,7 +662,7 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s
664 dev_err(scirdev_to_dev(sci_dev), 662 dev_err(scirdev_to_dev(sci_dev),
665 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " 663 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
666 "could not complete\n", __func__, sci_port, 664 "could not complete\n", __func__, sci_port,
667 sci_dev, sci_req, status); 665 sci_dev, ireq, status);
668 else 666 else
669 isci_put_device(sci_dev_to_idev(sci_dev)); 667 isci_put_device(sci_dev_to_idev(sci_dev));
670 668
@@ -682,7 +680,7 @@ static void scic_sds_remote_device_continue_request(void *dev)
682 680
683enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *scic, 681enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *scic,
684 struct scic_sds_remote_device *sci_dev, 682 struct scic_sds_remote_device *sci_dev,
685 struct scic_sds_request *sci_req) 683 struct isci_request *ireq)
686{ 684{
687 struct sci_base_state_machine *sm = &sci_dev->sm; 685 struct sci_base_state_machine *sm = &sci_dev->sm;
688 enum scic_sds_remote_device_states state = sm->current_state_id; 686 enum scic_sds_remote_device_states state = sm->current_state_id;
@@ -708,15 +706,15 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc
708 case SCI_STP_DEV_NCQ: 706 case SCI_STP_DEV_NCQ:
709 case SCI_STP_DEV_NCQ_ERROR: 707 case SCI_STP_DEV_NCQ_ERROR:
710 case SCI_STP_DEV_AWAIT_RESET: 708 case SCI_STP_DEV_AWAIT_RESET:
711 status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); 709 status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
712 if (status != SCI_SUCCESS) 710 if (status != SCI_SUCCESS)
713 return status; 711 return status;
714 712
715 status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, sci_req); 713 status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, ireq);
716 if (status != SCI_SUCCESS) 714 if (status != SCI_SUCCESS)
717 goto out; 715 goto out;
718 716
719 status = scic_sds_request_start(sci_req); 717 status = scic_sds_request_start(ireq);
720 if (status != SCI_SUCCESS) 718 if (status != SCI_SUCCESS)
721 goto out; 719 goto out;
722 720
@@ -724,7 +722,7 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc
724 * replace the request that probably resulted in the task 722 * replace the request that probably resulted in the task
725 * management request. 723 * management request.
726 */ 724 */
727 sci_dev->working_request = sci_req; 725 sci_dev->working_request = ireq;
728 sci_change_state(sm, SCI_STP_DEV_CMD); 726 sci_change_state(sm, SCI_STP_DEV_CMD);
729 727
730 /* The remote node context must cleanup the TCi to NCQ mapping 728 /* The remote node context must cleanup the TCi to NCQ mapping
@@ -741,25 +739,25 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc
741 sci_dev); 739 sci_dev);
742 740
743 out: 741 out:
744 scic_sds_remote_device_start_request(sci_dev, sci_req, status); 742 scic_sds_remote_device_start_request(sci_dev, ireq, status);
745 /* We need to let the controller start request handler know that 743 /* We need to let the controller start request handler know that
746 * it can't post TC yet. We will provide a callback function to 744 * it can't post TC yet. We will provide a callback function to
747 * post TC when RNC gets resumed. 745 * post TC when RNC gets resumed.
748 */ 746 */
749 return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; 747 return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
750 case SCI_DEV_READY: 748 case SCI_DEV_READY:
751 status = scic_sds_port_start_io(sci_port, sci_dev, sci_req); 749 status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
752 if (status != SCI_SUCCESS) 750 if (status != SCI_SUCCESS)
753 return status; 751 return status;
754 752
755 status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, sci_req); 753 status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, ireq);
756 if (status != SCI_SUCCESS) 754 if (status != SCI_SUCCESS)
757 break; 755 break;
758 756
759 status = scic_sds_request_start(sci_req); 757 status = scic_sds_request_start(ireq);
760 break; 758 break;
761 } 759 }
762 scic_sds_remote_device_start_request(sci_dev, sci_req, status); 760 scic_sds_remote_device_start_request(sci_dev, ireq, status);
763 761
764 return status; 762 return status;
765} 763}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 0d9e37fe734f..6ac5dfb7d1db 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -120,7 +120,7 @@ struct scic_sds_remote_device {
120 * used only for SATA requests since the unsolicited frames we get from the 120 * used only for SATA requests since the unsolicited frames we get from the
121 * hardware have no Tag value to look up the io request object. 121 * hardware have no Tag value to look up the io request object.
122 */ 122 */
123 struct scic_sds_request *working_request; 123 struct isci_request *working_request;
124 124
125 /** 125 /**
126 * This field contains the reason for the remote device going not_ready. It is 126 * This field contains the reason for the remote device going not_ready. It is
@@ -466,17 +466,17 @@ enum sci_status scic_sds_remote_device_event_handler(
466enum sci_status scic_sds_remote_device_start_io( 466enum sci_status scic_sds_remote_device_start_io(
467 struct scic_sds_controller *controller, 467 struct scic_sds_controller *controller,
468 struct scic_sds_remote_device *sci_dev, 468 struct scic_sds_remote_device *sci_dev,
469 struct scic_sds_request *io_request); 469 struct isci_request *ireq);
470 470
471enum sci_status scic_sds_remote_device_start_task( 471enum sci_status scic_sds_remote_device_start_task(
472 struct scic_sds_controller *controller, 472 struct scic_sds_controller *controller,
473 struct scic_sds_remote_device *sci_dev, 473 struct scic_sds_remote_device *sci_dev,
474 struct scic_sds_request *io_request); 474 struct isci_request *ireq);
475 475
476enum sci_status scic_sds_remote_device_complete_io( 476enum sci_status scic_sds_remote_device_complete_io(
477 struct scic_sds_controller *controller, 477 struct scic_sds_controller *controller,
478 struct scic_sds_remote_device *sci_dev, 478 struct scic_sds_remote_device *sci_dev,
479 struct scic_sds_request *io_request); 479 struct isci_request *ireq);
480 480
481enum sci_status scic_sds_remote_device_suspend( 481enum sci_status scic_sds_remote_device_suspend(
482 struct scic_sds_remote_device *sci_dev, 482 struct scic_sds_remote_device *sci_dev,
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index b6774bcdabd8..1b51fe55314d 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -598,7 +598,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_
598} 598}
599 599
600enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, 600enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc,
601 struct scic_sds_request *sci_req) 601 struct isci_request *ireq)
602{ 602{
603 enum scis_sds_remote_node_context_states state; 603 enum scis_sds_remote_node_context_states state;
604 604
@@ -623,7 +623,7 @@ enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_nod
623} 623}
624 624
625enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, 625enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc,
626 struct scic_sds_request *sci_req) 626 struct isci_request *ireq)
627{ 627{
628 enum scis_sds_remote_node_context_states state; 628 enum scis_sds_remote_node_context_states state;
629 629
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index 67a45b686a98..35e6ae616903 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -78,7 +78,7 @@
78#define SCU_HARDWARE_SUSPENSION (0) 78#define SCU_HARDWARE_SUSPENSION (0)
79#define SCI_SOFTWARE_SUSPENSION (1) 79#define SCI_SOFTWARE_SUSPENSION (1)
80 80
81struct scic_sds_request; 81struct isci_request;
82struct scic_sds_remote_device; 82struct scic_sds_remote_device;
83struct scic_sds_remote_node_context; 83struct scic_sds_remote_node_context;
84 84
@@ -220,8 +220,8 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_
220 scics_sds_remote_node_context_callback cb_fn, 220 scics_sds_remote_node_context_callback cb_fn,
221 void *cb_p); 221 void *cb_p);
222enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, 222enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc,
223 struct scic_sds_request *sci_req); 223 struct isci_request *ireq);
224enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, 224enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc,
225 struct scic_sds_request *sci_req); 225 struct isci_request *ireq);
226 226
227#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ 227#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 8520626b02fa..c544bc79ce17 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -61,35 +61,35 @@
61#include "scu_event_codes.h" 61#include "scu_event_codes.h"
62#include "sas.h" 62#include "sas.h"
63 63
64static struct scu_sgl_element_pair *to_sgl_element_pair(struct scic_sds_request *sci_req, 64static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
65 int idx) 65 int idx)
66{ 66{
67 if (idx == 0) 67 if (idx == 0)
68 return &sci_req->tc->sgl_pair_ab; 68 return &ireq->tc->sgl_pair_ab;
69 else if (idx == 1) 69 else if (idx == 1)
70 return &sci_req->tc->sgl_pair_cd; 70 return &ireq->tc->sgl_pair_cd;
71 else if (idx < 0) 71 else if (idx < 0)
72 return NULL; 72 return NULL;
73 else 73 else
74 return &sci_req->sg_table[idx - 2]; 74 return &ireq->sg_table[idx - 2];
75} 75}
76 76
77static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic, 77static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic,
78 struct scic_sds_request *sci_req, u32 idx) 78 struct isci_request *ireq, u32 idx)
79{ 79{
80 u32 offset; 80 u32 offset;
81 81
82 if (idx == 0) { 82 if (idx == 0) {
83 offset = (void *) &sci_req->tc->sgl_pair_ab - 83 offset = (void *) &ireq->tc->sgl_pair_ab -
84 (void *) &scic->task_context_table[0]; 84 (void *) &scic->task_context_table[0];
85 return scic->task_context_dma + offset; 85 return scic->task_context_dma + offset;
86 } else if (idx == 1) { 86 } else if (idx == 1) {
87 offset = (void *) &sci_req->tc->sgl_pair_cd - 87 offset = (void *) &ireq->tc->sgl_pair_cd -
88 (void *) &scic->task_context_table[0]; 88 (void *) &scic->task_context_table[0];
89 return scic->task_context_dma + offset; 89 return scic->task_context_dma + offset;
90 } 90 }
91 91
92 return scic_io_request_get_dma_addr(sci_req, &sci_req->sg_table[idx - 2]); 92 return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
93} 93}
94 94
95static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) 95static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
@@ -100,12 +100,11 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
100 e->address_modifier = 0; 100 e->address_modifier = 0;
101} 101}
102 102
103static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) 103static void scic_sds_request_build_sgl(struct isci_request *ireq)
104{ 104{
105 struct isci_request *isci_request = sci_req_to_ireq(sds_request); 105 struct isci_host *isci_host = ireq->isci_host;
106 struct isci_host *isci_host = isci_request->isci_host;
107 struct scic_sds_controller *scic = &isci_host->sci; 106 struct scic_sds_controller *scic = &isci_host->sci;
108 struct sas_task *task = isci_request_access_task(isci_request); 107 struct sas_task *task = isci_request_access_task(ireq);
109 struct scatterlist *sg = NULL; 108 struct scatterlist *sg = NULL;
110 dma_addr_t dma_addr; 109 dma_addr_t dma_addr;
111 u32 sg_idx = 0; 110 u32 sg_idx = 0;
@@ -116,7 +115,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
116 sg = task->scatter; 115 sg = task->scatter;
117 116
118 while (sg) { 117 while (sg) {
119 scu_sg = to_sgl_element_pair(sds_request, sg_idx); 118 scu_sg = to_sgl_element_pair(ireq, sg_idx);
120 init_sgl_element(&scu_sg->A, sg); 119 init_sgl_element(&scu_sg->A, sg);
121 sg = sg_next(sg); 120 sg = sg_next(sg);
122 if (sg) { 121 if (sg) {
@@ -127,7 +126,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
127 126
128 if (prev_sg) { 127 if (prev_sg) {
129 dma_addr = to_sgl_element_pair_dma(scic, 128 dma_addr = to_sgl_element_pair_dma(scic,
130 sds_request, 129 ireq,
131 sg_idx); 130 sg_idx);
132 131
133 prev_sg->next_pair_upper = 132 prev_sg->next_pair_upper =
@@ -140,14 +139,14 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
140 sg_idx++; 139 sg_idx++;
141 } 140 }
142 } else { /* handle when no sg */ 141 } else { /* handle when no sg */
143 scu_sg = to_sgl_element_pair(sds_request, sg_idx); 142 scu_sg = to_sgl_element_pair(ireq, sg_idx);
144 143
145 dma_addr = dma_map_single(&isci_host->pdev->dev, 144 dma_addr = dma_map_single(&isci_host->pdev->dev,
146 task->scatter, 145 task->scatter,
147 task->total_xfer_len, 146 task->total_xfer_len,
148 task->data_dir); 147 task->data_dir);
149 148
150 isci_request->zero_scatter_daddr = dma_addr; 149 ireq->zero_scatter_daddr = dma_addr;
151 150
152 scu_sg->A.length = task->total_xfer_len; 151 scu_sg->A.length = task->total_xfer_len;
153 scu_sg->A.address_upper = upper_32_bits(dma_addr); 152 scu_sg->A.address_upper = upper_32_bits(dma_addr);
@@ -160,13 +159,12 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
160 } 159 }
161} 160}
162 161
163static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) 162static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq)
164{ 163{
165 struct ssp_cmd_iu *cmd_iu; 164 struct ssp_cmd_iu *cmd_iu;
166 struct isci_request *ireq = sci_req_to_ireq(sci_req);
167 struct sas_task *task = isci_request_access_task(ireq); 165 struct sas_task *task = isci_request_access_task(ireq);
168 166
169 cmd_iu = &sci_req->ssp.cmd; 167 cmd_iu = &ireq->ssp.cmd;
170 168
171 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); 169 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
172 cmd_iu->add_cdb_len = 0; 170 cmd_iu->add_cdb_len = 0;
@@ -181,14 +179,13 @@ static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sc
181 sizeof(task->ssp_task.cdb) / sizeof(u32)); 179 sizeof(task->ssp_task.cdb) / sizeof(u32));
182} 180}
183 181
184static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req) 182static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq)
185{ 183{
186 struct ssp_task_iu *task_iu; 184 struct ssp_task_iu *task_iu;
187 struct isci_request *ireq = sci_req_to_ireq(sci_req);
188 struct sas_task *task = isci_request_access_task(ireq); 185 struct sas_task *task = isci_request_access_task(ireq);
189 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 186 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
190 187
191 task_iu = &sci_req->ssp.tmf; 188 task_iu = &ireq->ssp.tmf;
192 189
193 memset(task_iu, 0, sizeof(struct ssp_task_iu)); 190 memset(task_iu, 0, sizeof(struct ssp_task_iu));
194 191
@@ -208,15 +205,15 @@ static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci
208 * 205 *
209 */ 206 */
210static void scu_ssp_reqeust_construct_task_context( 207static void scu_ssp_reqeust_construct_task_context(
211 struct scic_sds_request *sds_request, 208 struct isci_request *ireq,
212 struct scu_task_context *task_context) 209 struct scu_task_context *task_context)
213{ 210{
214 dma_addr_t dma_addr; 211 dma_addr_t dma_addr;
215 struct scic_sds_remote_device *target_device; 212 struct scic_sds_remote_device *target_device;
216 struct scic_sds_port *target_port; 213 struct scic_sds_port *target_port;
217 214
218 target_device = scic_sds_request_get_device(sds_request); 215 target_device = scic_sds_request_get_device(ireq);
219 target_port = scic_sds_request_get_port(sds_request); 216 target_port = scic_sds_request_get_port(ireq);
220 217
221 /* Fill in the TC with the its required data */ 218 /* Fill in the TC with the its required data */
222 task_context->abort = 0; 219 task_context->abort = 0;
@@ -232,7 +229,7 @@ static void scu_ssp_reqeust_construct_task_context(
232 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 229 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
233 230
234 task_context->remote_node_index = 231 task_context->remote_node_index =
235 scic_sds_remote_device_get_index(sds_request->target_device); 232 scic_sds_remote_device_get_index(ireq->target_device);
236 task_context->command_code = 0; 233 task_context->command_code = 0;
237 234
238 task_context->link_layer_control = 0; 235 task_context->link_layer_control = 0;
@@ -244,22 +241,21 @@ static void scu_ssp_reqeust_construct_task_context(
244 241
245 task_context->address_modifier = 0; 242 task_context->address_modifier = 0;
246 243
247 /* task_context->type.ssp.tag = sci_req->io_tag; */ 244 /* task_context->type.ssp.tag = ireq->io_tag; */
248 task_context->task_phase = 0x01; 245 task_context->task_phase = 0x01;
249 246
250 sds_request->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 247 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
251 (scic_sds_controller_get_protocol_engine_group(controller) << 248 (scic_sds_controller_get_protocol_engine_group(controller) <<
252 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 249 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
253 (scic_sds_port_get_index(target_port) << 250 (scic_sds_port_get_index(target_port) <<
254 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 251 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
255 ISCI_TAG_TCI(sds_request->io_tag)); 252 ISCI_TAG_TCI(ireq->io_tag));
256 253
257 /* 254 /*
258 * Copy the physical address for the command buffer to the 255 * Copy the physical address for the command buffer to the
259 * SCU Task Context 256 * SCU Task Context
260 */ 257 */
261 dma_addr = scic_io_request_get_dma_addr(sds_request, 258 dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
262 &sds_request->ssp.cmd);
263 259
264 task_context->command_iu_upper = upper_32_bits(dma_addr); 260 task_context->command_iu_upper = upper_32_bits(dma_addr);
265 task_context->command_iu_lower = lower_32_bits(dma_addr); 261 task_context->command_iu_lower = lower_32_bits(dma_addr);
@@ -268,8 +264,7 @@ static void scu_ssp_reqeust_construct_task_context(
268 * Copy the physical address for the response buffer to the 264 * Copy the physical address for the response buffer to the
269 * SCU Task Context 265 * SCU Task Context
270 */ 266 */
271 dma_addr = scic_io_request_get_dma_addr(sds_request, 267 dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
272 &sds_request->ssp.rsp);
273 268
274 task_context->response_iu_upper = upper_32_bits(dma_addr); 269 task_context->response_iu_upper = upper_32_bits(dma_addr);
275 task_context->response_iu_lower = lower_32_bits(dma_addr); 270 task_context->response_iu_lower = lower_32_bits(dma_addr);
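
In the hunk above, ireq->post_context is assembled by OR-ing together the post-TC request type, the protocol engine group, the logical port index, and the task context index (TCI) extracted from ireq->io_tag. A minimal standalone sketch of that packing follows; the shift values and the 12-bit TCI mask are illustrative placeholders, not the driver's SCU_CONTEXT_COMMAND_* or ISCI_TAG_TCI() definitions.

/* Illustrative only: placeholder field widths, not the driver's constants. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_REQUEST_TYPE_POST_TC  (1u << 31)      /* stand-in for SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC */
#define DEMO_PE_GROUP_SHIFT        28              /* stand-in for the protocol engine group shift */
#define DEMO_LOGICAL_PORT_SHIFT    24              /* stand-in for the logical port shift */
#define DEMO_TAG_TCI(tag)          ((tag) & 0x0fff) /* stand-in for ISCI_TAG_TCI() */

static uint32_t demo_post_context(uint32_t pe_group, uint32_t port_index, uint16_t io_tag)
{
        /* Same OR-together structure as the assignment to ireq->post_context above. */
        return DEMO_REQUEST_TYPE_POST_TC |
               (pe_group << DEMO_PE_GROUP_SHIFT) |
               (port_index << DEMO_LOGICAL_PORT_SHIFT) |
               DEMO_TAG_TCI(io_tag);
}

int main(void)
{
        printf("post_context = 0x%08x\n", demo_post_context(0, 1, 0x2005));
        return 0;
}
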
@@ -280,13 +275,13 @@ static void scu_ssp_reqeust_construct_task_context(
280 * @sci_req: 275 * @sci_req:
281 * 276 *
282 */ 277 */
283static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *sci_req, 278static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
284 enum dma_data_direction dir, 279 enum dma_data_direction dir,
285 u32 len) 280 u32 len)
286{ 281{
287 struct scu_task_context *task_context = sci_req->tc; 282 struct scu_task_context *task_context = ireq->tc;
288 283
289 scu_ssp_reqeust_construct_task_context(sci_req, task_context); 284 scu_ssp_reqeust_construct_task_context(ireq, task_context);
290 285
291 task_context->ssp_command_iu_length = 286 task_context->ssp_command_iu_length =
292 sizeof(struct ssp_cmd_iu) / sizeof(u32); 287 sizeof(struct ssp_cmd_iu) / sizeof(u32);
@@ -306,7 +301,7 @@ static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *s
306 task_context->transfer_length_bytes = len; 301 task_context->transfer_length_bytes = len;
307 302
308 if (task_context->transfer_length_bytes > 0) 303 if (task_context->transfer_length_bytes > 0)
309 scic_sds_request_build_sgl(sci_req); 304 scic_sds_request_build_sgl(ireq);
310} 305}
311 306
312/** 307/**
@@ -322,11 +317,11 @@ static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *s
322 * constructed. 317 * constructed.
323 * 318 *
324 */ 319 */
325static void scu_ssp_task_request_construct_task_context(struct scic_sds_request *sci_req) 320static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
326{ 321{
327 struct scu_task_context *task_context = sci_req->tc; 322 struct scu_task_context *task_context = ireq->tc;
328 323
329 scu_ssp_reqeust_construct_task_context(sci_req, task_context); 324 scu_ssp_reqeust_construct_task_context(ireq, task_context);
330 325
331 task_context->control_frame = 1; 326 task_context->control_frame = 1;
332 task_context->priority = SCU_TASK_PRIORITY_HIGH; 327 task_context->priority = SCU_TASK_PRIORITY_HIGH;
@@ -350,15 +345,15 @@ static void scu_ssp_task_request_construct_task_context(struct scic_sds_request
350 * determine what is common for SSP/SMP/STP task context structures. 345 * determine what is common for SSP/SMP/STP task context structures.
351 */ 346 */
352static void scu_sata_reqeust_construct_task_context( 347static void scu_sata_reqeust_construct_task_context(
353 struct scic_sds_request *sci_req, 348 struct isci_request *ireq,
354 struct scu_task_context *task_context) 349 struct scu_task_context *task_context)
355{ 350{
356 dma_addr_t dma_addr; 351 dma_addr_t dma_addr;
357 struct scic_sds_remote_device *target_device; 352 struct scic_sds_remote_device *target_device;
358 struct scic_sds_port *target_port; 353 struct scic_sds_port *target_port;
359 354
360 target_device = scic_sds_request_get_device(sci_req); 355 target_device = scic_sds_request_get_device(ireq);
361 target_port = scic_sds_request_get_port(sci_req); 356 target_port = scic_sds_request_get_port(ireq);
362 357
363 /* Fill in the TC with the its required data */ 358 /* Fill in the TC with the its required data */
364 task_context->abort = 0; 359 task_context->abort = 0;
@@ -374,7 +369,7 @@ static void scu_sata_reqeust_construct_task_context(
374 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 369 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
375 370
376 task_context->remote_node_index = 371 task_context->remote_node_index =
377 scic_sds_remote_device_get_index(sci_req->target_device); 372 scic_sds_remote_device_get_index(ireq->target_device);
378 task_context->command_code = 0; 373 task_context->command_code = 0;
379 374
380 task_context->link_layer_control = 0; 375 task_context->link_layer_control = 0;
@@ -391,21 +386,21 @@ static void scu_sata_reqeust_construct_task_context(
391 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); 386 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
392 387
393 /* Set the first word of the H2D REG FIS */ 388 /* Set the first word of the H2D REG FIS */
394 task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; 389 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
395 390
396 sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 391 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
397 (scic_sds_controller_get_protocol_engine_group(controller) << 392 (scic_sds_controller_get_protocol_engine_group(controller) <<
398 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 393 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
399 (scic_sds_port_get_index(target_port) << 394 (scic_sds_port_get_index(target_port) <<
400 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 395 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
401 ISCI_TAG_TCI(sci_req->io_tag)); 396 ISCI_TAG_TCI(ireq->io_tag));
402 /* 397 /*
403 * Copy the physical address for the command buffer to the SCU Task 398 * Copy the physical address for the command buffer to the SCU Task
404 * Context. We must offset the command buffer by 4 bytes because the 399 * Context. We must offset the command buffer by 4 bytes because the
405 * first 4 bytes are transfered in the body of the TC. 400 * first 4 bytes are transfered in the body of the TC.
406 */ 401 */
407 dma_addr = scic_io_request_get_dma_addr(sci_req, 402 dma_addr = scic_io_request_get_dma_addr(ireq,
408 ((char *) &sci_req->stp.cmd) + 403 ((char *) &ireq->stp.cmd) +
409 sizeof(u32)); 404 sizeof(u32));
410 405
411 task_context->command_iu_upper = upper_32_bits(dma_addr); 406 task_context->command_iu_upper = upper_32_bits(dma_addr);
@@ -416,11 +411,11 @@ static void scu_sata_reqeust_construct_task_context(
416 task_context->response_iu_lower = 0; 411 task_context->response_iu_lower = 0;
417} 412}
418 413
419static void scu_stp_raw_request_construct_task_context(struct scic_sds_request *sci_req) 414static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
420{ 415{
421 struct scu_task_context *task_context = sci_req->tc; 416 struct scu_task_context *task_context = ireq->tc;
422 417
423 scu_sata_reqeust_construct_task_context(sci_req, task_context); 418 scu_sata_reqeust_construct_task_context(ireq, task_context);
424 419
425 task_context->control_frame = 0; 420 task_context->control_frame = 0;
426 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 421 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
@@ -429,20 +424,19 @@ static void scu_stp_raw_request_construct_task_context(struct scic_sds_request *
429 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); 424 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
430} 425}
431 426
432static enum sci_status 427static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq,
433scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, 428 bool copy_rx_frame)
434 bool copy_rx_frame)
435{ 429{
436 struct isci_stp_request *stp_req = &sci_req->stp.req; 430 struct isci_stp_request *stp_req = &ireq->stp.req;
437 431
438 scu_stp_raw_request_construct_task_context(sci_req); 432 scu_stp_raw_request_construct_task_context(ireq);
439 433
440 stp_req->status = 0; 434 stp_req->status = 0;
441 stp_req->sgl.offset = 0; 435 stp_req->sgl.offset = 0;
442 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; 436 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
443 437
444 if (copy_rx_frame) { 438 if (copy_rx_frame) {
445 scic_sds_request_build_sgl(sci_req); 439 scic_sds_request_build_sgl(ireq);
446 stp_req->sgl.index = 0; 440 stp_req->sgl.index = 0;
447 } else { 441 } else {
448 /* The user does not want the data copied to the SGL buffer location */ 442 /* The user does not want the data copied to the SGL buffer location */
@@ -464,18 +458,18 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
464 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method 458 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
465 * returns an indication as to whether the construction was successful. 459 * returns an indication as to whether the construction was successful.
466 */ 460 */
467static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req, 461static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq,
468 u8 optimized_task_type, 462 u8 optimized_task_type,
469 u32 len, 463 u32 len,
470 enum dma_data_direction dir) 464 enum dma_data_direction dir)
471{ 465{
472 struct scu_task_context *task_context = sci_req->tc; 466 struct scu_task_context *task_context = ireq->tc;
473 467
474 /* Build the STP task context structure */ 468 /* Build the STP task context structure */
475 scu_sata_reqeust_construct_task_context(sci_req, task_context); 469 scu_sata_reqeust_construct_task_context(ireq, task_context);
476 470
477 /* Copy over the SGL elements */ 471 /* Copy over the SGL elements */
478 scic_sds_request_build_sgl(sci_req); 472 scic_sds_request_build_sgl(ireq);
479 473
480 /* Copy over the number of bytes to be transfered */ 474 /* Copy over the number of bytes to be transfered */
481 task_context->transfer_length_bytes = len; 475 task_context->transfer_length_bytes = len;
@@ -500,13 +494,12 @@ static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sc
500 494
501 495
502static enum sci_status 496static enum sci_status
503scic_io_request_construct_sata(struct scic_sds_request *sci_req, 497scic_io_request_construct_sata(struct isci_request *ireq,
504 u32 len, 498 u32 len,
505 enum dma_data_direction dir, 499 enum dma_data_direction dir,
506 bool copy) 500 bool copy)
507{ 501{
508 enum sci_status status = SCI_SUCCESS; 502 enum sci_status status = SCI_SUCCESS;
509 struct isci_request *ireq = sci_req_to_ireq(sci_req);
510 struct sas_task *task = isci_request_access_task(ireq); 503 struct sas_task *task = isci_request_access_task(ireq);
511 504
512 /* check for management protocols */ 505 /* check for management protocols */
@@ -515,20 +508,20 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
515 508
516 if (tmf->tmf_code == isci_tmf_sata_srst_high || 509 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
517 tmf->tmf_code == isci_tmf_sata_srst_low) { 510 tmf->tmf_code == isci_tmf_sata_srst_low) {
518 scu_stp_raw_request_construct_task_context(sci_req); 511 scu_stp_raw_request_construct_task_context(ireq);
519 return SCI_SUCCESS; 512 return SCI_SUCCESS;
520 } else { 513 } else {
521 dev_err(scic_to_dev(sci_req->owning_controller), 514 dev_err(scic_to_dev(ireq->owning_controller),
522 "%s: Request 0x%p received un-handled SAT " 515 "%s: Request 0x%p received un-handled SAT "
523 "management protocol 0x%x.\n", 516 "management protocol 0x%x.\n",
524 __func__, sci_req, tmf->tmf_code); 517 __func__, ireq, tmf->tmf_code);
525 518
526 return SCI_FAILURE; 519 return SCI_FAILURE;
527 } 520 }
528 } 521 }
529 522
530 if (!sas_protocol_ata(task->task_proto)) { 523 if (!sas_protocol_ata(task->task_proto)) {
531 dev_err(scic_to_dev(sci_req->owning_controller), 524 dev_err(scic_to_dev(ireq->owning_controller),
532 "%s: Non-ATA protocol in SATA path: 0x%x\n", 525 "%s: Non-ATA protocol in SATA path: 0x%x\n",
533 __func__, 526 __func__,
534 task->task_proto); 527 task->task_proto);
@@ -538,13 +531,13 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
538 531
539 /* non data */ 532 /* non data */
540 if (task->data_dir == DMA_NONE) { 533 if (task->data_dir == DMA_NONE) {
541 scu_stp_raw_request_construct_task_context(sci_req); 534 scu_stp_raw_request_construct_task_context(ireq);
542 return SCI_SUCCESS; 535 return SCI_SUCCESS;
543 } 536 }
544 537
545 /* NCQ */ 538 /* NCQ */
546 if (task->ata_task.use_ncq) { 539 if (task->ata_task.use_ncq) {
547 scic_sds_stp_optimized_request_construct(sci_req, 540 scic_sds_stp_optimized_request_construct(ireq,
548 SCU_TASK_TYPE_FPDMAQ_READ, 541 SCU_TASK_TYPE_FPDMAQ_READ,
549 len, dir); 542 len, dir);
550 return SCI_SUCCESS; 543 return SCI_SUCCESS;
@@ -552,74 +545,71 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
552 545
553 /* DMA */ 546 /* DMA */
554 if (task->ata_task.dma_xfer) { 547 if (task->ata_task.dma_xfer) {
555 scic_sds_stp_optimized_request_construct(sci_req, 548 scic_sds_stp_optimized_request_construct(ireq,
556 SCU_TASK_TYPE_DMA_IN, 549 SCU_TASK_TYPE_DMA_IN,
557 len, dir); 550 len, dir);
558 return SCI_SUCCESS; 551 return SCI_SUCCESS;
559 } else /* PIO */ 552 } else /* PIO */
560 return scic_sds_stp_pio_request_construct(sci_req, copy); 553 return scic_sds_stp_pio_request_construct(ireq, copy);
561 554
562 return status; 555 return status;
563} 556}
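
scic_io_request_construct_sata() above picks a task-context builder from the sas_task: SATA soft-reset TMFs and non-data commands get a raw H2D register FIS context, NCQ and DMA transfers get the silicon-optimized context, and everything else falls through to PIO. The condensed decision ladder below restates that ordering with stand-in types; error reporting and the TMF plumbing are omitted, so it is a reading aid rather than the driver function.

/* Simplified restatement of the decision ladder in scic_io_request_construct_sata();
 * the types and flags are stand-ins, not the driver's. */
#include <stdbool.h>

enum demo_tc_kind { DEMO_TC_RAW_H2D, DEMO_TC_FPDMA, DEMO_TC_DMA, DEMO_TC_PIO, DEMO_TC_ERROR };

struct demo_ata_task {
        bool is_tmf_srst;   /* SATA soft-reset TMF (srst high/low) */
        bool proto_is_ata;  /* sas_protocol_ata(task->task_proto) */
        bool data_none;     /* task->data_dir == DMA_NONE */
        bool use_ncq;       /* task->ata_task.use_ncq */
        bool dma_xfer;      /* task->ata_task.dma_xfer */
};

static enum demo_tc_kind demo_construct_sata(const struct demo_ata_task *t)
{
        if (t->is_tmf_srst)
                return DEMO_TC_RAW_H2D;   /* raw register H2D FIS context */
        if (!t->proto_is_ata)
                return DEMO_TC_ERROR;     /* non-ATA protocol on the SATA path */
        if (t->data_none)
                return DEMO_TC_RAW_H2D;   /* non-data command */
        if (t->use_ncq)
                return DEMO_TC_FPDMA;     /* optimized FPDMA (NCQ) context */
        if (t->dma_xfer)
                return DEMO_TC_DMA;       /* optimized DMA context */
        return DEMO_TC_PIO;               /* otherwise PIO */
}
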
564 557
565static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req) 558static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq)
566{ 559{
567 struct isci_request *ireq = sci_req_to_ireq(sci_req);
568 struct sas_task *task = isci_request_access_task(ireq); 560 struct sas_task *task = isci_request_access_task(ireq);
569 561
570 sci_req->protocol = SCIC_SSP_PROTOCOL; 562 ireq->protocol = SCIC_SSP_PROTOCOL;
571 563
572 scu_ssp_io_request_construct_task_context(sci_req, 564 scu_ssp_io_request_construct_task_context(ireq,
573 task->data_dir, 565 task->data_dir,
574 task->total_xfer_len); 566 task->total_xfer_len);
575 567
576 scic_sds_io_request_build_ssp_command_iu(sci_req); 568 scic_sds_io_request_build_ssp_command_iu(ireq);
577 569
578 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); 570 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
579 571
580 return SCI_SUCCESS; 572 return SCI_SUCCESS;
581} 573}
582 574
583enum sci_status scic_task_request_construct_ssp( 575enum sci_status scic_task_request_construct_ssp(
584 struct scic_sds_request *sci_req) 576 struct isci_request *ireq)
585{ 577{
586 /* Construct the SSP Task SCU Task Context */ 578 /* Construct the SSP Task SCU Task Context */
587 scu_ssp_task_request_construct_task_context(sci_req); 579 scu_ssp_task_request_construct_task_context(ireq);
588 580
589 /* Fill in the SSP Task IU */ 581 /* Fill in the SSP Task IU */
590 scic_sds_task_request_build_ssp_task_iu(sci_req); 582 scic_sds_task_request_build_ssp_task_iu(ireq);
591 583
592 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); 584 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
593 585
594 return SCI_SUCCESS; 586 return SCI_SUCCESS;
595} 587}
596 588
597static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) 589static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq)
598{ 590{
599 enum sci_status status; 591 enum sci_status status;
600 bool copy = false; 592 bool copy = false;
601 struct isci_request *isci_request = sci_req_to_ireq(sci_req); 593 struct sas_task *task = isci_request_access_task(ireq);
602 struct sas_task *task = isci_request_access_task(isci_request);
603 594
604 sci_req->protocol = SCIC_STP_PROTOCOL; 595 ireq->protocol = SCIC_STP_PROTOCOL;
605 596
606 copy = (task->data_dir == DMA_NONE) ? false : true; 597 copy = (task->data_dir == DMA_NONE) ? false : true;
607 598
608 status = scic_io_request_construct_sata(sci_req, 599 status = scic_io_request_construct_sata(ireq,
609 task->total_xfer_len, 600 task->total_xfer_len,
610 task->data_dir, 601 task->data_dir,
611 copy); 602 copy);
612 603
613 if (status == SCI_SUCCESS) 604 if (status == SCI_SUCCESS)
614 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); 605 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
615 606
616 return status; 607 return status;
617} 608}
618 609
619enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) 610enum sci_status scic_task_request_construct_sata(struct isci_request *ireq)
620{ 611{
621 enum sci_status status = SCI_SUCCESS; 612 enum sci_status status = SCI_SUCCESS;
622 struct isci_request *ireq = sci_req_to_ireq(sci_req);
623 613
624 /* check for management protocols */ 614 /* check for management protocols */
625 if (ireq->ttype == tmf_task) { 615 if (ireq->ttype == tmf_task) {
@@ -627,12 +617,12 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re
627 617
628 if (tmf->tmf_code == isci_tmf_sata_srst_high || 618 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
629 tmf->tmf_code == isci_tmf_sata_srst_low) { 619 tmf->tmf_code == isci_tmf_sata_srst_low) {
630 scu_stp_raw_request_construct_task_context(sci_req); 620 scu_stp_raw_request_construct_task_context(ireq);
631 } else { 621 } else {
632 dev_err(scic_to_dev(sci_req->owning_controller), 622 dev_err(scic_to_dev(ireq->owning_controller),
633 "%s: Request 0x%p received un-handled SAT " 623 "%s: Request 0x%p received un-handled SAT "
634 "Protocol 0x%x.\n", 624 "Protocol 0x%x.\n",
635 __func__, sci_req, tmf->tmf_code); 625 __func__, ireq, tmf->tmf_code);
636 626
637 return SCI_FAILURE; 627 return SCI_FAILURE;
638 } 628 }
@@ -640,7 +630,7 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re
640 630
641 if (status != SCI_SUCCESS) 631 if (status != SCI_SUCCESS)
642 return status; 632 return status;
643 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); 633 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
644 634
645 return status; 635 return status;
646} 636}
@@ -650,9 +640,9 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re
650 * @sci_req: request that was terminated early 640 * @sci_req: request that was terminated early
651 */ 641 */
652#define SCU_TASK_CONTEXT_SRAM 0x200000 642#define SCU_TASK_CONTEXT_SRAM 0x200000
653static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) 643static u32 sci_req_tx_bytes(struct isci_request *ireq)
654{ 644{
655 struct scic_sds_controller *scic = sci_req->owning_controller; 645 struct scic_sds_controller *scic = ireq->owning_controller;
656 u32 ret_val = 0; 646 u32 ret_val = 0;
657 647
658 if (readl(&scic->smu_registers->address_modifier) == 0) { 648 if (readl(&scic->smu_registers->address_modifier) == 0) {
@@ -666,19 +656,19 @@ static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
666 */ 656 */
667 ret_val = readl(scu_reg_base + 657 ret_val = readl(scu_reg_base +
668 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 658 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
669 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(sci_req->io_tag))); 659 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
670 } 660 }
671 661
672 return ret_val; 662 return ret_val;
673} 663}
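
sci_req_tx_bytes() above reads back the data_offset recorded in the task context for a request that was terminated early, straight out of task-context SRAM: the offset is SCU_TASK_CONTEXT_SRAM plus offsetof() of type.ssp.data_offset plus the TCI times sizeof(struct scu_task_context), added to the mapped SCU register base (scu_reg_base). The sketch below repeats only that address arithmetic with a toy structure; the real scu_task_context layout is of course different.

/* Sketch of the TC-SRAM offset arithmetic used by sci_req_tx_bytes().
 * demo_task_context is a toy layout; only the offsetof/sizeof arithmetic
 * mirrors the code above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TASK_CONTEXT_SRAM 0x200000   /* same role as SCU_TASK_CONTEXT_SRAM */

struct demo_task_context {
        uint32_t control_words[8];        /* placeholder for the TC header fields */
        struct {
                uint32_t data_offset;     /* field read back by the driver */
        } ssp;
        uint32_t trailer[16];             /* placeholder padding */
};

static size_t demo_tx_bytes_offset(unsigned int tci)
{
        /* The driver adds this to the ioremapped SCU register base before readl(). */
        return DEMO_TASK_CONTEXT_SRAM +
               offsetof(struct demo_task_context, ssp.data_offset) +
               sizeof(struct demo_task_context) * tci;
}

int main(void)
{
        unsigned int tci = 3;

        printf("TC %u data_offset lives at SCU offset 0x%zx\n", tci, demo_tx_bytes_offset(tci));
        return 0;
}
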
674 664
675enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) 665enum sci_status scic_sds_request_start(struct isci_request *ireq)
676{ 666{
677 enum sci_base_request_states state; 667 enum sci_base_request_states state;
678 struct scu_task_context *tc = sci_req->tc; 668 struct scu_task_context *tc = ireq->tc;
679 struct scic_sds_controller *scic = sci_req->owning_controller; 669 struct scic_sds_controller *scic = ireq->owning_controller;
680 670
681 state = sci_req->sm.current_state_id; 671 state = ireq->sm.current_state_id;
682 if (state != SCI_REQ_CONSTRUCTED) { 672 if (state != SCI_REQ_CONSTRUCTED) {
683 dev_warn(scic_to_dev(scic), 673 dev_warn(scic_to_dev(scic),
684 "%s: SCIC IO Request requested to start while in wrong " 674 "%s: SCIC IO Request requested to start while in wrong "
@@ -686,19 +676,19 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
686 return SCI_FAILURE_INVALID_STATE; 676 return SCI_FAILURE_INVALID_STATE;
687 } 677 }
688 678
689 tc->task_index = ISCI_TAG_TCI(sci_req->io_tag); 679 tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
690 680
691 switch (tc->protocol_type) { 681 switch (tc->protocol_type) {
692 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 682 case SCU_TASK_CONTEXT_PROTOCOL_SMP:
693 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 683 case SCU_TASK_CONTEXT_PROTOCOL_SSP:
694 /* SSP/SMP Frame */ 684 /* SSP/SMP Frame */
695 tc->type.ssp.tag = sci_req->io_tag; 685 tc->type.ssp.tag = ireq->io_tag;
696 tc->type.ssp.target_port_transfer_tag = 0xFFFF; 686 tc->type.ssp.target_port_transfer_tag = 0xFFFF;
697 break; 687 break;
698 688
699 case SCU_TASK_CONTEXT_PROTOCOL_STP: 689 case SCU_TASK_CONTEXT_PROTOCOL_STP:
700 /* STP/SATA Frame 690 /* STP/SATA Frame
701 * tc->type.stp.ncq_tag = sci_req->ncq_tag; 691 * tc->type.stp.ncq_tag = ireq->ncq_tag;
702 */ 692 */
703 break; 693 break;
704 694
@@ -713,28 +703,28 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
713 } 703 }
714 704
715 /* Add to the post_context the io tag value */ 705 /* Add to the post_context the io tag value */
716 sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); 706 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
717 707
718 /* Everything is good go ahead and change state */ 708 /* Everything is good go ahead and change state */
719 sci_change_state(&sci_req->sm, SCI_REQ_STARTED); 709 sci_change_state(&ireq->sm, SCI_REQ_STARTED);
720 710
721 return SCI_SUCCESS; 711 return SCI_SUCCESS;
722} 712}
723 713
724enum sci_status 714enum sci_status
725scic_sds_io_request_terminate(struct scic_sds_request *sci_req) 715scic_sds_io_request_terminate(struct isci_request *ireq)
726{ 716{
727 enum sci_base_request_states state; 717 enum sci_base_request_states state;
728 718
729 state = sci_req->sm.current_state_id; 719 state = ireq->sm.current_state_id;
730 720
731 switch (state) { 721 switch (state) {
732 case SCI_REQ_CONSTRUCTED: 722 case SCI_REQ_CONSTRUCTED:
733 scic_sds_request_set_status(sci_req, 723 scic_sds_request_set_status(ireq,
734 SCU_TASK_DONE_TASK_ABORT, 724 SCU_TASK_DONE_TASK_ABORT,
735 SCI_FAILURE_IO_TERMINATED); 725 SCI_FAILURE_IO_TERMINATED);
736 726
737 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 727 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
738 return SCI_SUCCESS; 728 return SCI_SUCCESS;
739 case SCI_REQ_STARTED: 729 case SCI_REQ_STARTED:
740 case SCI_REQ_TASK_WAIT_TC_COMP: 730 case SCI_REQ_TASK_WAIT_TC_COMP:
@@ -751,54 +741,54 @@ scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
751 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: 741 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
752 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: 742 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
753 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: 743 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
754 sci_change_state(&sci_req->sm, SCI_REQ_ABORTING); 744 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
755 return SCI_SUCCESS; 745 return SCI_SUCCESS;
756 case SCI_REQ_TASK_WAIT_TC_RESP: 746 case SCI_REQ_TASK_WAIT_TC_RESP:
757 sci_change_state(&sci_req->sm, SCI_REQ_ABORTING); 747 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
758 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 748 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
759 return SCI_SUCCESS; 749 return SCI_SUCCESS;
760 case SCI_REQ_ABORTING: 750 case SCI_REQ_ABORTING:
761 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 751 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
762 return SCI_SUCCESS; 752 return SCI_SUCCESS;
763 case SCI_REQ_COMPLETED: 753 case SCI_REQ_COMPLETED:
764 default: 754 default:
765 dev_warn(scic_to_dev(sci_req->owning_controller), 755 dev_warn(scic_to_dev(ireq->owning_controller),
766 "%s: SCIC IO Request requested to abort while in wrong " 756 "%s: SCIC IO Request requested to abort while in wrong "
767 "state %d\n", 757 "state %d\n",
768 __func__, 758 __func__,
769 sci_req->sm.current_state_id); 759 ireq->sm.current_state_id);
770 break; 760 break;
771 } 761 }
772 762
773 return SCI_FAILURE_INVALID_STATE; 763 return SCI_FAILURE_INVALID_STATE;
774} 764}
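
Condensed, the terminate handler above maps the request's current state onto one of four outcomes: a request that never started is completed as aborted on the spot, anything in flight moves to ABORTING so the hardware can hand the task context back, a request waiting only on the task response is aborted and completed immediately, and a request already aborting is simply completed; any other state is rejected. The reduced-state sketch below is just that table in code form, with the status bookkeeping left out.

/* Reduced-state restatement of scic_sds_io_request_terminate(); the enum is
 * abbreviated and scic_sds_request_set_status() is omitted. */
enum demo_req_state { DEMO_CONSTRUCTED, DEMO_IN_FLIGHT, DEMO_TASK_WAIT_TC_RESP, DEMO_ABORTING, DEMO_COMPLETED };

enum demo_terminate_action {
        DEMO_COMPLETE_AS_ABORTED,   /* never started: mark aborted, go straight to COMPLETED */
        DEMO_MOVE_TO_ABORTING,      /* in flight: wait for the hardware to return the TC */
        DEMO_ABORT_THEN_COMPLETE,   /* waiting on a task response: abort and complete now */
        DEMO_COMPLETE,              /* already aborting: just finish */
        DEMO_INVALID_STATE,         /* already completed (or unknown): refuse */
};

static enum demo_terminate_action demo_terminate(enum demo_req_state state)
{
        switch (state) {
        case DEMO_CONSTRUCTED:        return DEMO_COMPLETE_AS_ABORTED;
        case DEMO_IN_FLIGHT:          return DEMO_MOVE_TO_ABORTING;
        case DEMO_TASK_WAIT_TC_RESP:  return DEMO_ABORT_THEN_COMPLETE;
        case DEMO_ABORTING:           return DEMO_COMPLETE;
        default:                      return DEMO_INVALID_STATE;
        }
}
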
775 765
776enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) 766enum sci_status scic_sds_request_complete(struct isci_request *ireq)
777{ 767{
778 enum sci_base_request_states state; 768 enum sci_base_request_states state;
779 struct scic_sds_controller *scic = sci_req->owning_controller; 769 struct scic_sds_controller *scic = ireq->owning_controller;
780 770
781 state = sci_req->sm.current_state_id; 771 state = ireq->sm.current_state_id;
782 if (WARN_ONCE(state != SCI_REQ_COMPLETED, 772 if (WARN_ONCE(state != SCI_REQ_COMPLETED,
783 "isci: request completion from wrong state (%d)\n", state)) 773 "isci: request completion from wrong state (%d)\n", state))
784 return SCI_FAILURE_INVALID_STATE; 774 return SCI_FAILURE_INVALID_STATE;
785 775
786 if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 776 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
787 scic_sds_controller_release_frame(scic, 777 scic_sds_controller_release_frame(scic,
788 sci_req->saved_rx_frame_index); 778 ireq->saved_rx_frame_index);
789 779
790 /* XXX can we just stop the machine and remove the 'final' state? */ 780 /* XXX can we just stop the machine and remove the 'final' state? */
791 sci_change_state(&sci_req->sm, SCI_REQ_FINAL); 781 sci_change_state(&ireq->sm, SCI_REQ_FINAL);
792 return SCI_SUCCESS; 782 return SCI_SUCCESS;
793} 783}
794 784
795enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, 785enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq,
796 u32 event_code) 786 u32 event_code)
797{ 787{
798 enum sci_base_request_states state; 788 enum sci_base_request_states state;
799 struct scic_sds_controller *scic = sci_req->owning_controller; 789 struct scic_sds_controller *scic = ireq->owning_controller;
800 790
801 state = sci_req->sm.current_state_id; 791 state = ireq->sm.current_state_id;
802 792
803 if (state != SCI_REQ_STP_PIO_DATA_IN) { 793 if (state != SCI_REQ_STP_PIO_DATA_IN) {
804 dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n", 794 dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
@@ -812,7 +802,7 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r
812 /* We are waiting for data and the SCU has R_ERR the data frame. 802 /* We are waiting for data and the SCU has R_ERR the data frame.
813 * Go back to waiting for the D2H Register FIS 803 * Go back to waiting for the D2H Register FIS
814 */ 804 */
815 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 805 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
816 return SCI_SUCCESS; 806 return SCI_SUCCESS;
817 default: 807 default:
818 dev_err(scic_to_dev(scic), 808 dev_err(scic_to_dev(scic),
@@ -832,15 +822,14 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r
832 * @sci_req: This parameter specifies the request object for which to copy 822 * @sci_req: This parameter specifies the request object for which to copy
833 * the response data. 823 * the response data.
834 */ 824 */
835static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) 825static void scic_sds_io_request_copy_response(struct isci_request *ireq)
836{ 826{
837 void *resp_buf; 827 void *resp_buf;
838 u32 len; 828 u32 len;
839 struct ssp_response_iu *ssp_response; 829 struct ssp_response_iu *ssp_response;
840 struct isci_request *ireq = sci_req_to_ireq(sci_req);
841 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 830 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
842 831
843 ssp_response = &sci_req->ssp.rsp; 832 ssp_response = &ireq->ssp.rsp;
844 833
845 resp_buf = &isci_tmf->resp.resp_iu; 834 resp_buf = &isci_tmf->resp.resp_iu;
846 835
@@ -852,7 +841,7 @@ static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
852} 841}
853 842
854static enum sci_status 843static enum sci_status
855request_started_state_tc_event(struct scic_sds_request *sci_req, 844request_started_state_tc_event(struct isci_request *ireq,
856 u32 completion_code) 845 u32 completion_code)
857{ 846{
858 struct ssp_response_iu *resp_iu; 847 struct ssp_response_iu *resp_iu;
@@ -863,7 +852,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req,
863 */ 852 */
864 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 853 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
865 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 854 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
866 scic_sds_request_set_status(sci_req, 855 scic_sds_request_set_status(ireq,
867 SCU_TASK_DONE_GOOD, 856 SCU_TASK_DONE_GOOD,
868 SCI_SUCCESS); 857 SCI_SUCCESS);
869 break; 858 break;
@@ -875,19 +864,19 @@ request_started_state_tc_event(struct scic_sds_request *sci_req,
875 * truly a failed request or a good request that just got 864 * truly a failed request or a good request that just got
876 * completed early. 865 * completed early.
877 */ 866 */
878 struct ssp_response_iu *resp = &sci_req->ssp.rsp; 867 struct ssp_response_iu *resp = &ireq->ssp.rsp;
879 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 868 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
880 869
881 sci_swab32_cpy(&sci_req->ssp.rsp, 870 sci_swab32_cpy(&ireq->ssp.rsp,
882 &sci_req->ssp.rsp, 871 &ireq->ssp.rsp,
883 word_cnt); 872 word_cnt);
884 873
885 if (resp->status == 0) { 874 if (resp->status == 0) {
886 scic_sds_request_set_status(sci_req, 875 scic_sds_request_set_status(ireq,
887 SCU_TASK_DONE_GOOD, 876 SCU_TASK_DONE_GOOD,
888 SCI_SUCCESS_IO_DONE_EARLY); 877 SCI_SUCCESS_IO_DONE_EARLY);
889 } else { 878 } else {
890 scic_sds_request_set_status(sci_req, 879 scic_sds_request_set_status(ireq,
891 SCU_TASK_DONE_CHECK_RESPONSE, 880 SCU_TASK_DONE_CHECK_RESPONSE,
892 SCI_FAILURE_IO_RESPONSE_VALID); 881 SCI_FAILURE_IO_RESPONSE_VALID);
893 } 882 }
@@ -896,11 +885,11 @@ request_started_state_tc_event(struct scic_sds_request *sci_req,
896 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { 885 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
897 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 886 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
898 887
899 sci_swab32_cpy(&sci_req->ssp.rsp, 888 sci_swab32_cpy(&ireq->ssp.rsp,
900 &sci_req->ssp.rsp, 889 &ireq->ssp.rsp,
901 word_cnt); 890 word_cnt);
902 891
903 scic_sds_request_set_status(sci_req, 892 scic_sds_request_set_status(ireq,
904 SCU_TASK_DONE_CHECK_RESPONSE, 893 SCU_TASK_DONE_CHECK_RESPONSE,
905 SCI_FAILURE_IO_RESPONSE_VALID); 894 SCI_FAILURE_IO_RESPONSE_VALID);
906 break; 895 break;
@@ -911,15 +900,15 @@ request_started_state_tc_event(struct scic_sds_request *sci_req,
911 * guaranteed to be received before this completion status is 900 * guaranteed to be received before this completion status is
912 * posted? 901 * posted?
913 */ 902 */
914 resp_iu = &sci_req->ssp.rsp; 903 resp_iu = &ireq->ssp.rsp;
915 datapres = resp_iu->datapres; 904 datapres = resp_iu->datapres;
916 905
917 if (datapres == 1 || datapres == 2) { 906 if (datapres == 1 || datapres == 2) {
918 scic_sds_request_set_status(sci_req, 907 scic_sds_request_set_status(ireq,
919 SCU_TASK_DONE_CHECK_RESPONSE, 908 SCU_TASK_DONE_CHECK_RESPONSE,
920 SCI_FAILURE_IO_RESPONSE_VALID); 909 SCI_FAILURE_IO_RESPONSE_VALID);
921 } else 910 } else
922 scic_sds_request_set_status(sci_req, 911 scic_sds_request_set_status(ireq,
923 SCU_TASK_DONE_GOOD, 912 SCU_TASK_DONE_GOOD,
924 SCI_SUCCESS); 913 SCI_SUCCESS);
925 break; 914 break;
@@ -935,13 +924,13 @@ request_started_state_tc_event(struct scic_sds_request *sci_req,
935 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 924 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
936 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 925 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
937 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 926 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
938 if (sci_req->protocol == SCIC_STP_PROTOCOL) { 927 if (ireq->protocol == SCIC_STP_PROTOCOL) {
939 scic_sds_request_set_status(sci_req, 928 scic_sds_request_set_status(ireq,
940 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 929 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
941 SCU_COMPLETION_TL_STATUS_SHIFT, 930 SCU_COMPLETION_TL_STATUS_SHIFT,
942 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); 931 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
943 } else { 932 } else {
944 scic_sds_request_set_status(sci_req, 933 scic_sds_request_set_status(ireq,
945 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 934 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
946 SCU_COMPLETION_TL_STATUS_SHIFT, 935 SCU_COMPLETION_TL_STATUS_SHIFT,
947 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 936 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
@@ -959,7 +948,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req,
959 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): 948 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
960 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): 949 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
961 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): 950 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
962 scic_sds_request_set_status(sci_req, 951 scic_sds_request_set_status(ireq,
963 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 952 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
964 SCU_COMPLETION_TL_STATUS_SHIFT, 953 SCU_COMPLETION_TL_STATUS_SHIFT,
965 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); 954 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
@@ -983,7 +972,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req,
983 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): 972 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
984 default: 973 default:
985 scic_sds_request_set_status( 974 scic_sds_request_set_status(
986 sci_req, 975 ireq,
987 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 976 SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
988 SCU_COMPLETION_TL_STATUS_SHIFT, 977 SCU_COMPLETION_TL_STATUS_SHIFT,
989 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 978 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
@@ -995,21 +984,21 @@ request_started_state_tc_event(struct scic_sds_request *sci_req,
995 */ 984 */
996 985
997 /* In all cases we will treat this as the completion of the IO req. */ 986 /* In all cases we will treat this as the completion of the IO req. */
998 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 987 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
999 return SCI_SUCCESS; 988 return SCI_SUCCESS;
1000} 989}
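
request_started_state_tc_event() above, and most of the *_tc_event handlers that follow, share one skeleton: pull the transport-layer status field out of the 32-bit completion code, record a (SCU status, SCI status) pair on the request with scic_sds_request_set_status(), then drive the state machine with sci_change_state(). The sketch below shows only that skeleton; the 16-bit shift is a placeholder (the real SCU_COMPLETION_TL_STATUS_SHIFT value is not visible in this diff), and the driver's separate SCU_GET_COMPLETION_TL_STATUS / SCU_NORMALIZE_COMPLETION_STATUS macros are collapsed into a single shift here.

/* Skeleton of the tc_event handler pattern; shift value and status codes are
 * placeholders, not the driver's SCU_* definitions. */
#include <stdint.h>

#define DEMO_TL_STATUS_SHIFT   16                          /* placeholder shift */
#define DEMO_GET_TL_STATUS(c)  ((c) >> DEMO_TL_STATUS_SHIFT)
#define DEMO_TASK_DONE_GOOD    0x00                        /* placeholder completion code */

enum { DEMO_REQ_STARTED, DEMO_REQ_COMPLETED };

struct demo_request {
        uint32_t scu_status;  /* raw hardware status, as recorded by scic_sds_request_set_status() */
        int sci_status;       /* driver-level status; 0 stands for SCI_SUCCESS here */
        int state;            /* next state, as set by sci_change_state() */
};

static void demo_tc_event(struct demo_request *req, uint32_t completion_code)
{
        switch (DEMO_GET_TL_STATUS(completion_code)) {
        case DEMO_TASK_DONE_GOOD:
                req->scu_status = DEMO_TASK_DONE_GOOD;
                req->sci_status = 0;
                break;
        default:
                /* anything else: keep the normalized hardware status and fail the IO */
                req->scu_status = DEMO_GET_TL_STATUS(completion_code);
                req->sci_status = -1;
                break;
        }
        req->state = DEMO_REQ_COMPLETED;   /* handlers end with a state transition */
}
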
1001 990
1002static enum sci_status 991static enum sci_status
1003request_aborting_state_tc_event(struct scic_sds_request *sci_req, 992request_aborting_state_tc_event(struct isci_request *ireq,
1004 u32 completion_code) 993 u32 completion_code)
1005{ 994{
1006 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 995 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1007 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 996 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
1008 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): 997 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
1009 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT, 998 scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
1010 SCI_FAILURE_IO_TERMINATED); 999 SCI_FAILURE_IO_TERMINATED);
1011 1000
1012 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1001 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1013 break; 1002 break;
1014 1003
1015 default: 1004 default:
@@ -1022,15 +1011,15 @@ request_aborting_state_tc_event(struct scic_sds_request *sci_req,
1022 return SCI_SUCCESS; 1011 return SCI_SUCCESS;
1023} 1012}
1024 1013
1025static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req, 1014static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
1026 u32 completion_code) 1015 u32 completion_code)
1027{ 1016{
1028 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1017 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1029 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1018 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1030 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1019 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1031 SCI_SUCCESS); 1020 SCI_SUCCESS);
1032 1021
1033 sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1022 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1034 break; 1023 break;
1035 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1024 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1036 /* Currently, the decision is to simply allow the task request 1025 /* Currently, the decision is to simply allow the task request
@@ -1038,12 +1027,12 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *
1038 * There is a potential for receiving multiple task responses if 1027 * There is a potential for receiving multiple task responses if
1039 * we decide to send the task IU again. 1028 * we decide to send the task IU again.
1040 */ 1029 */
1041 dev_warn(scic_to_dev(sci_req->owning_controller), 1030 dev_warn(scic_to_dev(ireq->owning_controller),
1042 "%s: TaskRequest:0x%p CompletionCode:%x - " 1031 "%s: TaskRequest:0x%p CompletionCode:%x - "
1043 "ACK/NAK timeout\n", __func__, sci_req, 1032 "ACK/NAK timeout\n", __func__, ireq,
1044 completion_code); 1033 completion_code);
1045 1034
1046 sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1035 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1047 break; 1036 break;
1048 default: 1037 default:
1049 /* 1038 /*
@@ -1051,11 +1040,11 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *
1051 * If a NAK was received, then it is up to the user to retry 1040 * If a NAK was received, then it is up to the user to retry
1052 * the request. 1041 * the request.
1053 */ 1042 */
1054 scic_sds_request_set_status(sci_req, 1043 scic_sds_request_set_status(ireq,
1055 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1044 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1056 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1045 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1057 1046
1058 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1047 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1059 break; 1048 break;
1060 } 1049 }
1061 1050
@@ -1063,7 +1052,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *
1063} 1052}
1064 1053
1065static enum sci_status 1054static enum sci_status
1066smp_request_await_response_tc_event(struct scic_sds_request *sci_req, 1055smp_request_await_response_tc_event(struct isci_request *ireq,
1067 u32 completion_code) 1056 u32 completion_code)
1068{ 1057{
1069 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1058 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
@@ -1072,10 +1061,10 @@ smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
1072 * unexpected. but if the TC has success status, we 1061 * unexpected. but if the TC has success status, we
1073 * complete the IO anyway. 1062 * complete the IO anyway.
1074 */ 1063 */
1075 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1064 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1076 SCI_SUCCESS); 1065 SCI_SUCCESS);
1077 1066
1078 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1067 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1079 break; 1068 break;
1080 1069
1081 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1070 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
@@ -1089,21 +1078,21 @@ smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
1089 * these SMP_XXX_XX_ERR status. For these type of error, 1078 * these SMP_XXX_XX_ERR status. For these type of error,
1090 * we ask scic user to retry the request. 1079 * we ask scic user to retry the request.
1091 */ 1080 */
1092 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR, 1081 scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
1093 SCI_FAILURE_RETRY_REQUIRED); 1082 SCI_FAILURE_RETRY_REQUIRED);
1094 1083
1095 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1084 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1096 break; 1085 break;
1097 1086
1098 default: 1087 default:
1099 /* All other completion status cause the IO to be complete. If a NAK 1088 /* All other completion status cause the IO to be complete. If a NAK
1100 * was received, then it is up to the user to retry the request 1089 * was received, then it is up to the user to retry the request
1101 */ 1090 */
1102 scic_sds_request_set_status(sci_req, 1091 scic_sds_request_set_status(ireq,
1103 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1092 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1104 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1093 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1105 1094
1106 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1095 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1107 break; 1096 break;
1108 } 1097 }
1109 1098
@@ -1111,50 +1100,50 @@ smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
1111} 1100}
1112 1101
1113static enum sci_status 1102static enum sci_status
1114smp_request_await_tc_event(struct scic_sds_request *sci_req, 1103smp_request_await_tc_event(struct isci_request *ireq,
1115 u32 completion_code) 1104 u32 completion_code)
1116{ 1105{
1117 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1106 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1118 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1107 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1119 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1108 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1120 SCI_SUCCESS); 1109 SCI_SUCCESS);
1121 1110
1122 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1111 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1123 break; 1112 break;
1124 default: 1113 default:
1125 /* All other completion status cause the IO to be 1114 /* All other completion status cause the IO to be
1126 * complete. If a NAK was received, then it is up to 1115 * complete. If a NAK was received, then it is up to
1127 * the user to retry the request. 1116 * the user to retry the request.
1128 */ 1117 */
1129 scic_sds_request_set_status(sci_req, 1118 scic_sds_request_set_status(ireq,
1130 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1119 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1131 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1120 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1132 1121
1133 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1122 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1134 break; 1123 break;
1135 } 1124 }
1136 1125
1137 return SCI_SUCCESS; 1126 return SCI_SUCCESS;
1138} 1127}
1139 1128
1140void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req, 1129void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq,
1141 u16 ncq_tag) 1130 u16 ncq_tag)
1142{ 1131{
1143 /** 1132 /**
1144 * @note This could be made to return an error to the user if the user 1133 * @note This could be made to return an error to the user if the user
1145 * attempts to set the NCQ tag in the wrong state. 1134 * attempts to set the NCQ tag in the wrong state.
1146 */ 1135 */
1147 req->tc->type.stp.ncq_tag = ncq_tag; 1136 ireq->tc->type.stp.ncq_tag = ncq_tag;
1148} 1137}
1149 1138
1150static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) 1139static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1151{ 1140{
1152 struct scu_sgl_element *sgl; 1141 struct scu_sgl_element *sgl;
1153 struct scu_sgl_element_pair *sgl_pair; 1142 struct scu_sgl_element_pair *sgl_pair;
1154 struct scic_sds_request *sci_req = to_sci_req(stp_req); 1143 struct isci_request *ireq = to_ireq(stp_req);
1155 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; 1144 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
1156 1145
1157 sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->index); 1146 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1158 if (!sgl_pair) 1147 if (!sgl_pair)
1159 sgl = NULL; 1148 sgl = NULL;
1160 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { 1149 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
@@ -1172,7 +1161,7 @@ static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1172 } else { 1161 } else {
1173 pio_sgl->index++; 1162 pio_sgl->index++;
1174 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; 1163 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
1175 sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->index); 1164 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1176 sgl = &sgl_pair->A; 1165 sgl = &sgl_pair->A;
1177 } 1166 }
1178 } 1167 }
@@ -1181,15 +1170,15 @@ static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1181} 1170}
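
pio_sgl_next() advances the PIO request's scatter-gather cursor. Only part of the function appears in these hunks; the visible else branch steps from element B of the current pair to element A of the next (index++, set back to SCU_SGL_ELEMENT_PAIR_A, refetch the pair). The sketch below is a rough standalone model of that two-level walk; the end-of-list checks are assumptions, since they fall outside the lines shown.

/* Rough model of walking scu_sgl_element pairs the way pio_sgl_next() does:
 * alternate A -> B inside a pair, then move to the next pair's A.  The
 * exhausted-list handling here is assumed, not taken from the hunk above. */
#include <stddef.h>

struct demo_sgl_element { unsigned int length; unsigned long address; };
struct demo_sgl_pair { struct demo_sgl_element A, B; };

struct demo_pio_cursor {
        struct demo_sgl_pair *pairs;
        size_t nr_pairs;
        size_t index;   /* which pair */
        int on_b;       /* 0 = element A, 1 = element B */
};

static struct demo_sgl_element *demo_sgl_next(struct demo_pio_cursor *cur)
{
        if (cur->index >= cur->nr_pairs)
                return NULL;
        if (!cur->on_b) {
                cur->on_b = 1;                  /* A -> B within the same pair */
                return &cur->pairs[cur->index].B;
        }
        cur->on_b = 0;                          /* B -> next pair's A */
        cur->index++;
        if (cur->index >= cur->nr_pairs)
                return NULL;
        return &cur->pairs[cur->index].A;
}
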
1182 1171
1183static enum sci_status 1172static enum sci_status
1184stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req, 1173stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1185 u32 completion_code) 1174 u32 completion_code)
1186{ 1175{
1187 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1176 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1188 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1177 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1189 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1178 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1190 SCI_SUCCESS); 1179 SCI_SUCCESS);
1191 1180
1192 sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); 1181 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1193 break; 1182 break;
1194 1183
1195 default: 1184 default:
@@ -1197,11 +1186,11 @@ stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
1197 * complete. If a NAK was received, then it is up to 1186 * complete. If a NAK was received, then it is up to
1198 * the user to retry the request. 1187 * the user to retry the request.
1199 */ 1188 */
1200 scic_sds_request_set_status(sci_req, 1189 scic_sds_request_set_status(ireq,
1201 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1190 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1202 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1191 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1203 1192
1204 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1193 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1205 break; 1194 break;
1206 } 1195 }
1207 1196
@@ -1214,18 +1203,18 @@ stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
1214 * parameter length. current sgl and offset is alreay stored in the IO request 1203 * parameter length. current sgl and offset is alreay stored in the IO request
1215 */ 1204 */
1216static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( 1205static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
1217 struct scic_sds_request *sci_req, 1206 struct isci_request *ireq,
1218 u32 length) 1207 u32 length)
1219{ 1208{
1220 struct isci_stp_request *stp_req = &sci_req->stp.req; 1209 struct isci_stp_request *stp_req = &ireq->stp.req;
1221 struct scu_task_context *task_context = sci_req->tc; 1210 struct scu_task_context *task_context = ireq->tc;
1222 struct scu_sgl_element_pair *sgl_pair; 1211 struct scu_sgl_element_pair *sgl_pair;
1223 struct scu_sgl_element *current_sgl; 1212 struct scu_sgl_element *current_sgl;
1224 1213
1225 /* Recycle the TC and reconstruct it for sending out DATA FIS containing 1214 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
1226 * for the data from current_sgl+offset for the input length 1215 * for the data from current_sgl+offset for the input length
1227 */ 1216 */
1228 sgl_pair = to_sgl_element_pair(sci_req, stp_req->sgl.index); 1217 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1229 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) 1218 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
1230 current_sgl = &sgl_pair->A; 1219 current_sgl = &sgl_pair->A;
1231 else 1220 else
@@ -1238,12 +1227,12 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
1238 task_context->type.stp.fis_type = FIS_DATA; 1227 task_context->type.stp.fis_type = FIS_DATA;
1239 1228
1240 /* send the new TC out. */ 1229 /* send the new TC out. */
1241 return scic_controller_continue_io(sci_req); 1230 return scic_controller_continue_io(ireq);
1242} 1231}
1243 1232
1244static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req) 1233static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
1245{ 1234{
1246 struct isci_stp_request *stp_req = &sci_req->stp.req; 1235 struct isci_stp_request *stp_req = &ireq->stp.req;
1247 struct scu_sgl_element_pair *sgl_pair; 1236 struct scu_sgl_element_pair *sgl_pair;
1248 struct scu_sgl_element *sgl; 1237 struct scu_sgl_element *sgl;
1249 enum sci_status status; 1238 enum sci_status status;
@@ -1251,7 +1240,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc
1251 u32 len = 0; 1240 u32 len = 0;
1252 1241
1253 offset = stp_req->sgl.offset; 1242 offset = stp_req->sgl.offset;
1254 sgl_pair = to_sgl_element_pair(sci_req, stp_req->sgl.index); 1243 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1255 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) 1244 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1256 return SCI_FAILURE; 1245 return SCI_FAILURE;
1257 1246
@@ -1267,7 +1256,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc
1267 return SCI_SUCCESS; 1256 return SCI_SUCCESS;
1268 1257
1269 if (stp_req->pio_len >= len) { 1258 if (stp_req->pio_len >= len) {
1270 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, len); 1259 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
1271 if (status != SCI_SUCCESS) 1260 if (status != SCI_SUCCESS)
1272 return status; 1261 return status;
1273 stp_req->pio_len -= len; 1262 stp_req->pio_len -= len;
@@ -1276,7 +1265,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc
1276 sgl = pio_sgl_next(stp_req); 1265 sgl = pio_sgl_next(stp_req);
1277 offset = 0; 1266 offset = 0;
1278 } else if (stp_req->pio_len < len) { 1267 } else if (stp_req->pio_len < len) {
1279 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->pio_len); 1268 scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
1280 1269
1281 /* Sgl offset will be adjusted and saved for future */ 1270 /* Sgl offset will be adjusted and saved for future */
1282 offset += stp_req->pio_len; 1271 offset += stp_req->pio_len;
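
The two branches above split the remaining PIO payload (stp_req->pio_len) across scatter-gather elements: when at least a full element's worth remains, the whole element is sent and the cursor advances via pio_sgl_next(); otherwise a partial frame is sent and the offset into the current element is carried forward. In the driver one frame goes out per pass, re-driven from pio_data_out_tx_done_tc_event() further down; the sketch below collapses that into a plain loop purely for illustration, with demo_send_frame() standing in for the recycled-TC transmit path.

/* Chunking sketch mirroring scic_sds_stp_request_pio_data_out_transmit_data();
 * demo_send_frame() is a stand-in, and the event-driven re-entry is folded
 * into a loop for readability. */
#include <stdio.h>

static void demo_send_frame(unsigned int len)
{
        printf("send DATA FIS of %u bytes\n", len);
}

static void demo_pio_data_out(unsigned int pio_len, const unsigned int *sgl_len, unsigned int nelem)
{
        unsigned int i = 0, offset = 0;

        while (pio_len && i < nelem) {
                unsigned int avail = sgl_len[i] - offset;

                if (pio_len >= avail) {
                        demo_send_frame(avail);   /* drain the rest of this element */
                        pio_len -= avail;
                        i++;                      /* pio_sgl_next() in the driver */
                        offset = 0;
                } else {
                        demo_send_frame(pio_len); /* partial element: remember the offset */
                        offset += pio_len;
                        pio_len = 0;
                }
        }
}

int main(void)
{
        const unsigned int sgl[] = { 8192, 8192, 4096 };

        demo_pio_data_out(12000, sgl, 3);
        return 0;
}
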
@@ -1302,7 +1291,6 @@ static enum sci_status
1302scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, 1291scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1303 u8 *data_buf, u32 len) 1292 u8 *data_buf, u32 len)
1304{ 1293{
1305 struct scic_sds_request *sci_req;
1306 struct isci_request *ireq; 1294 struct isci_request *ireq;
1307 u8 *src_addr; 1295 u8 *src_addr;
1308 int copy_len; 1296 int copy_len;
@@ -1311,8 +1299,7 @@ scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_r
1311 void *kaddr; 1299 void *kaddr;
1312 int total_len = len; 1300 int total_len = len;
1313 1301
1314 sci_req = to_sci_req(stp_req); 1302 ireq = to_ireq(stp_req);
1315 ireq = sci_req_to_ireq(sci_req);
1316 task = isci_request_access_task(ireq); 1303 task = isci_request_access_task(ireq);
1317 src_addr = data_buf; 1304 src_addr = data_buf;
1318 1305
@@ -1373,18 +1360,18 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1373} 1360}
1374 1361
1375static enum sci_status 1362static enum sci_status
1376stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req, 1363stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1377 u32 completion_code) 1364 u32 completion_code)
1378{ 1365{
1379 enum sci_status status = SCI_SUCCESS; 1366 enum sci_status status = SCI_SUCCESS;
1380 1367
1381 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1368 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1382 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1369 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1383 scic_sds_request_set_status(sci_req, 1370 scic_sds_request_set_status(ireq,
1384 SCU_TASK_DONE_GOOD, 1371 SCU_TASK_DONE_GOOD,
1385 SCI_SUCCESS); 1372 SCI_SUCCESS);
1386 1373
1387 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1374 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1388 break; 1375 break;
1389 1376
1390 default: 1377 default:
@@ -1392,11 +1379,11 @@ stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
1392 * complete. If a NAK was received, then it is up to 1379 * complete. If a NAK was received, then it is up to
1393 * the user to retry the request. 1380 * the user to retry the request.
1394 */ 1381 */
1395 scic_sds_request_set_status(sci_req, 1382 scic_sds_request_set_status(ireq,
1396 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1383 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1397 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1384 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1398 1385
1399 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1386 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1400 break; 1387 break;
1401 } 1388 }
1402 1389
@@ -1404,18 +1391,18 @@ stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
1404} 1391}
1405 1392
1406static enum sci_status 1393static enum sci_status
1407pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req, 1394pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1408 u32 completion_code) 1395 u32 completion_code)
1409{ 1396{
1410 enum sci_status status = SCI_SUCCESS; 1397 enum sci_status status = SCI_SUCCESS;
1411 bool all_frames_transferred = false; 1398 bool all_frames_transferred = false;
1412 struct isci_stp_request *stp_req = &sci_req->stp.req; 1399 struct isci_stp_request *stp_req = &ireq->stp.req;
1413 1400
1414 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1401 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1415 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1402 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1416 /* Transmit data */ 1403 /* Transmit data */
1417 if (stp_req->pio_len != 0) { 1404 if (stp_req->pio_len != 0) {
1418 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); 1405 status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
1419 if (status == SCI_SUCCESS) { 1406 if (status == SCI_SUCCESS) {
1420 if (stp_req->pio_len == 0) 1407 if (stp_req->pio_len == 0)
1421 all_frames_transferred = true; 1408 all_frames_transferred = true;
@@ -1433,7 +1420,7 @@ pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
1433 /* 1420 /*
1434 * Change the state to SCI_REQ_STP_PIO_DATA_IN 1421 * Change the state to SCI_REQ_STP_PIO_DATA_IN
1435 * and wait for PIO_SETUP fis / or D2H REg fis. */ 1422 * and wait for PIO_SETUP fis / or D2H REg fis. */
1436 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1423 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1437 } 1424 }
1438 break; 1425 break;
1439 1426
@@ -1444,11 +1431,11 @@ pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
1444 * the request. 1431 * the request.
1445 */ 1432 */
1446 scic_sds_request_set_status( 1433 scic_sds_request_set_status(
1447 sci_req, 1434 ireq,
1448 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1435 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1449 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1436 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1450 1437
1451 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1438 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1452 break; 1439 break;
1453 } 1440 }
1454 1441
@@ -1456,18 +1443,18 @@ pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
1456} 1443}
1457 1444
1458static void scic_sds_stp_request_udma_complete_request( 1445static void scic_sds_stp_request_udma_complete_request(
1459 struct scic_sds_request *request, 1446 struct isci_request *ireq,
1460 u32 scu_status, 1447 u32 scu_status,
1461 enum sci_status sci_status) 1448 enum sci_status sci_status)
1462{ 1449{
1463 scic_sds_request_set_status(request, scu_status, sci_status); 1450 scic_sds_request_set_status(ireq, scu_status, sci_status);
1464 sci_change_state(&request->sm, SCI_REQ_COMPLETED); 1451 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1465} 1452}
1466 1453
1467static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req, 1454static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1468 u32 frame_index) 1455 u32 frame_index)
1469{ 1456{
1470 struct scic_sds_controller *scic = sci_req->owning_controller; 1457 struct scic_sds_controller *scic = ireq->owning_controller;
1471 struct dev_to_host_fis *frame_header; 1458 struct dev_to_host_fis *frame_header;
1472 enum sci_status status; 1459 enum sci_status status;
1473 u32 *frame_buffer; 1460 u32 *frame_buffer;
@@ -1482,7 +1469,7 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct sc
1482 frame_index, 1469 frame_index,
1483 (void **)&frame_buffer); 1470 (void **)&frame_buffer);
1484 1471
1485 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, 1472 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1486 frame_header, 1473 frame_header,
1487 frame_buffer); 1474 frame_buffer);
1488 } 1475 }
@@ -1493,16 +1480,16 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct sc
1493} 1480}
1494 1481
1495enum sci_status 1482enum sci_status
1496scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, 1483scic_sds_io_request_frame_handler(struct isci_request *ireq,
1497 u32 frame_index) 1484 u32 frame_index)
1498{ 1485{
1499 struct scic_sds_controller *scic = sci_req->owning_controller; 1486 struct scic_sds_controller *scic = ireq->owning_controller;
1500 struct isci_stp_request *stp_req = &sci_req->stp.req; 1487 struct isci_stp_request *stp_req = &ireq->stp.req;
1501 enum sci_base_request_states state; 1488 enum sci_base_request_states state;
1502 enum sci_status status; 1489 enum sci_status status;
1503 ssize_t word_cnt; 1490 ssize_t word_cnt;
1504 1491
1505 state = sci_req->sm.current_state_id; 1492 state = ireq->sm.current_state_id;
1506 switch (state) { 1493 switch (state) {
1507 case SCI_REQ_STARTED: { 1494 case SCI_REQ_STARTED: {
1508 struct ssp_frame_hdr ssp_hdr; 1495 struct ssp_frame_hdr ssp_hdr;
@@ -1523,24 +1510,24 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1523 frame_index, 1510 frame_index,
1524 (void **)&resp_iu); 1511 (void **)&resp_iu);
1525 1512
1526 sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt); 1513 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1527 1514
1528 resp_iu = &sci_req->ssp.rsp; 1515 resp_iu = &ireq->ssp.rsp;
1529 1516
1530 if (resp_iu->datapres == 0x01 || 1517 if (resp_iu->datapres == 0x01 ||
1531 resp_iu->datapres == 0x02) { 1518 resp_iu->datapres == 0x02) {
1532 scic_sds_request_set_status(sci_req, 1519 scic_sds_request_set_status(ireq,
1533 SCU_TASK_DONE_CHECK_RESPONSE, 1520 SCU_TASK_DONE_CHECK_RESPONSE,
1534 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1521 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1535 } else 1522 } else
1536 scic_sds_request_set_status(sci_req, 1523 scic_sds_request_set_status(ireq,
1537 SCU_TASK_DONE_GOOD, 1524 SCU_TASK_DONE_GOOD,
1538 SCI_SUCCESS); 1525 SCI_SUCCESS);
1539 } else { 1526 } else {
1540 /* not a response frame, why did it get forwarded? */ 1527 /* not a response frame, why did it get forwarded? */
1541 dev_err(scic_to_dev(scic), 1528 dev_err(scic_to_dev(scic),
1542 "%s: SCIC IO Request 0x%p received unexpected " 1529 "%s: SCIC IO Request 0x%p received unexpected "
1543 "frame %d type 0x%02x\n", __func__, sci_req, 1530 "frame %d type 0x%02x\n", __func__, ireq,
1544 frame_index, ssp_hdr.frame_type); 1531 frame_index, ssp_hdr.frame_type);
1545 } 1532 }
1546 1533
@@ -1554,13 +1541,13 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1554 } 1541 }
1555 1542
1556 case SCI_REQ_TASK_WAIT_TC_RESP: 1543 case SCI_REQ_TASK_WAIT_TC_RESP:
1557 scic_sds_io_request_copy_response(sci_req); 1544 scic_sds_io_request_copy_response(ireq);
1558 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1545 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1559 scic_sds_controller_release_frame(scic,frame_index); 1546 scic_sds_controller_release_frame(scic,frame_index);
1560 return SCI_SUCCESS; 1547 return SCI_SUCCESS;
1561 1548
1562 case SCI_REQ_SMP_WAIT_RESP: { 1549 case SCI_REQ_SMP_WAIT_RESP: {
1563 struct smp_resp *rsp_hdr = &sci_req->smp.rsp; 1550 struct smp_resp *rsp_hdr = &ireq->smp.rsp;
1564 void *frame_header; 1551 void *frame_header;
1565 1552
1566 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, 1553 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
@@ -1584,10 +1571,10 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1584 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, 1571 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1585 smp_resp, word_cnt); 1572 smp_resp, word_cnt);
1586 1573
1587 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1574 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1588 SCI_SUCCESS); 1575 SCI_SUCCESS);
1589 1576
1590 sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP); 1577 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1591 } else { 1578 } else {
1592 /* 1579 /*
1593 * This was not a response frame, why did it get 1580 * This was not a response frame, why did it get
@@ -1597,15 +1584,15 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1597 "%s: SCIC SMP Request 0x%p received unexpected " 1584 "%s: SCIC SMP Request 0x%p received unexpected "
1598 "frame %d type 0x%02x\n", 1585 "frame %d type 0x%02x\n",
1599 __func__, 1586 __func__,
1600 sci_req, 1587 ireq,
1601 frame_index, 1588 frame_index,
1602 rsp_hdr->frame_type); 1589 rsp_hdr->frame_type);
1603 1590
1604 scic_sds_request_set_status(sci_req, 1591 scic_sds_request_set_status(ireq,
1605 SCU_TASK_DONE_SMP_FRM_TYPE_ERR, 1592 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1606 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1593 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1607 1594
1608 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1595 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1609 } 1596 }
1610 1597
1611 scic_sds_controller_release_frame(scic, frame_index); 1598 scic_sds_controller_release_frame(scic, frame_index);
@@ -1614,18 +1601,18 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1614 } 1601 }
1615 1602
1616 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1603 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1617 return scic_sds_stp_request_udma_general_frame_handler(sci_req, 1604 return scic_sds_stp_request_udma_general_frame_handler(ireq,
1618 frame_index); 1605 frame_index);
1619 1606
1620 case SCI_REQ_STP_UDMA_WAIT_D2H: 1607 case SCI_REQ_STP_UDMA_WAIT_D2H:
1621 /* Use the general frame handler to copy the response data */ 1608 /* Use the general frame handler to copy the response data */
1622 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, 1609 status = scic_sds_stp_request_udma_general_frame_handler(ireq,
1623 frame_index); 1610 frame_index);
1624 1611
1625 if (status != SCI_SUCCESS) 1612 if (status != SCI_SUCCESS)
1626 return status; 1613 return status;
1627 1614
1628 scic_sds_stp_request_udma_complete_request(sci_req, 1615 scic_sds_stp_request_udma_complete_request(ireq,
1629 SCU_TASK_DONE_CHECK_RESPONSE, 1616 SCU_TASK_DONE_CHECK_RESPONSE,
1630 SCI_FAILURE_IO_RESPONSE_VALID); 1617 SCI_FAILURE_IO_RESPONSE_VALID);
1631 1618
@@ -1657,12 +1644,12 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1657 frame_index, 1644 frame_index,
1658 (void **)&frame_buffer); 1645 (void **)&frame_buffer);
1659 1646
1660 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, 1647 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1661 frame_header, 1648 frame_header,
1662 frame_buffer); 1649 frame_buffer);
1663 1650
1664 /* The command has completed with error */ 1651 /* The command has completed with error */
1665 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE, 1652 scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
1666 SCI_FAILURE_IO_RESPONSE_VALID); 1653 SCI_FAILURE_IO_RESPONSE_VALID);
1667 break; 1654 break;
1668 1655
@@ -1672,12 +1659,12 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1672 "violation occurred\n", __func__, stp_req, 1659 "violation occurred\n", __func__, stp_req,
1673 frame_index); 1660 frame_index);
1674 1661
1675 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS, 1662 scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
1676 SCI_FAILURE_PROTOCOL_VIOLATION); 1663 SCI_FAILURE_PROTOCOL_VIOLATION);
1677 break; 1664 break;
1678 } 1665 }
1679 1666
1680 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1667 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1681 1668
1682 /* Frame has been decoded return it to the controller */ 1669 /* Frame has been decoded return it to the controller */
1683 scic_sds_controller_release_frame(scic, frame_index); 1670 scic_sds_controller_release_frame(scic, frame_index);
@@ -1686,7 +1673,6 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1686 } 1673 }
1687 1674
1688 case SCI_REQ_STP_PIO_WAIT_FRAME: { 1675 case SCI_REQ_STP_PIO_WAIT_FRAME: {
1689 struct isci_request *ireq = sci_req_to_ireq(sci_req);
1690 struct sas_task *task = isci_request_access_task(ireq); 1676 struct sas_task *task = isci_request_access_task(ireq);
1691 struct dev_to_host_fis *frame_header; 1677 struct dev_to_host_fis *frame_header;
1692 u32 *frame_buffer; 1678 u32 *frame_buffer;
@@ -1722,28 +1708,28 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1722 /* status: 4th byte in the 3rd dword */ 1708 /* status: 4th byte in the 3rd dword */
1723 stp_req->status = (frame_buffer[2] >> 24) & 0xff; 1709 stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1724 1710
1725 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, 1711 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1726 frame_header, 1712 frame_header,
1727 frame_buffer); 1713 frame_buffer);
1728 1714
1729 sci_req->stp.rsp.status = stp_req->status; 1715 ireq->stp.rsp.status = stp_req->status;
1730 1716
1731 /* The next state is dependent on whether the 1717 /* The next state is dependent on whether the
1732 * request was PIO Data-in or Data out 1718 * request was PIO Data-in or Data out
1733 */ 1719 */
1734 if (task->data_dir == DMA_FROM_DEVICE) { 1720 if (task->data_dir == DMA_FROM_DEVICE) {
1735 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN); 1721 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1736 } else if (task->data_dir == DMA_TO_DEVICE) { 1722 } else if (task->data_dir == DMA_TO_DEVICE) {
1737 /* Transmit data */ 1723 /* Transmit data */
1738 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req); 1724 status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
1739 if (status != SCI_SUCCESS) 1725 if (status != SCI_SUCCESS)
1740 break; 1726 break;
1741 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT); 1727 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1742 } 1728 }
1743 break; 1729 break;
1744 1730
1745 case FIS_SETDEVBITS: 1731 case FIS_SETDEVBITS:
1746 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1732 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1747 break; 1733 break;
1748 1734
1749 case FIS_REGD2H: 1735 case FIS_REGD2H:
@@ -1767,15 +1753,15 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1767 frame_index, 1753 frame_index,
1768 (void **)&frame_buffer); 1754 (void **)&frame_buffer);
1769 1755
1770 scic_sds_controller_copy_sata_response(&sci_req->stp.req, 1756 scic_sds_controller_copy_sata_response(&ireq->stp.req,
1771 frame_header, 1757 frame_header,
1772 frame_buffer); 1758 frame_buffer);
1773 1759
1774 scic_sds_request_set_status(sci_req, 1760 scic_sds_request_set_status(ireq,
1775 SCU_TASK_DONE_CHECK_RESPONSE, 1761 SCU_TASK_DONE_CHECK_RESPONSE,
1776 SCI_FAILURE_IO_RESPONSE_VALID); 1762 SCI_FAILURE_IO_RESPONSE_VALID);
1777 1763
1778 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1764 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1779 break; 1765 break;
1780 1766
1781 default: 1767 default:
@@ -1818,11 +1804,11 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1818 frame_index, 1804 frame_index,
1819 frame_header->fis_type); 1805 frame_header->fis_type);
1820 1806
1821 scic_sds_request_set_status(sci_req, 1807 scic_sds_request_set_status(ireq,
1822 SCU_TASK_DONE_GOOD, 1808 SCU_TASK_DONE_GOOD,
1823 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); 1809 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1824 1810
1825 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1811 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1826 1812
1827 /* Frame is decoded return it to the controller */ 1813 /* Frame is decoded return it to the controller */
1828 scic_sds_controller_release_frame(scic, frame_index); 1814 scic_sds_controller_release_frame(scic, frame_index);
@@ -1830,7 +1816,7 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1830 } 1816 }
1831 1817
1832 if (stp_req->sgl.index < 0) { 1818 if (stp_req->sgl.index < 0) {
1833 sci_req->saved_rx_frame_index = frame_index; 1819 ireq->saved_rx_frame_index = frame_index;
1834 stp_req->pio_len = 0; 1820 stp_req->pio_len = 0;
1835 } else { 1821 } else {
1836 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, 1822 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
@@ -1851,13 +1837,13 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1851 return status; 1837 return status;
1852 1838
1853 if ((stp_req->status & ATA_BUSY) == 0) { 1839 if ((stp_req->status & ATA_BUSY) == 0) {
1854 scic_sds_request_set_status(sci_req, 1840 scic_sds_request_set_status(ireq,
1855 SCU_TASK_DONE_CHECK_RESPONSE, 1841 SCU_TASK_DONE_CHECK_RESPONSE,
1856 SCI_FAILURE_IO_RESPONSE_VALID); 1842 SCI_FAILURE_IO_RESPONSE_VALID);
1857 1843
1858 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1844 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1859 } else { 1845 } else {
1860 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1846 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1861 } 1847 }
1862 return status; 1848 return status;
1863 } 1849 }
@@ -1886,12 +1872,12 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1886 frame_index, 1872 frame_index,
1887 (void **)&frame_buffer); 1873 (void **)&frame_buffer);
1888 1874
1889 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp, 1875 scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
1890 frame_header, 1876 frame_header,
1891 frame_buffer); 1877 frame_buffer);
1892 1878
1893 /* The command has completed with error */ 1879 /* The command has completed with error */
1894 scic_sds_request_set_status(sci_req, 1880 scic_sds_request_set_status(ireq,
1895 SCU_TASK_DONE_CHECK_RESPONSE, 1881 SCU_TASK_DONE_CHECK_RESPONSE,
1896 SCI_FAILURE_IO_RESPONSE_VALID); 1882 SCI_FAILURE_IO_RESPONSE_VALID);
1897 break; 1883 break;
@@ -1904,13 +1890,13 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1904 stp_req, 1890 stp_req,
1905 frame_index); 1891 frame_index);
1906 1892
1907 scic_sds_request_set_status(sci_req, 1893 scic_sds_request_set_status(ireq,
1908 SCU_TASK_DONE_UNEXP_FIS, 1894 SCU_TASK_DONE_UNEXP_FIS,
1909 SCI_FAILURE_PROTOCOL_VIOLATION); 1895 SCI_FAILURE_PROTOCOL_VIOLATION);
1910 break; 1896 break;
1911 } 1897 }
1912 1898
1913 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 1899 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1914 1900
1915 /* Frame has been decoded return it to the controller */ 1901 /* Frame has been decoded return it to the controller */
1916 scic_sds_controller_release_frame(scic, frame_index); 1902 scic_sds_controller_release_frame(scic, frame_index);
@@ -1938,14 +1924,14 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1938 } 1924 }
1939} 1925}
1940 1926
1941static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req, 1927static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
1942 u32 completion_code) 1928 u32 completion_code)
1943{ 1929{
1944 enum sci_status status = SCI_SUCCESS; 1930 enum sci_status status = SCI_SUCCESS;
1945 1931
1946 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1932 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1947 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1933 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1948 scic_sds_stp_request_udma_complete_request(sci_req, 1934 scic_sds_stp_request_udma_complete_request(ireq,
1949 SCU_TASK_DONE_GOOD, 1935 SCU_TASK_DONE_GOOD,
1950 SCI_SUCCESS); 1936 SCI_SUCCESS);
1951 break; 1937 break;
@@ -1955,11 +1941,11 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *
1955 * Register FIS was received before we got the TC 1941 * Register FIS was received before we got the TC
1956 * completion. 1942 * completion.
1957 */ 1943 */
1958 if (sci_req->stp.rsp.fis_type == FIS_REGD2H) { 1944 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
1959 scic_sds_remote_device_suspend(sci_req->target_device, 1945 scic_sds_remote_device_suspend(ireq->target_device,
1960 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 1946 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1961 1947
1962 scic_sds_stp_request_udma_complete_request(sci_req, 1948 scic_sds_stp_request_udma_complete_request(ireq,
1963 SCU_TASK_DONE_CHECK_RESPONSE, 1949 SCU_TASK_DONE_CHECK_RESPONSE,
1964 SCI_FAILURE_IO_RESPONSE_VALID); 1950 SCI_FAILURE_IO_RESPONSE_VALID);
1965 } else { 1951 } else {
@@ -1968,7 +1954,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *
1968 * the device so we must change state to wait 1954 * the device so we must change state to wait
1969 * for it 1955 * for it
1970 */ 1956 */
1971 sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H); 1957 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
1972 } 1958 }
1973 break; 1959 break;
1974 1960
@@ -1983,12 +1969,12 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *
1983 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): 1969 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1984 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): 1970 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1985 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): 1971 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1986 scic_sds_remote_device_suspend(sci_req->target_device, 1972 scic_sds_remote_device_suspend(ireq->target_device,
1987 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 1973 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1988 /* Fall through to the default case */ 1974 /* Fall through to the default case */
1989 default: 1975 default:
1990 /* All other completion statuses cause the IO to be complete. */ 1976 /* All other completion statuses cause the IO to be complete. */
1991 scic_sds_stp_request_udma_complete_request(sci_req, 1977 scic_sds_stp_request_udma_complete_request(ireq,
1992 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 1978 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1993 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 1979 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1994 break; 1980 break;
@@ -1998,15 +1984,15 @@ static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *
1998} 1984}
1999 1985
2000static enum sci_status 1986static enum sci_status
2001stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req, 1987stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
2002 u32 completion_code) 1988 u32 completion_code)
2003{ 1989{
2004 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1990 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2005 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1991 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2006 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 1992 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
2007 SCI_SUCCESS); 1993 SCI_SUCCESS);
2008 1994
2009 sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); 1995 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2010 break; 1996 break;
2011 1997
2012 default: 1998 default:
@@ -2015,11 +2001,11 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_
2015 * If a NAK was received, then it is up to the user to retry 2001 * If a NAK was received, then it is up to the user to retry
2016 * the request. 2002 * the request.
2017 */ 2003 */
2018 scic_sds_request_set_status(sci_req, 2004 scic_sds_request_set_status(ireq,
2019 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 2005 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2020 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 2006 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2021 2007
2022 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 2008 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2023 break; 2009 break;
2024 } 2010 }
2025 2011
@@ -2027,15 +2013,15 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_
2027} 2013}
2028 2014
2029static enum sci_status 2015static enum sci_status
2030stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req, 2016stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2031 u32 completion_code) 2017 u32 completion_code)
2032{ 2018{
2033 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2019 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2034 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2020 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2035 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, 2021 scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
2036 SCI_SUCCESS); 2022 SCI_SUCCESS);
2037 2023
2038 sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); 2024 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2039 break; 2025 break;
2040 2026
2041 default: 2027 default:
@@ -2043,11 +2029,11 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sc
2043 * a NAK was received, then it is up to the user to retry the 2029 * a NAK was received, then it is up to the user to retry the
2044 * request. 2030 * request.
2045 */ 2031 */
2046 scic_sds_request_set_status(sci_req, 2032 scic_sds_request_set_status(ireq,
2047 SCU_NORMALIZE_COMPLETION_STATUS(completion_code), 2033 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2048 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); 2034 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2049 2035
2050 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); 2036 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2051 break; 2037 break;
2052 } 2038 }
2053 2039
@@ -2055,54 +2041,54 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sc
2055} 2041}
2056 2042
2057enum sci_status 2043enum sci_status
2058scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, 2044scic_sds_io_request_tc_completion(struct isci_request *ireq,
2059 u32 completion_code) 2045 u32 completion_code)
2060{ 2046{
2061 enum sci_base_request_states state; 2047 enum sci_base_request_states state;
2062 struct scic_sds_controller *scic = sci_req->owning_controller; 2048 struct scic_sds_controller *scic = ireq->owning_controller;
2063 2049
2064 state = sci_req->sm.current_state_id; 2050 state = ireq->sm.current_state_id;
2065 2051
2066 switch (state) { 2052 switch (state) {
2067 case SCI_REQ_STARTED: 2053 case SCI_REQ_STARTED:
2068 return request_started_state_tc_event(sci_req, completion_code); 2054 return request_started_state_tc_event(ireq, completion_code);
2069 2055
2070 case SCI_REQ_TASK_WAIT_TC_COMP: 2056 case SCI_REQ_TASK_WAIT_TC_COMP:
2071 return ssp_task_request_await_tc_event(sci_req, 2057 return ssp_task_request_await_tc_event(ireq,
2072 completion_code); 2058 completion_code);
2073 2059
2074 case SCI_REQ_SMP_WAIT_RESP: 2060 case SCI_REQ_SMP_WAIT_RESP:
2075 return smp_request_await_response_tc_event(sci_req, 2061 return smp_request_await_response_tc_event(ireq,
2076 completion_code); 2062 completion_code);
2077 2063
2078 case SCI_REQ_SMP_WAIT_TC_COMP: 2064 case SCI_REQ_SMP_WAIT_TC_COMP:
2079 return smp_request_await_tc_event(sci_req, completion_code); 2065 return smp_request_await_tc_event(ireq, completion_code);
2080 2066
2081 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 2067 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2082 return stp_request_udma_await_tc_event(sci_req, 2068 return stp_request_udma_await_tc_event(ireq,
2083 completion_code); 2069 completion_code);
2084 2070
2085 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 2071 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2086 return stp_request_non_data_await_h2d_tc_event(sci_req, 2072 return stp_request_non_data_await_h2d_tc_event(ireq,
2087 completion_code); 2073 completion_code);
2088 2074
2089 case SCI_REQ_STP_PIO_WAIT_H2D: 2075 case SCI_REQ_STP_PIO_WAIT_H2D:
2090 return stp_request_pio_await_h2d_completion_tc_event(sci_req, 2076 return stp_request_pio_await_h2d_completion_tc_event(ireq,
2091 completion_code); 2077 completion_code);
2092 2078
2093 case SCI_REQ_STP_PIO_DATA_OUT: 2079 case SCI_REQ_STP_PIO_DATA_OUT:
2094 return pio_data_out_tx_done_tc_event(sci_req, completion_code); 2080 return pio_data_out_tx_done_tc_event(ireq, completion_code);
2095 2081
2096 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: 2082 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2097 return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req, 2083 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2098 completion_code); 2084 completion_code);
2099 2085
2100 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: 2086 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2101 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req, 2087 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2102 completion_code); 2088 completion_code);
2103 2089
2104 case SCI_REQ_ABORTING: 2090 case SCI_REQ_ABORTING:
2105 return request_aborting_state_tc_event(sci_req, 2091 return request_aborting_state_tc_event(ireq,
2106 completion_code); 2092 completion_code);
2107 2093
2108 default: 2094 default:
@@ -2201,7 +2187,7 @@ static void isci_request_handle_controller_specific_errors(
2201{ 2187{
2202 unsigned int cstatus; 2188 unsigned int cstatus;
2203 2189
2204 cstatus = request->sci.scu_status; 2190 cstatus = request->scu_status;
2205 2191
2206 dev_dbg(&request->isci_host->pdev->dev, 2192 dev_dbg(&request->isci_host->pdev->dev,
2207 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " 2193 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
@@ -2640,13 +2626,13 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
2640 task); 2626 task);
2641 2627
2642 if (sas_protocol_ata(task->task_proto)) { 2628 if (sas_protocol_ata(task->task_proto)) {
2643 resp_buf = &request->sci.stp.rsp; 2629 resp_buf = &request->stp.rsp;
2644 isci_request_process_stp_response(task, 2630 isci_request_process_stp_response(task,
2645 resp_buf); 2631 resp_buf);
2646 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2632 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2647 2633
2648 /* crack the iu response buffer. */ 2634 /* crack the iu response buffer. */
2649 resp_iu = &request->sci.ssp.rsp; 2635 resp_iu = &request->ssp.rsp;
2650 isci_request_process_response_iu(task, resp_iu, 2636 isci_request_process_response_iu(task, resp_iu,
2651 &isci_host->pdev->dev); 2637 &isci_host->pdev->dev);
2652 2638
@@ -2677,7 +2663,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
2677 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2663 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2678 2664
2679 if (task->task_proto == SAS_PROTOCOL_SMP) { 2665 if (task->task_proto == SAS_PROTOCOL_SMP) {
2680 void *rsp = &request->sci.smp.rsp; 2666 void *rsp = &request->smp.rsp;
2681 2667
2682 dev_dbg(&isci_host->pdev->dev, 2668 dev_dbg(&isci_host->pdev->dev,
2683 "%s: SMP protocol completion\n", 2669 "%s: SMP protocol completion\n",
@@ -2693,7 +2679,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
2693 * There is a possibility that less data than 2679 * There is a possibility that less data than
2694 * the maximum was transferred. 2680 * the maximum was transferred.
2695 */ 2681 */
2696 u32 transferred_length = sci_req_tx_bytes(&request->sci); 2682 u32 transferred_length = sci_req_tx_bytes(request);
2697 2683
2698 task->task_status.residual 2684 task->task_status.residual
2699 = task->total_xfer_len - transferred_length; 2685 = task->total_xfer_len - transferred_length;
@@ -2851,8 +2837,8 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
2851 2837
2852 /* complete the io request to the core. */ 2838 /* complete the io request to the core. */
2853 scic_controller_complete_io(&isci_host->sci, 2839 scic_controller_complete_io(&isci_host->sci,
2854 request->sci.target_device, 2840 request->target_device,
2855 &request->sci); 2841 request);
2856 isci_put_device(idev); 2842 isci_put_device(idev);
2857 2843
2858 /* set terminated handle so it cannot be completed or 2844 /* set terminated handle so it cannot be completed or
@@ -2864,9 +2850,8 @@ static void isci_request_io_request_complete(struct isci_host *isci_host,
2864 2850
2865static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) 2851static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
2866{ 2852{
2867 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); 2853 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2868 struct isci_request *ireq = sci_req_to_ireq(sci_req); 2854 struct domain_device *dev = sci_dev_to_domain(ireq->target_device);
2869 struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
2870 struct sas_task *task; 2855 struct sas_task *task;
2871 2856
2872 /* XXX as hch said always creating an internal sas_task for tmf 2857 /* XXX as hch said always creating an internal sas_task for tmf
@@ -2902,66 +2887,65 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine *
2902 2887
2903static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) 2888static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
2904{ 2889{
2905 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); 2890 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2906 struct scic_sds_controller *scic = sci_req->owning_controller; 2891 struct scic_sds_controller *scic = ireq->owning_controller;
2907 struct isci_host *ihost = scic_to_ihost(scic); 2892 struct isci_host *ihost = scic_to_ihost(scic);
2908 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2909 2893
2910 /* Tell the SCI_USER that the IO request is complete */ 2894 /* Tell the SCI_USER that the IO request is complete */
2911 if (!test_bit(IREQ_TMF, &ireq->flags)) 2895 if (!test_bit(IREQ_TMF, &ireq->flags))
2912 isci_request_io_request_complete(ihost, ireq, 2896 isci_request_io_request_complete(ihost, ireq,
2913 sci_req->sci_status); 2897 ireq->sci_status);
2914 else 2898 else
2915 isci_task_request_complete(ihost, ireq, sci_req->sci_status); 2899 isci_task_request_complete(ihost, ireq, ireq->sci_status);
2916} 2900}
2917 2901
2918static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm) 2902static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
2919{ 2903{
2920 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); 2904 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2921 2905
2922 /* Setting the abort bit in the Task Context is required by the silicon. */ 2906 /* Setting the abort bit in the Task Context is required by the silicon. */
2923 sci_req->tc->abort = 1; 2907 ireq->tc->abort = 1;
2924} 2908}
2925 2909
2926static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 2910static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2927{ 2911{
2928 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); 2912 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2929 2913
2930 scic_sds_remote_device_set_working_request(sci_req->target_device, 2914 scic_sds_remote_device_set_working_request(ireq->target_device,
2931 sci_req); 2915 ireq);
2932} 2916}
2933 2917
2934static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 2918static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2935{ 2919{
2936 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); 2920 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2937 2921
2938 scic_sds_remote_device_set_working_request(sci_req->target_device, 2922 scic_sds_remote_device_set_working_request(ireq->target_device,
2939 sci_req); 2923 ireq);
2940} 2924}
2941 2925
2942static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) 2926static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2943{ 2927{
2944 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); 2928 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2945 2929
2946 scic_sds_remote_device_set_working_request(sci_req->target_device, 2930 scic_sds_remote_device_set_working_request(ireq->target_device,
2947 sci_req); 2931 ireq);
2948} 2932}
2949 2933
2950static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) 2934static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
2951{ 2935{
2952 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); 2936 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2953 struct scu_task_context *tc = sci_req->tc; 2937 struct scu_task_context *tc = ireq->tc;
2954 struct host_to_dev_fis *h2d_fis; 2938 struct host_to_dev_fis *h2d_fis;
2955 enum sci_status status; 2939 enum sci_status status;
2956 2940
2957 /* Clear the SRST bit */ 2941 /* Clear the SRST bit */
2958 h2d_fis = &sci_req->stp.cmd; 2942 h2d_fis = &ireq->stp.cmd;
2959 h2d_fis->control = 0; 2943 h2d_fis->control = 0;
2960 2944
2961 /* Clear the TC control bit */ 2945 /* Clear the TC control bit */
2962 tc->control_frame = 0; 2946 tc->control_frame = 0;
2963 2947
2964 status = scic_controller_continue_io(sci_req); 2948 status = scic_controller_continue_io(ireq);
2965 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); 2949 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
2966} 2950}
2967 2951
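Aside on the hunk above: the state-machine entry callbacks now recover the request with container_of() on the embedded sm field, instead of first finding a scic_sds_request and then calling sci_req_to_ireq(). The snippet below is only a minimal, self-contained sketch of that pattern; the type and function names are simplified stand-ins, not the driver's actual definitions.

#include <stddef.h>
#include <stdio.h>

/* simplified stand-in for the kernel's container_of() helper */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sci_base_state_machine {
        int current_state_id;
};

struct request_example {                        /* illustrative only */
        unsigned short io_tag;
        struct sci_base_state_machine sm;       /* embedded state machine */
};

/* a state-entry callback only receives the embedded member ... */
static void started_state_enter(struct sci_base_state_machine *sm)
{
        /* ... and recovers the enclosing request in one step */
        struct request_example *ireq =
                container_of(sm, struct request_example, sm);

        printf("request tag %u entered state %d\n",
               (unsigned int)ireq->io_tag, sm->current_state_id);
}

int main(void)
{
        struct request_example req = {
                .io_tag = 7,
                .sm = { .current_state_id = 1 },
        };

        started_state_enter(&req.sm);
        return 0;
}
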
@@ -3006,29 +2990,29 @@ static const struct sci_base_state scic_sds_request_state_table[] = {
3006static void 2990static void
3007scic_sds_general_request_construct(struct scic_sds_controller *scic, 2991scic_sds_general_request_construct(struct scic_sds_controller *scic,
3008 struct scic_sds_remote_device *sci_dev, 2992 struct scic_sds_remote_device *sci_dev,
3009 struct scic_sds_request *sci_req) 2993 struct isci_request *ireq)
3010{ 2994{
3011 sci_init_sm(&sci_req->sm, scic_sds_request_state_table, SCI_REQ_INIT); 2995 sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT);
3012 2996
3013 sci_req->target_device = sci_dev; 2997 ireq->target_device = sci_dev;
3014 sci_req->protocol = SCIC_NO_PROTOCOL; 2998 ireq->protocol = SCIC_NO_PROTOCOL;
3015 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 2999 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3016 3000
3017 sci_req->sci_status = SCI_SUCCESS; 3001 ireq->sci_status = SCI_SUCCESS;
3018 sci_req->scu_status = 0; 3002 ireq->scu_status = 0;
3019 sci_req->post_context = 0xFFFFFFFF; 3003 ireq->post_context = 0xFFFFFFFF;
3020} 3004}
3021 3005
3022static enum sci_status 3006static enum sci_status
3023scic_io_request_construct(struct scic_sds_controller *scic, 3007scic_io_request_construct(struct scic_sds_controller *scic,
3024 struct scic_sds_remote_device *sci_dev, 3008 struct scic_sds_remote_device *sci_dev,
3025 struct scic_sds_request *sci_req) 3009 struct isci_request *ireq)
3026{ 3010{
3027 struct domain_device *dev = sci_dev_to_domain(sci_dev); 3011 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3028 enum sci_status status = SCI_SUCCESS; 3012 enum sci_status status = SCI_SUCCESS;
3029 3013
3030 /* Build the common part of the request */ 3014 /* Build the common part of the request */
3031 scic_sds_general_request_construct(scic, sci_dev, sci_req); 3015 scic_sds_general_request_construct(scic, sci_dev, ireq);
3032 3016
3033 if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3017 if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3034 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3018 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
@@ -3036,31 +3020,31 @@ scic_io_request_construct(struct scic_sds_controller *scic,
3036 if (dev->dev_type == SAS_END_DEV) 3020 if (dev->dev_type == SAS_END_DEV)
3037 /* pass */; 3021 /* pass */;
3038 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) 3022 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3039 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd)); 3023 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3040 else if (dev_is_expander(dev)) 3024 else if (dev_is_expander(dev))
3041 /* pass */; 3025 /* pass */;
3042 else 3026 else
3043 return SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3027 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3044 3028
3045 memset(sci_req->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); 3029 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3046 3030
3047 return status; 3031 return status;
3048} 3032}
3049 3033
3050enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, 3034enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3051 struct scic_sds_remote_device *sci_dev, 3035 struct scic_sds_remote_device *sci_dev,
3052 u16 io_tag, struct scic_sds_request *sci_req) 3036 u16 io_tag, struct isci_request *ireq)
3053{ 3037{
3054 struct domain_device *dev = sci_dev_to_domain(sci_dev); 3038 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3055 enum sci_status status = SCI_SUCCESS; 3039 enum sci_status status = SCI_SUCCESS;
3056 3040
3057 /* Build the common part of the request */ 3041 /* Build the common part of the request */
3058 scic_sds_general_request_construct(scic, sci_dev, sci_req); 3042 scic_sds_general_request_construct(scic, sci_dev, ireq);
3059 3043
3060 if (dev->dev_type == SAS_END_DEV || 3044 if (dev->dev_type == SAS_END_DEV ||
3061 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 3045 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3062 set_bit(IREQ_TMF, &sci_req_to_ireq(sci_req)->flags); 3046 set_bit(IREQ_TMF, &ireq->flags);
3063 memset(sci_req->tc, 0, sizeof(struct scu_task_context)); 3047 memset(ireq->tc, 0, sizeof(struct scu_task_context));
3064 } else 3048 } else
3065 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3049 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3066 3050
@@ -3076,7 +3060,7 @@ static enum sci_status isci_request_ssp_request_construct(
3076 "%s: request = %p\n", 3060 "%s: request = %p\n",
3077 __func__, 3061 __func__,
3078 request); 3062 request);
3079 status = scic_io_request_construct_basic_ssp(&request->sci); 3063 status = scic_io_request_construct_basic_ssp(request);
3080 return status; 3064 return status;
3081} 3065}
3082 3066
@@ -3097,7 +3081,7 @@ static enum sci_status isci_request_stp_request_construct(
3097 */ 3081 */
3098 register_fis = isci_sata_task_to_fis_copy(task); 3082 register_fis = isci_sata_task_to_fis_copy(task);
3099 3083
3100 status = scic_io_request_construct_basic_sata(&request->sci); 3084 status = scic_io_request_construct_basic_sata(request);
3101 3085
3102 /* Set the ncq tag in the fis, from the queue 3086 /* Set the ncq tag in the fis, from the queue
3103 * command in the task. 3087 * command in the task.
@@ -3115,7 +3099,7 @@ static enum sci_status isci_request_stp_request_construct(
3115 3099
3116static enum sci_status 3100static enum sci_status
3117scic_io_request_construct_smp(struct device *dev, 3101scic_io_request_construct_smp(struct device *dev,
3118 struct scic_sds_request *sci_req, 3102 struct isci_request *ireq,
3119 struct sas_task *task) 3103 struct sas_task *task)
3120{ 3104{
3121 struct scatterlist *sg = &task->smp_task.smp_req; 3105 struct scatterlist *sg = &task->smp_task.smp_req;
@@ -3158,14 +3142,14 @@ scic_io_request_construct_smp(struct device *dev,
3158 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3142 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3159 return SCI_FAILURE; 3143 return SCI_FAILURE;
3160 3144
3161 sci_req->protocol = SCIC_SMP_PROTOCOL; 3145 ireq->protocol = SCIC_SMP_PROTOCOL;
3162 3146
3163 /* byte swap the smp request. */ 3147 /* byte swap the smp request. */
3164 3148
3165 task_context = sci_req->tc; 3149 task_context = ireq->tc;
3166 3150
3167 sci_dev = scic_sds_request_get_device(sci_req); 3151 sci_dev = scic_sds_request_get_device(ireq);
3168 sci_port = scic_sds_request_get_port(sci_req); 3152 sci_port = scic_sds_request_get_port(ireq);
3169 3153
3170 /* 3154 /*
3171 * Fill in the TC with its required data 3155 * Fill in the TC with its required data
@@ -3217,12 +3201,12 @@ scic_io_request_construct_smp(struct device *dev,
3217 */ 3201 */
3218 task_context->task_phase = 0; 3202 task_context->task_phase = 0;
3219 3203
3220 sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3204 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3221 (scic_sds_controller_get_protocol_engine_group(scic) << 3205 (scic_sds_controller_get_protocol_engine_group(scic) <<
3222 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3206 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3223 (scic_sds_port_get_index(sci_port) << 3207 (scic_sds_port_get_index(sci_port) <<
3224 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 3208 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3225 ISCI_TAG_TCI(sci_req->io_tag)); 3209 ISCI_TAG_TCI(ireq->io_tag));
3226 /* 3210 /*
3227 * Copy the physical address for the command buffer to the SCU Task 3211 * Copy the physical address for the command buffer to the SCU Task
3228 * Context; the command buffer should not contain the command header. 3212 * Context; the command buffer should not contain the command header.
@@ -3234,7 +3218,7 @@ scic_io_request_construct_smp(struct device *dev,
3234 task_context->response_iu_upper = 0; 3218 task_context->response_iu_upper = 0;
3235 task_context->response_iu_lower = 0; 3219 task_context->response_iu_lower = 0;
3236 3220
3237 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); 3221 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3238 3222
3239 return SCI_SUCCESS; 3223 return SCI_SUCCESS;
3240} 3224}
@@ -3250,10 +3234,9 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3250{ 3234{
3251 struct sas_task *task = isci_request_access_task(ireq); 3235 struct sas_task *task = isci_request_access_task(ireq);
3252 struct device *dev = &ireq->isci_host->pdev->dev; 3236 struct device *dev = &ireq->isci_host->pdev->dev;
3253 struct scic_sds_request *sci_req = &ireq->sci;
3254 enum sci_status status = SCI_FAILURE; 3237 enum sci_status status = SCI_FAILURE;
3255 3238
3256 status = scic_io_request_construct_smp(dev, sci_req, task); 3239 status = scic_io_request_construct_smp(dev, ireq, task);
3257 if (status != SCI_SUCCESS) 3240 if (status != SCI_SUCCESS)
3258 dev_warn(&ireq->isci_host->pdev->dev, 3241 dev_warn(&ireq->isci_host->pdev->dev,
3259 "%s: failed with status = %d\n", 3242 "%s: failed with status = %d\n",
@@ -3309,7 +3292,7 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host,
3309 } 3292 }
3310 3293
3311 status = scic_io_request_construct(&isci_host->sci, sci_device, 3294 status = scic_io_request_construct(&isci_host->sci, sci_device,
3312 &request->sci); 3295 request);
3313 3296
3314 if (status != SCI_SUCCESS) { 3297 if (status != SCI_SUCCESS) {
3315 dev_warn(&isci_host->pdev->dev, 3298 dev_warn(&isci_host->pdev->dev,
@@ -3344,7 +3327,7 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t
3344 struct isci_request *ireq; 3327 struct isci_request *ireq;
3345 3328
3346 ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; 3329 ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3347 ireq->sci.io_tag = tag; 3330 ireq->io_tag = tag;
3348 ireq->io_request_completion = NULL; 3331 ireq->io_request_completion = NULL;
3349 ireq->flags = 0; 3332 ireq->flags = 0;
3350 ireq->num_sg_entries = 0; 3333 ireq->num_sg_entries = 0;
@@ -3416,14 +3399,14 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
3416 */ 3399 */
3417 status = scic_controller_start_task(&ihost->sci, 3400 status = scic_controller_start_task(&ihost->sci,
3418 &idev->sci, 3401 &idev->sci,
3419 &ireq->sci); 3402 ireq);
3420 } else { 3403 } else {
3421 status = SCI_FAILURE; 3404 status = SCI_FAILURE;
3422 } 3405 }
3423 } else { 3406 } else {
3424 /* send the request, let the core assign the IO TAG. */ 3407 /* send the request, let the core assign the IO TAG. */
3425 status = scic_controller_start_io(&ihost->sci, &idev->sci, 3408 status = scic_controller_start_io(&ihost->sci, &idev->sci,
3426 &ireq->sci); 3409 ireq);
3427 } 3410 }
3428 3411
3429 if (status != SCI_SUCCESS && 3412 if (status != SCI_SUCCESS &&
@@ -3446,8 +3429,6 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
3446 list_add(&ireq->dev_node, &idev->reqs_in_process); 3429 list_add(&ireq->dev_node, &idev->reqs_in_process);
3447 3430
3448 if (status == SCI_SUCCESS) { 3431 if (status == SCI_SUCCESS) {
3449 /* Save the tag for possible task mgmt later. */
3450 ireq->io_tag = ireq->sci.io_tag;
3451 isci_request_change_state(ireq, started); 3432 isci_request_change_state(ireq, started);
3452 } else { 3433 } else {
3453 /* The request did not really start in the 3434 /* The request did not really start in the
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 7fd98531d1f2..68d8a27357eb 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -93,7 +93,7 @@ enum sci_request_protocol {
93 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol 93 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
94 * @pio_len - number of bytes requested at PIO setup 94 * @pio_len - number of bytes requested at PIO setup
95 * @status - pio setup ending status value to tell us if we need 95 * @status - pio setup ending status value to tell us if we need
96 * to wait for another fis or if the transfer is complete. Upon 96 * to wait for another fis or if the transfer is complete. Upon
97 * receipt of a d2h fis this will be the status field of that fis. 97 * receipt of a d2h fis this will be the status field of that fis.
98 * @sgl - track pio transfer progress as we iterate through the sgl 98 * @sgl - track pio transfer progress as we iterate through the sgl
99 * @device_cdb_len - atapi device advertises its transfer constraints at setup 99 * @device_cdb_len - atapi device advertises its transfer constraints at setup
@@ -110,69 +110,55 @@ struct isci_stp_request {
110 u32 device_cdb_len; 110 u32 device_cdb_len;
111}; 111};
112 112
113struct scic_sds_request { 113struct isci_request {
114 /* 114 enum isci_request_status status;
115 * This field contains the information for the base request state 115 #define IREQ_COMPLETE_IN_TARGET 0
116 * machine. 116 #define IREQ_TERMINATED 1
117 #define IREQ_TMF 2
118 #define IREQ_ACTIVE 3
119 unsigned long flags;
120 /* XXX kill ttype and ttype_ptr, allocate full sas_task */
121 enum task_type ttype;
122 union ttype_ptr_union {
123 struct sas_task *io_task_ptr; /* When ttype==io_task */
124 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
125 } ttype_ptr;
126 struct isci_host *isci_host;
127 /* For use in the requests_to_{complete|abort} lists: */
128 struct list_head completed_node;
129 /* For use in the reqs_in_process list: */
130 struct list_head dev_node;
131 spinlock_t state_lock;
132 dma_addr_t request_daddr;
133 dma_addr_t zero_scatter_daddr;
134 unsigned int num_sg_entries;
135 /* Note: "io_request_completion" is completed in two different ways
136 * depending on whether this is a TMF or regular request.
137 * - TMF requests are completed in the thread that started them;
138 * - regular requests are completed in the request completion callback
139 * function.
140 * This difference in operation allows the aborter of a TMF request
141 * to be sure that once the TMF request completes, the I/O that the
142 * TMF was aborting is guaranteed to have completed.
143 *
144 * XXX kill io_request_completion
117 */ 145 */
146 struct completion *io_request_completion;
118 struct sci_base_state_machine sm; 147 struct sci_base_state_machine sm;
119
120 /*
121 * This field simply points to the controller to which this IO request
122 * is associated.
123 */
124 struct scic_sds_controller *owning_controller; 148 struct scic_sds_controller *owning_controller;
125
126 /*
127 * This field simply points to the remote device to which this IO
128 * request is associated.
129 */
130 struct scic_sds_remote_device *target_device; 149 struct scic_sds_remote_device *target_device;
131
132 /*
133 * This field indicates the IO tag for this request. The IO tag is
134 * comprised of the task_index and a sequence count. The sequence count
135 * is utilized to help identify tasks from one life to another.
136 */
137 u16 io_tag; 150 u16 io_tag;
138
139 /*
140 * This field specifies the protocol being utilized for this
141 * IO request.
142 */
143 enum sci_request_protocol protocol; 151 enum sci_request_protocol protocol;
144 152 u32 scu_status; /* hardware result */
145 /* 153 u32 sci_status; /* upper layer disposition */
146 * This field indicates the completion status taken from the SCUs
147 * completion code. It indicates the completion result for the SCU
148 * hardware.
149 */
150 u32 scu_status;
151
152 /*
153 * This field indicates the completion status returned to the SCI user.
154 * It indicates the users view of the io request completion.
155 */
156 u32 sci_status;
157
158 /*
159 * This field contains the value to be utilized when posting
160 * (e.g. Post_TC, * Post_TC_Abort) this request to the silicon.
161 */
162 u32 post_context; 154 u32 post_context;
163
164 struct scu_task_context *tc; 155 struct scu_task_context *tc;
165
166 /* could be larger with sg chaining */ 156 /* could be larger with sg chaining */
167 #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) 157 #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
168 struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); 158 struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
169 159 /* This field is a pointer to the stored rx frame data. It is used in
170 /*
171 * This field is a pointer to the stored rx frame data. It is used in
172 * STP internal requests and SMP response frames. If this field is 160 * STP internal requests and SMP response frames. If this field is
173 * non-NULL the saved frame must be released on IO request completion. 161 * non-NULL the saved frame must be released on IO request completion.
174 *
175 * @todo In the future do we want to keep a list of RX frame buffers?
176 */ 162 */
177 u32 saved_rx_frame_index; 163 u32 saved_rx_frame_index;
178 164
@@ -187,11 +173,9 @@ struct scic_sds_request {
187 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; 173 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
188 }; 174 };
189 } ssp; 175 } ssp;
190
191 struct { 176 struct {
192 struct smp_resp rsp; 177 struct smp_resp rsp;
193 } smp; 178 } smp;
194
195 struct { 179 struct {
196 struct isci_stp_request req; 180 struct isci_stp_request req;
197 struct host_to_dev_fis cmd; 181 struct host_to_dev_fis cmd;
@@ -200,56 +184,11 @@ struct scic_sds_request {
200 }; 184 };
201}; 185};
202 186
203static inline struct scic_sds_request *to_sci_req(struct isci_stp_request *stp_req) 187static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
204{
205 struct scic_sds_request *sci_req;
206
207 sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
208 return sci_req;
209}
210
211struct isci_request {
212 enum isci_request_status status;
213 enum task_type ttype;
214 unsigned short io_tag;
215 #define IREQ_COMPLETE_IN_TARGET 0
216 #define IREQ_TERMINATED 1
217 #define IREQ_TMF 2
218 #define IREQ_ACTIVE 3
219 unsigned long flags;
220
221 union ttype_ptr_union {
222 struct sas_task *io_task_ptr; /* When ttype==io_task */
223 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
224 } ttype_ptr;
225 struct isci_host *isci_host;
226 /* For use in the requests_to_{complete|abort} lists: */
227 struct list_head completed_node;
228 /* For use in the reqs_in_process list: */
229 struct list_head dev_node;
230 spinlock_t state_lock;
231 dma_addr_t request_daddr;
232 dma_addr_t zero_scatter_daddr;
233
234 unsigned int num_sg_entries; /* returned by pci_alloc_sg */
235
236 /** Note: "io_request_completion" is completed in two different ways
237 * depending on whether this is a TMF or regular request.
238 * - TMF requests are completed in the thread that started them;
239 * - regular requests are completed in the request completion callback
240 * function.
241 * This difference in operation allows the aborter of a TMF request
242 * to be sure that once the TMF request completes, the I/O that the
243 * TMF was aborting is guaranteed to have completed.
244 */
245 struct completion *io_request_completion;
246 struct scic_sds_request sci;
247};
248
249static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
250{ 188{
251 struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci); 189 struct isci_request *ireq;
252 190
191 ireq = container_of(stp_req, typeof(*ireq), stp.req);
253 return ireq; 192 return ireq;
254} 193}
255 194
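With the wrapper struct and sci_req_to_ireq() removed in the hunk above, the per-protocol buffers hang directly off isci_request, which is why call sites throughout this patch drop the intermediate ->sci. member (for example &request->sci.stp.rsp becomes &request->stp.rsp). The sketch below only illustrates that flattening with made-up field sizes; it is not the driver's actual layout.

#include <stdio.h>
#include <string.h>

struct ssp_buffers { char rsp[280]; };
struct smp_buffers { char rsp[1028]; };
struct stp_buffers { char cmd[20]; char rsp[24]; };

/* one request type with the protocol buffers embedded directly */
struct unified_request {
        unsigned short io_tag;
        union {
                struct ssp_buffers ssp;
                struct smp_buffers smp;
                struct stp_buffers stp;
        };
};

int main(void)
{
        struct unified_request req = { .io_tag = 3 };

        /* callers address the buffers without an intermediate member,
         * i.e. &req.stp.cmd rather than &req.sci.stp.cmd */
        memset(&req.stp.cmd, 0, sizeof(req.stp.cmd));
        printf("tag %u, stp command size %zu\n",
               (unsigned int)req.io_tag, sizeof(req.stp.cmd));
        return 0;
}
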
@@ -366,32 +305,32 @@ enum sci_base_request_states {
366 * 305 *
367 * This macro will return the controller for this io request object 306 * This macro will return the controller for this io request object
368 */ 307 */
369#define scic_sds_request_get_controller(sci_req) \ 308#define scic_sds_request_get_controller(ireq) \
370 ((sci_req)->owning_controller) 309 ((ireq)->owning_controller)
371 310
372/** 311/**
373 * scic_sds_request_get_device() - 312 * scic_sds_request_get_device() -
374 * 313 *
375 * This macro will return the device for this io request object 314 * This macro will return the device for this io request object
376 */ 315 */
377#define scic_sds_request_get_device(sci_req) \ 316#define scic_sds_request_get_device(ireq) \
378 ((sci_req)->target_device) 317 ((ireq)->target_device)
379 318
380/** 319/**
381 * scic_sds_request_get_port() - 320 * scic_sds_request_get_port() -
382 * 321 *
383 * This macro will return the port for this io request object 322 * This macro will return the port for this io request object
384 */ 323 */
385#define scic_sds_request_get_port(sci_req) \ 324#define scic_sds_request_get_port(ireq) \
386 scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req)) 325 scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq))
387 326
388/** 327/**
389 * scic_sds_request_get_post_context() - 328 * scic_sds_request_get_post_context() -
390 * 329 *
391 * This macro returns the constructed post context result for the io request. 330 * This macro returns the constructed post context result for the io request.
392 */ 331 */
393#define scic_sds_request_get_post_context(sci_req) \ 332#define scic_sds_request_get_post_context(ireq) \
394 ((sci_req)->post_context) 333 ((ireq)->post_context)
395 334
396/** 335/**
397 * scic_sds_request_get_task_context() - 336 * scic_sds_request_get_task_context() -
@@ -413,26 +352,25 @@ enum sci_base_request_states {
413 (request)->sci_status = (sci_status_code); \ 352 (request)->sci_status = (sci_status_code); \
414 } 353 }
415 354
416enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); 355enum sci_status scic_sds_request_start(struct isci_request *ireq);
417enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); 356enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq);
418enum sci_status 357enum sci_status
419scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, 358scic_sds_io_request_event_handler(struct isci_request *ireq,
420 u32 event_code); 359 u32 event_code);
421enum sci_status 360enum sci_status
422scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, 361scic_sds_io_request_frame_handler(struct isci_request *ireq,
423 u32 frame_index); 362 u32 frame_index);
424enum sci_status 363enum sci_status
425scic_sds_task_request_terminate(struct scic_sds_request *sci_req); 364scic_sds_task_request_terminate(struct isci_request *ireq);
426extern enum sci_status 365extern enum sci_status
427scic_sds_request_complete(struct scic_sds_request *sci_req); 366scic_sds_request_complete(struct isci_request *ireq);
428extern enum sci_status 367extern enum sci_status
429scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); 368scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code);
430 369
431/* XXX open code in caller */ 370/* XXX open code in caller */
432static inline dma_addr_t 371static inline dma_addr_t
433scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr) 372scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
434{ 373{
435 struct isci_request *ireq = sci_req_to_ireq(sci_req);
436 374
437 char *requested_addr = (char *)virt_addr; 375 char *requested_addr = (char *)virt_addr;
438 char *base_addr = (char *)ireq; 376 char *base_addr = (char *)ireq;
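[Editor's note] The isci_request allocation (task context, SGL, response buffers) is mapped as one coherent block whose bus address is request_daddr, so the device-visible address of anything inside it is an offset from the base. The rest of the helper's body falls outside this hunk, so the arithmetic below is an assumption about how it most likely finishes, not a quote of the source:

#include <linux/types.h>

/* Illustrative tail of scic_io_request_get_dma_addr(): translate a CPU
 * pointer inside the request allocation into its bus address by offset.
 */
static inline dma_addr_t req_virt_to_dma(struct isci_request *ireq, void *virt_addr)
{
        char *requested_addr = (char *)virt_addr;
        char *base_addr = (char *)ireq;

        return ireq->request_daddr + (requested_addr - base_addr);
}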
@@ -565,14 +503,14 @@ enum sci_status
565scic_task_request_construct(struct scic_sds_controller *scic, 503scic_task_request_construct(struct scic_sds_controller *scic,
566 struct scic_sds_remote_device *sci_dev, 504 struct scic_sds_remote_device *sci_dev,
567 u16 io_tag, 505 u16 io_tag,
568 struct scic_sds_request *sci_req); 506 struct isci_request *ireq);
569enum sci_status 507enum sci_status
570scic_task_request_construct_ssp(struct scic_sds_request *sci_req); 508scic_task_request_construct_ssp(struct isci_request *ireq);
571enum sci_status 509enum sci_status
572scic_task_request_construct_sata(struct scic_sds_request *sci_req); 510scic_task_request_construct_sata(struct isci_request *ireq);
573void 511void
574scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); 512scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag);
575void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); 513void scic_sds_smp_request_copy_response(struct isci_request *ireq);
576 514
577static inline int isci_task_is_ncq_recovery(struct sas_task *task) 515static inline int isci_task_is_ncq_recovery(struct sas_task *task)
578{ 516{
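[Editor's note] isci_task_is_ncq_recovery() is cut off by the hunk boundary here. The usual meaning of "NCQ recovery" is the READ LOG EXT of the SATA NCQ error log (page 10h) that libata issues after an NCQ command fails. The sketch below shows that kind of check using libsas/libata field names; it is a guess at the helper's intent, not the driver's actual body:

#include <linux/ata.h>      /* ATA_CMD_READ_LOG_EXT, ATA_LOG_SATA_NCQ */
#include <scsi/libsas.h>    /* struct sas_task, sas_protocol_ata() */

/* Hypothetical equivalent of the truncated helper above. */
static inline int task_is_ncq_error_log_read(struct sas_task *task)
{
        return sas_protocol_ata(task->task_proto) &&
               task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
               task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ;
}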
diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c
index e7ce46924465..87d8cc1a6e39 100644
--- a/drivers/scsi/isci/sata.c
+++ b/drivers/scsi/isci/sata.c
@@ -70,7 +70,7 @@
70struct host_to_dev_fis *isci_sata_task_to_fis_copy(struct sas_task *task) 70struct host_to_dev_fis *isci_sata_task_to_fis_copy(struct sas_task *task)
71{ 71{
72 struct isci_request *ireq = task->lldd_task; 72 struct isci_request *ireq = task->lldd_task;
73 struct host_to_dev_fis *fis = &ireq->sci.stp.cmd; 73 struct host_to_dev_fis *fis = &ireq->stp.cmd;
74 74
75 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 75 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
76 76
@@ -116,7 +116,7 @@ void isci_sata_set_ncq_tag(
116 struct isci_request *request = task->lldd_task; 116 struct isci_request *request = task->lldd_task;
117 117
118 register_fis->sector_count = qc->tag << 3; 118 register_fis->sector_count = qc->tag << 3;
119 scic_stp_io_request_set_ncq_tag(&request->sci, qc->tag); 119 scic_stp_io_request_set_ncq_tag(request, qc->tag);
120} 120}
121 121
122/** 122/**
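[Editor's note] For NCQ the queue tag has to land in two places: bits 7:3 of the FIS SECTOR COUNT field (hence the qc->tag << 3 above) and, via scic_stp_io_request_set_ncq_tag(), in the request's task context so the SCU tracks the same tag. A minimal sketch of the FIS side only, assuming a standard 5-bit NCQ tag; the helper name is illustrative:

#include <scsi/sas.h>       /* struct host_to_dev_fis */

/* Place a 5-bit NCQ tag in the H2D register FIS (SECTOR COUNT bits 7:3). */
static void fis_set_ncq_tag(struct host_to_dev_fis *fis, unsigned int tag)
{
        fis->sector_count = (tag & 0x1f) << 3;
}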
@@ -154,7 +154,6 @@ void isci_request_process_stp_response(struct sas_task *task,
154 154
155enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq) 155enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
156{ 156{
157 struct scic_sds_request *sci_req = &ireq->sci;
158 struct isci_tmf *isci_tmf; 157 struct isci_tmf *isci_tmf;
159 enum sci_status status; 158 enum sci_status status;
160 159
@@ -167,7 +166,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire
167 166
168 case isci_tmf_sata_srst_high: 167 case isci_tmf_sata_srst_high:
169 case isci_tmf_sata_srst_low: { 168 case isci_tmf_sata_srst_low: {
170 struct host_to_dev_fis *fis = &sci_req->stp.cmd; 169 struct host_to_dev_fis *fis = &ireq->stp.cmd;
171 170
172 memset(fis, 0, sizeof(*fis)); 171 memset(fis, 0, sizeof(*fis));
173 172
@@ -188,7 +187,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire
188 /* core builds the protocol specific request 187 /* core builds the protocol specific request
189 * based on the h2d fis. 188 * based on the h2d fis.
190 */ 189 */
191 status = scic_task_request_construct_sata(&ireq->sci); 190 status = scic_task_request_construct_sata(ireq);
192 191
193 return status; 192 return status;
194} 193}
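[Editor's note] The srst_high/srst_low TMFs carry no ATA command: the FIS built in the hunk above is a register host-to-device FIS that only updates the device control register, asserting SRST for the "high" half of the soft-reset handshake and clearing it for the "low" half. A rough sketch of what the memset'ed FIS ends up holding; the C-bit handling and field values come from the SATA spec, not from this hunk, and the helper name is hypothetical:

#include <linux/ata.h>      /* ATA_SRST */
#include <linux/string.h>   /* memset */
#include <linux/types.h>    /* bool */
#include <scsi/sas.h>       /* struct host_to_dev_fis */

/* Illustrative SRST control-FIS builder for the two soft-reset TMF halves. */
static void build_srst_fis(struct host_to_dev_fis *fis, bool assert_srst)
{
        memset(fis, 0, sizeof(*fis));
        fis->fis_type = 0x27;           /* register FIS, host to device */
        /* C bit (flags bit 7) stays clear: control-register update, not a command */
        if (assert_srst)
                fis->control |= ATA_SRST;
        else
                fis->control &= ~ATA_SRST;
}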
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index d2dba8354899..700708c82678 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -258,7 +258,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
258 258
259 /* let the core do it's construct. */ 259 /* let the core do it's construct. */
260 status = scic_task_request_construct(&ihost->sci, &idev->sci, tag, 260 status = scic_task_request_construct(&ihost->sci, &idev->sci, tag,
261 &ireq->sci); 261 ireq);
262 262
263 if (status != SCI_SUCCESS) { 263 if (status != SCI_SUCCESS) {
264 dev_warn(&ihost->pdev->dev, 264 dev_warn(&ihost->pdev->dev,
@@ -272,7 +272,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
272 /* XXX convert to get this from task->tproto like other drivers */ 272 /* XXX convert to get this from task->tproto like other drivers */
273 if (dev->dev_type == SAS_END_DEV) { 273 if (dev->dev_type == SAS_END_DEV) {
274 isci_tmf->proto = SAS_PROTOCOL_SSP; 274 isci_tmf->proto = SAS_PROTOCOL_SSP;
275 status = scic_task_request_construct_ssp(&ireq->sci); 275 status = scic_task_request_construct_ssp(ireq);
276 if (status != SCI_SUCCESS) 276 if (status != SCI_SUCCESS)
277 return NULL; 277 return NULL;
278 } 278 }
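[Editor's note] Seen end to end, the TMF build path is: core task-request construction against the controller/device/tag, then protocol-specific construction (SSP here; the SATA variant is driven from isci_sata_management_task_request_build in the hunks above). A condensed, illustrative sketch of that dispatch using only the prototypes visible in this patch; the wrapper itself and its is_sata parameter are not part of the driver:

/* Illustrative only: the two-step construction a TMF request goes through. */
static enum sci_status construct_tmf_request(struct scic_sds_controller *scic,
                                             struct scic_sds_remote_device *sci_dev,
                                             u16 io_tag,
                                             struct isci_request *ireq,
                                             bool is_sata)
{
        enum sci_status status;

        status = scic_task_request_construct(scic, sci_dev, io_tag, ireq);
        if (status != SCI_SUCCESS)
                return status;

        return is_sata ? scic_task_request_construct_sata(ireq)
                       : scic_task_request_construct_ssp(ireq);
}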
@@ -337,7 +337,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
337 /* start the TMF io. */ 337 /* start the TMF io. */
338 status = scic_controller_start_task(&ihost->sci, 338 status = scic_controller_start_task(&ihost->sci,
339 sci_device, 339 sci_device,
340 &ireq->sci); 340 ireq);
341 341
342 if (status != SCI_TASK_SUCCESS) { 342 if (status != SCI_TASK_SUCCESS) {
343 dev_warn(&ihost->pdev->dev, 343 dev_warn(&ihost->pdev->dev,
@@ -371,7 +371,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
371 371
372 scic_controller_terminate_request(&ihost->sci, 372 scic_controller_terminate_request(&ihost->sci,
373 &isci_device->sci, 373 &isci_device->sci,
374 &ireq->sci); 374 ireq);
375 375
376 spin_unlock_irqrestore(&ihost->scic_lock, flags); 376 spin_unlock_irqrestore(&ihost->scic_lock, flags);
377 377
@@ -565,7 +565,7 @@ static void isci_terminate_request_core(
565 status = scic_controller_terminate_request( 565 status = scic_controller_terminate_request(
566 &isci_host->sci, 566 &isci_host->sci,
567 &isci_device->sci, 567 &isci_device->sci,
568 &isci_request->sci); 568 isci_request);
569 } 569 }
570 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 570 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
571 571
@@ -1235,7 +1235,6 @@ isci_task_request_complete(struct isci_host *ihost,
1235{ 1235{
1236 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 1236 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1237 struct completion *tmf_complete; 1237 struct completion *tmf_complete;
1238 struct scic_sds_request *sci_req = &ireq->sci;
1239 1238
1240 dev_dbg(&ihost->pdev->dev, 1239 dev_dbg(&ihost->pdev->dev,
1241 "%s: request = %p, status=%d\n", 1240 "%s: request = %p, status=%d\n",
@@ -1248,18 +1247,18 @@ isci_task_request_complete(struct isci_host *ihost,
1248 1247
1249 if (tmf->proto == SAS_PROTOCOL_SSP) { 1248 if (tmf->proto == SAS_PROTOCOL_SSP) {
1250 memcpy(&tmf->resp.resp_iu, 1249 memcpy(&tmf->resp.resp_iu,
1251 &sci_req->ssp.rsp, 1250 &ireq->ssp.rsp,
1252 SSP_RESP_IU_MAX_SIZE); 1251 SSP_RESP_IU_MAX_SIZE);
1253 } else if (tmf->proto == SAS_PROTOCOL_SATA) { 1252 } else if (tmf->proto == SAS_PROTOCOL_SATA) {
1254 memcpy(&tmf->resp.d2h_fis, 1253 memcpy(&tmf->resp.d2h_fis,
1255 &sci_req->stp.rsp, 1254 &ireq->stp.rsp,
1256 sizeof(struct dev_to_host_fis)); 1255 sizeof(struct dev_to_host_fis));
1257 } 1256 }
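[Editor's note] Because the core request is now the isci_request itself, the protocol response lands directly in its per-protocol union: ireq->ssp.rsp holds the SSP response IU and ireq->stp.rsp the SATA D2H FIS. The copy-out above, pulled into a helper purely for clarity (the helper name is hypothetical; the isci_tmf resp fields are as used in this hunk):

#include <linux/string.h>   /* memcpy */

/* Illustrative helper equivalent to the copy-out in the hunk above. */
static void tmf_copy_response(struct isci_tmf *tmf, struct isci_request *ireq)
{
        if (tmf->proto == SAS_PROTOCOL_SSP)
                memcpy(&tmf->resp.resp_iu, &ireq->ssp.rsp,
                       SSP_RESP_IU_MAX_SIZE);
        else if (tmf->proto == SAS_PROTOCOL_SATA)
                memcpy(&tmf->resp.d2h_fis, &ireq->stp.rsp,
                       sizeof(struct dev_to_host_fis));
}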
1258 1257
1259 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ 1258 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1260 tmf_complete = tmf->complete; 1259 tmf_complete = tmf->complete;
1261 1260
1262 scic_controller_complete_io(&ihost->sci, ireq->sci.target_device, &ireq->sci); 1261 scic_controller_complete_io(&ihost->sci, ireq->target_device, ireq);
1263 /* set the 'terminated' flag handle to make sure it cannot be terminated 1262 /* set the 'terminated' flag handle to make sure it cannot be terminated
1264 * or completed again. 1263 * or completed again.
1265 */ 1264 */