commit 312e0c2455c18716cf640d4336dcb1e9e5053818
tree   be2dbc9a3e5ba39783448f0029231ea43e6e0428 /drivers
parent 9274f45ea551421cd3bf329de9dd8d1e6208285a
author     Dan Williams <dan.j.williams@intel.com>  2011-06-28 16:47:09 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2011-07-03 07:04:51 -0400
isci: unify can_queue tracking on the tci_pool, uplevel tag assignment
The tci_pool tracks our outstanding command slots which are also the 'index'
portion of our tags. Grabbing the tag early in ->lldd_execute_task lets us
drop the isci_host_can_queue() and ->was_tag_assigned_by_user infrastructure.
->was_tag_assigned_by_user required the task context to be duplicated in a
request-local buffer. With the tci established early we can build the
task_context directly into its final location and skip a memcpy.
With the task context buffer at a known address at request construction we
have the opportunity/obligation to also fix sgl handling. This rework feels
like it belongs in another patch but the sgl handling and task_context are too
intertwined.
1/ fix the 'ab' pair embedded in the task context to point to the 'cd' pair in
the task context (previously we were prematurely linking to the staging
buffer).
2/ fix the broken iteration of pio sgls that assumes all sgls are relative to
   the request, and does a dangerous-looking reverse lookup of physical
   address to virtual address.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
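
A note for readers of the diff below: an io_tag is a 16-bit value that packs a small per-TCI sequence count alongside the task context index (TCI) itself, which is what lets isci_free_tag() detect stale frees. A minimal sketch of the encoding implied by the ISCI_TAG()/ISCI_TAG_TCI()/ISCI_TAG_SEQ() usage in this patch; the 12/4 bit split and the constants are assumptions for illustration, not quoted from the driver headers:

    /* Hypothetical illustration of the tag layout; the driver's own
     * ISCI_TAG macros are authoritative. */
    #define EX_MAX_SEQ  16      /* assumed 4-bit sequence space */
    #define EX_MAX_REQS 256     /* assumed power-of-two TCI pool */

    static u16 ex_tag(u8 seq, u16 tci)      /* ~ ISCI_TAG(seq, tci) */
    {
            return (u16)seq << 12 | tci;
    }

    static u16 ex_tag_tci(u16 tag)          /* ~ ISCI_TAG_TCI(tag) */
    {
            return tag & (EX_MAX_REQS - 1);
    }

    static u8 ex_tag_seq(u16 tag)           /* ~ ISCI_TAG_SEQ(tag) */
    {
            return (tag >> 12) & (EX_MAX_SEQ - 1);
    }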
Diffstat (limited to 'drivers')
 drivers/scsi/isci/host.c    | 265
 drivers/scsi/isci/host.h    |  55
 drivers/scsi/isci/port.c    |  61
 drivers/scsi/isci/port.h    |   2
 drivers/scsi/isci/request.c | 469
 drivers/scsi/isci/request.h |  58
 drivers/scsi/isci/task.c    |  80
 7 files changed, 303 insertions(+), 687 deletions(-)
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index b08455f0d350..c99fab53dd0c 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1018,33 +1018,11 @@ done:
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 }
 
-static void isci_tci_free(struct isci_host *ihost, u16 tci)
-{
-	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
-
-	ihost->tci_pool[tail] = tci;
-	ihost->tci_tail = tail + 1;
-}
-
-static u16 isci_tci_alloc(struct isci_host *ihost)
-{
-	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
-	u16 tci = ihost->tci_pool[head];
-
-	ihost->tci_head = head + 1;
-	return tci;
-}
-
 static u16 isci_tci_active(struct isci_host *ihost)
 {
 	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
 }
 
-static u16 isci_tci_space(struct isci_host *ihost)
-{
-	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
-}
-
 static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
 					     u32 timeout)
 {
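
The tci_pool removed here (and re-added below as a host-level facility) is a power-of-two ring addressed by free-running head/tail counters, so occupancy and free space come straight from the CIRC_CNT()/CIRC_SPACE() helpers in <linux/circ_buf.h>. Because the pool starts out full, CIRC_CNT(head, tail, size) counts tags currently handed out and CIRC_SPACE() counts tags still available. A standalone sketch of that arithmetic (the pool size stands in for SCI_MAX_IO_REQUESTS and must be a power of two):

    #include <linux/circ_buf.h>     /* CIRC_CNT(), CIRC_SPACE() */
    #include <linux/types.h>

    #define EX_POOL_SIZE 256        /* assumed, power of two */

    static u16 ex_tags_outstanding(u16 head, u16 tail)
    {
            /* (head - tail) masked to the ring size */
            return CIRC_CNT(head, tail, EX_POOL_SIZE);
    }

    static u16 ex_tags_free(u16 head, u16 tail)
    {
            return CIRC_SPACE(head, tail, EX_POOL_SIZE);
    }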
@@ -1205,6 +1183,11 @@ static void isci_host_completion_routine(unsigned long data)
 				task->task_done(task);
 			}
 		}
+
+		spin_lock_irq(&isci_host->scic_lock);
+		isci_free_tag(isci_host, request->sci.io_tag);
+		spin_unlock_irq(&isci_host->scic_lock);
+
 		/* Free the request object. */
 		isci_request_free(isci_host, request);
 	}
@@ -1242,6 +1225,7 @@ static void isci_host_completion_routine(unsigned long data)
 		 * of pending requests.
 		 */
 		list_del_init(&request->dev_node);
+		isci_free_tag(isci_host, request->sci.io_tag);
 		spin_unlock_irq(&isci_host->scic_lock);
 
 		/* Free the request object. */
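
Both completion-path hunks above return the tag while holding ihost->scic_lock. The tci ring itself carries no locking, so the working assumption (not spelled out in the patch text) is that scic_lock is the sole serializer for isci_alloc_tag()/isci_free_tag(). A sketch of that calling convention:

    /* Sketch: tag release on the completion path, assuming scic_lock
     * guards the tci ring. */
    static void ex_complete(struct isci_host *ihost, u16 io_tag)
    {
            spin_lock_irq(&ihost->scic_lock);
            isci_free_tag(ihost, io_tag);   /* tci back to ring, seq bumped */
            spin_unlock_irq(&ihost->scic_lock);
    }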
@@ -2375,6 +2359,7 @@ static int scic_controller_mem_init(struct scic_sds_controller *scic)
 	if (!scic->task_context_table)
 		return -ENOMEM;
 
+	scic->task_context_dma = dma;
 	writel(lower_32_bits(dma), &scic->smu_registers->host_task_table_lower);
 	writel(upper_32_bits(dma), &scic->smu_registers->host_task_table_upper);
 
@@ -2409,11 +2394,9 @@ int isci_host_init(struct isci_host *isci_host)
 
 	spin_lock_init(&isci_host->state_lock);
 	spin_lock_init(&isci_host->scic_lock);
-	spin_lock_init(&isci_host->queue_lock);
 	init_waitqueue_head(&isci_host->eventq);
 
 	isci_host_change_state(isci_host, isci_starting);
-	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;
 
 	status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
 					   smu_base(isci_host));
@@ -2611,51 +2594,6 @@ void scic_sds_controller_post_request(
 	writel(request, &scic->smu_registers->post_context_port);
 }
 
-/**
- * This method will copy the soft copy of the task context into the physical
- *    memory accessible by the controller.
- * @scic: This parameter specifies the controller for which to copy
- *    the task context.
- * @sci_req: This parameter specifies the request for which the task
- *    context is being copied.
- *
- * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
- * the physical memory version of the task context. Thus, all subsequent
- * updates to the task context are performed in the TC table (i.e. DMAable
- * memory). none
- */
-void scic_sds_controller_copy_task_context(
-	struct scic_sds_controller *scic,
-	struct scic_sds_request *sci_req)
-{
-	struct scu_task_context *task_context_buffer;
-
-	task_context_buffer = scic_sds_controller_get_task_context_buffer(
-		scic, sci_req->io_tag);
-
-	memcpy(task_context_buffer,
-	       sci_req->task_context_buffer,
-	       offsetof(struct scu_task_context, sgl_snapshot_ac));
-
-	/*
-	 * Now that the soft copy of the TC has been copied into the TC
-	 * table accessible by the silicon. Thus, any further changes to
-	 * the TC (e.g. TC termination) occur in the appropriate location. */
-	sci_req->task_context_buffer = task_context_buffer;
-}
-
-struct scu_task_context *scic_sds_controller_get_task_context_buffer(struct scic_sds_controller *scic,
-								      u16 io_tag)
-{
-	u16 tci = ISCI_TAG_TCI(io_tag);
-
-	if (tci < scic->task_context_entries) {
-		return &scic->task_context_table[tci];
-	}
-
-	return NULL;
-}
-
 struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag)
 {
 	u16 task_index;
@@ -2801,6 +2739,60 @@ void scic_sds_controller_release_frame(
 		&scic->scu_registers->sdma.unsolicited_frame_get_pointer);
 }
 
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+	ihost->tci_pool[tail] = tci;
+	ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+	u16 tci = ihost->tci_pool[head];
+
+	ihost->tci_head = head + 1;
+	return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+	if (isci_tci_space(ihost)) {
+		u16 tci = isci_tci_alloc(ihost);
+		u8 seq = ihost->sci.io_request_sequence[tci];
+
+		return ISCI_TAG(seq, tci);
+	}
+
+	return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+	struct scic_sds_controller *scic = &ihost->sci;
+	u16 tci = ISCI_TAG_TCI(io_tag);
+	u16 seq = ISCI_TAG_SEQ(io_tag);
+
+	/* prevent tail from passing head */
+	if (isci_tci_active(ihost) == 0)
+		return SCI_FAILURE_INVALID_IO_TAG;
+
+	if (seq == scic->io_request_sequence[tci]) {
+		scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+		isci_tci_free(ihost, tci);
+
+		return SCI_SUCCESS;
+	}
+	return SCI_FAILURE_INVALID_IO_TAG;
+}
+
 /**
  * scic_controller_start_io() - This method is called by the SCI user to
  *    send/start an IO request. If the method invocation is successful, then
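
The sequence nibble is what makes isci_free_tag() safe against stale and double frees: a free only succeeds while ISCI_TAG_SEQ(tag) still matches io_request_sequence[tci], and each successful free advances that counter. A rough usage sketch of the pair added above (locking elided, per the earlier note about scic_lock):

    static enum sci_status ex_alloc_use_free(struct isci_host *ihost)
    {
            u16 tag = isci_alloc_tag(ihost);

            if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
                    return SCI_FAILURE_INSUFFICIENT_RESOURCES;

            /* ... build the task context in place, post the request ... */

            /* A replay of this same tag value after this point fails,
             * because the sequence counter has moved on. */
            return isci_free_tag(ihost, tag);
    }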
@@ -2811,27 +2803,11 @@ void scic_sds_controller_release_frame(
  *    IO request.
  * @io_request: the handle to the io request object to start.
  * @io_tag: This parameter specifies a previously allocated IO tag that the
- *    user desires to be utilized for this request. This parameter is optional.
- *    The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
- *    for this parameter.
- *
- * - IO tags are a protected resource. It is incumbent upon the SCI Core user
- * to ensure that each of the methods that may allocate or free available IO
- * tags are handled in a mutually exclusive manner. This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a
- * result, it is expected the user will have set the NCQ tag field in the host
- * to device register FIS prior to calling this method. There is also a
- * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
- * the scic_controller_start_io() method. scic_controller_allocate_tag() for
- * more information on allocating a tag. Indicate if the controller
- * successfully started the IO request. SCI_SUCCESS if the IO request was
- * successfully started. Determine the failure situations and return values.
+ *    user desires to be utilized for this request.
  */
 enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
 					 struct scic_sds_remote_device *rdev,
-					 struct scic_sds_request *req,
-					 u16 io_tag)
+					 struct scic_sds_request *req)
 {
 	enum sci_status status;
 
@@ -2902,17 +2878,6 @@ enum sci_status scic_controller_terminate_request(
 * @remote_device: The handle to the remote device object for which to complete
 *    the IO request.
 * @io_request: the handle to the io request object to complete.
- *
- * - IO tags are a protected resource. It is incumbent upon the SCI Core user
- * to ensure that each of the methods that may allocate or free available IO
- * tags are handled in a mutually exclusive manner. This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
- * Core user, using the scic_controller_allocate_io_tag() method, then it is
- * the responsibility of the caller to invoke the scic_controller_free_io_tag()
- * method to free the tag (i.e. this method will not free the IO tag). Indicate
- * if the controller successfully completed the IO request. SCI_SUCCESS if the
- * completion process was successful.
 */
 enum sci_status scic_controller_complete_io(
 	struct scic_sds_controller *scic,
@@ -2963,31 +2928,11 @@ enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
 * @remote_device: the handle to the remote device object for which to start
 *    the task management request.
 * @task_request: the handle to the task request object to start.
- * @io_tag: This parameter specifies a previously allocated IO tag that the
- *    user desires to be utilized for this request. Note this not the io_tag
- *    of the request being managed. It is to be utilized for the task request
- *    itself. This parameter is optional. The user is allowed to supply
- *    SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
- *
- * - IO tags are a protected resource. It is incumbent upon the SCI Core user
- * to ensure that each of the methods that may allocate or free available IO
- * tags are handled in a mutually exclusive manner. This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). - The user must synchronize this task with completion
- * queue processing. If they are not synchronized then it is possible for the
- * io requests that are being managed by the task request can complete before
- * starting the task request. scic_controller_allocate_tag() for more
- * information on allocating a tag. Indicate if the controller successfully
- * started the IO request. SCI_TASK_SUCCESS if the task request was
- * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
- * returned if there is/are task(s) outstanding that require termination or
- * completion before this request can succeed.
 */
 enum sci_task_status scic_controller_start_task(
 	struct scic_sds_controller *scic,
 	struct scic_sds_remote_device *rdev,
-	struct scic_sds_request *req,
-	u16 task_tag)
+	struct scic_sds_request *req)
 {
 	enum sci_status status;
 
@@ -3022,85 +2967,3 @@ enum sci_task_status scic_controller_start_task(
 
 	return status;
 }
-
-/**
- * scic_controller_allocate_io_tag() - This method will allocate a tag from the
- *    pool of free IO tags. Direct allocation of IO tags by the SCI Core user
- *    is optional. The scic_controller_start_io() method will allocate an IO
- *    tag if this method is not utilized and the tag is not supplied to the IO
- *    construct routine. Direct allocation of IO tags may provide additional
- *    performance improvements in environments capable of supporting this usage
- *    model. Additionally, direct allocation of IO tags also provides
- *    additional flexibility to the SCI Core user. Specifically, the user may
- *    retain IO tags across the lives of multiple IO requests.
- * @controller: the handle to the controller object for which to allocate the
- *    tag.
- *
- * IO tags are a protected resource. It is incumbent upon the SCI Core user to
- * ensure that each of the methods that may allocate or free available IO tags
- * are handled in a mutually exclusive manner. This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). An unsigned integer representing an available IO tag.
- * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
- * currently available tags to be allocated. All return other values indicate a
- * legitimate tag.
- */
-u16 scic_controller_allocate_io_tag(struct scic_sds_controller *scic)
-{
-	struct isci_host *ihost = scic_to_ihost(scic);
-
-	if (isci_tci_space(ihost)) {
-		u16 tci = isci_tci_alloc(ihost);
-		u8 seq = scic->io_request_sequence[tci];
-
-		return ISCI_TAG(seq, tci);
-	}
-
-	return SCI_CONTROLLER_INVALID_IO_TAG;
-}
-
-/**
- * scic_controller_free_io_tag() - This method will free an IO tag to the pool
- *    of free IO tags. This method provides the SCI Core user more flexibility
- *    with regards to IO tags. The user may desire to keep an IO tag after an
- *    IO request has completed, because they plan on re-using the tag for a
- *    subsequent IO request. This method is only legal if the tag was
- *    allocated via scic_controller_allocate_io_tag().
- * @controller: This parameter specifies the handle to the controller object
- *    for which to free/return the tag.
- * @io_tag: This parameter represents the tag to be freed to the pool of
- *    available tags.
- *
- * - IO tags are a protected resource. It is incumbent upon the SCI Core user
- * to ensure that each of the methods that may allocate or free available IO
- * tags are handled in a mutually exclusive manner. This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
- * Core user, using the scic_controller_allocate_io_tag() method, then it is
- * the responsibility of the caller to invoke this method to free the tag. This
- * method returns an indication of whether the tag was successfully put back
- * (freed) to the pool of available tags. SCI_SUCCESS This return value
- * indicates the tag was successfully placed into the pool of available IO
- * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
- * is not a valid IO tag value.
- */
-enum sci_status scic_controller_free_io_tag(struct scic_sds_controller *scic,
-					    u16 io_tag)
-{
-	struct isci_host *ihost = scic_to_ihost(scic);
-	u16 tci = ISCI_TAG_TCI(io_tag);
-	u16 seq = ISCI_TAG_SEQ(io_tag);
-
-	/* prevent tail from passing head */
-	if (isci_tci_active(ihost) == 0)
-		return SCI_FAILURE_INVALID_IO_TAG;
-
-	if (seq == scic->io_request_sequence[tci]) {
-		scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
-
-		isci_tci_free(ihost, ISCI_TAG_TCI(io_tag));
-
-		return SCI_SUCCESS;
-	}
-	return SCI_FAILURE_INVALID_IO_TAG;
-}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index a54397e1bf16..d8164f5d7988 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -192,6 +192,7 @@ struct scic_sds_controller {
 	 *    context table. This data is shared between the hardware and software.
 	 */
 	struct scu_task_context *task_context_table;
+	dma_addr_t task_context_dma;
 
 	/**
 	 * This field is a pointer to the memory allocated by the driver for the
@@ -302,12 +303,8 @@ struct isci_host {
 	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
 	struct sas_ha_struct sas_ha;
 
-	int can_queue;
-	spinlock_t queue_lock;
 	spinlock_t state_lock;
-
 	struct pci_dev *pdev;
-
 	enum isci_status status;
 	#define IHOST_START_PENDING 0
 	#define IHOST_STOP_PENDING 1
@@ -451,36 +448,6 @@ static inline void isci_host_change_state(struct isci_host *isci_host,
 
 }
 
-static inline int isci_host_can_queue(struct isci_host *isci_host, int num)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&isci_host->queue_lock, flags);
-	if ((isci_host->can_queue - num) < 0) {
-		dev_dbg(&isci_host->pdev->dev,
-			"%s: isci_host->can_queue = %d\n",
-			__func__,
-			isci_host->can_queue);
-		ret = -SAS_QUEUE_FULL;
-
-	} else
-		isci_host->can_queue -= num;
-
-	spin_unlock_irqrestore(&isci_host->queue_lock, flags);
-
-	return ret;
-}
-
-static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&isci_host->queue_lock, flags);
-	isci_host->can_queue += num;
-	spin_unlock_irqrestore(&isci_host->queue_lock, flags);
-}
-
 static inline void wait_for_start(struct isci_host *ihost)
 {
 	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
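
With isci_host_can_queue()/isci_host_can_dequeue() deleted, queue depth is enforced by tag availability alone: submission fails when isci_alloc_tag() comes back empty. The task.c side of this patch is not shown in this excerpt, so the following is only an assumed sketch of what the ->lldd_execute_task check reduces to:

    /* Assumed sketch of the submission-time check replacing can_queue. */
    static int ex_execute(struct isci_host *ihost)
    {
            u16 tag;

            spin_lock_irq(&ihost->scic_lock);
            tag = isci_alloc_tag(ihost);
            spin_unlock_irq(&ihost->scic_lock);

            if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
                    return -SAS_QUEUE_FULL; /* same signal the old helper gave */
            return 0;
    }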
@@ -646,10 +613,6 @@ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffe
 struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
 					     u16 io_tag);
 
-struct scu_task_context *scic_sds_controller_get_task_context_buffer(
-	struct scic_sds_controller *scic,
-	u16 io_tag);
-
 void scic_sds_controller_power_control_queue_insert(
 	struct scic_sds_controller *scic,
 	struct scic_sds_phy *sci_phy);
@@ -681,6 +644,9 @@ void scic_sds_controller_register_setup(struct scic_sds_controller *scic);
 enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req);
 int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
 void isci_host_scan_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
 
 int isci_host_init(struct isci_host *);
 
@@ -708,14 +674,12 @@ void scic_controller_disable_interrupts(
 enum sci_status scic_controller_start_io(
 	struct scic_sds_controller *scic,
 	struct scic_sds_remote_device *remote_device,
-	struct scic_sds_request *io_request,
-	u16 io_tag);
+	struct scic_sds_request *io_request);
 
 enum sci_task_status scic_controller_start_task(
 	struct scic_sds_controller *scic,
 	struct scic_sds_remote_device *remote_device,
-	struct scic_sds_request *task_request,
-	u16 io_tag);
+	struct scic_sds_request *task_request);
 
 enum sci_status scic_controller_terminate_request(
 	struct scic_sds_controller *scic,
@@ -727,13 +691,6 @@ enum sci_status scic_controller_complete_io(
 	struct scic_sds_remote_device *remote_device,
 	struct scic_sds_request *io_request);
 
-u16 scic_controller_allocate_io_tag(
-	struct scic_sds_controller *scic);
-
-enum sci_status scic_controller_free_io_tag(
-	struct scic_sds_controller *scic,
-	u16 io_tag);
-
 void scic_sds_port_configuration_agent_construct(
 	struct scic_sds_port_configuration_agent *port_agent);
 
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 5f4a4e3954db..0e84e29335dd 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -695,35 +695,21 @@ static void scic_sds_port_construct_dummy_rnc(struct scic_sds_port *sci_port, u1
  */
 static void scic_sds_port_construct_dummy_task(struct scic_sds_port *sci_port, u16 tag)
 {
+	struct scic_sds_controller *scic = sci_port->owning_controller;
 	struct scu_task_context *task_context;
 
-	task_context = scic_sds_controller_get_task_context_buffer(sci_port->owning_controller, tag);
-
+	task_context = &scic->task_context_table[ISCI_TAG_TCI(tag)];
 	memset(task_context, 0, sizeof(struct scu_task_context));
 
-	task_context->abort = 0;
-	task_context->priority = 0;
 	task_context->initiator_request = 1;
 	task_context->connection_rate = 1;
-	task_context->protocol_engine_index = 0;
 	task_context->logical_port_index = sci_port->physical_port_index;
 	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
 	task_context->task_index = ISCI_TAG_TCI(tag);
 	task_context->valid = SCU_TASK_CONTEXT_VALID;
 	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
-
 	task_context->remote_node_index = sci_port->reserved_rni;
-	task_context->command_code = 0;
-
-	task_context->link_layer_control = 0;
 	task_context->do_not_dma_ssp_good_response = 1;
-	task_context->strict_ordering = 0;
-	task_context->control_frame = 0;
-	task_context->timeout_enable = 0;
-	task_context->block_guard_enable = 0;
-
-	task_context->address_modifier = 0;
-
 	task_context->task_phase = 0x01;
 }
 
@@ -731,15 +717,15 @@ static void scic_sds_port_destroy_dummy_resources(struct scic_sds_port *sci_port
 {
 	struct scic_sds_controller *scic = sci_port->owning_controller;
 
-	if (sci_port->reserved_tci != SCU_DUMMY_INDEX)
-		scic_controller_free_io_tag(scic, sci_port->reserved_tci);
+	if (sci_port->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+		isci_free_tag(scic_to_ihost(scic), sci_port->reserved_tag);
 
 	if (sci_port->reserved_rni != SCU_DUMMY_INDEX)
 		scic_sds_remote_node_table_release_remote_node_index(&scic->available_remote_nodes,
 								     1, sci_port->reserved_rni);
 
 	sci_port->reserved_rni = SCU_DUMMY_INDEX;
-	sci_port->reserved_tci = SCU_DUMMY_INDEX;
+	sci_port->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
 }
 
 /**
@@ -1119,18 +1105,17 @@ scic_sds_port_suspend_port_task_scheduler(struct scic_sds_port *port)
  */
 static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port)
 {
-	u32 command;
-	struct scu_task_context *task_context;
 	struct scic_sds_controller *scic = sci_port->owning_controller;
-	u16 tci = sci_port->reserved_tci;
-
-	task_context = scic_sds_controller_get_task_context_buffer(scic, tci);
-
-	task_context->abort = 0;
+	u16 tag = sci_port->reserved_tag;
+	struct scu_task_context *tc;
+	u32 command;
+
+	tc = &scic->task_context_table[ISCI_TAG_TCI(tag)];
+	tc->abort = 0;
 
 	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
 		  sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
-		  tci;
+		  ISCI_TAG_TCI(tag);
 
 	scic_sds_controller_post_request(scic, command);
 }
@@ -1145,17 +1130,16 @@ static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port)
 static void scic_sds_port_abort_dummy_request(struct scic_sds_port *sci_port)
 {
 	struct scic_sds_controller *scic = sci_port->owning_controller;
-	u16 tci = sci_port->reserved_tci;
+	u16 tag = sci_port->reserved_tag;
 	struct scu_task_context *tc;
 	u32 command;
 
-	tc = scic_sds_controller_get_task_context_buffer(scic, tci);
-
+	tc = &scic->task_context_table[ISCI_TAG_TCI(tag)];
 	tc->abort = 1;
 
 	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
 		  sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
-		  tci;
+		  ISCI_TAG_TCI(tag);
 
 	scic_sds_controller_post_request(scic, command);
 }
@@ -1333,15 +1317,16 @@ enum sci_status scic_sds_port_start(struct scic_sds_port *sci_port)
 		sci_port->reserved_rni = rni;
 	}
 
-	if (sci_port->reserved_tci == SCU_DUMMY_INDEX) {
-		/* Allocate a TCI and remove the sequence nibble */
-		u16 tci = scic_controller_allocate_io_tag(scic);
+	if (sci_port->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+		struct isci_host *ihost = scic_to_ihost(scic);
+		u16 tag;
 
-		if (tci != SCU_DUMMY_INDEX)
-			scic_sds_port_construct_dummy_task(sci_port, tci);
-		else
+		tag = isci_alloc_tag(ihost);
+		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
-		sci_port->reserved_tci = tci;
+		else
+			scic_sds_port_construct_dummy_task(sci_port, tag);
+		sci_port->reserved_tag = tag;
 	}
 
 	if (status == SCI_SUCCESS) {
@@ -1859,7 +1844,7 @@ void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 index,
 	sci_port->assigned_device_count = 0;
 
 	sci_port->reserved_rni = SCU_DUMMY_INDEX;
-	sci_port->reserved_tci = SCU_DUMMY_INDEX;
+	sci_port->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
 
 	sci_init_timer(&sci_port->timer, port_timeout);
 
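
The port changes above swap a bare reserved_tci for a full reserved_tag, so the dummy task context is found by plain ISCI_TAG_TCI() indexing into the shared TC table and retired through the common isci_free_tag() path. Condensed, the lifecycle the port code implements looks roughly like:

    /* Sketch: a port's reserved-tag lifecycle (error paths elided). */
    static void ex_port_cycle(struct isci_host *ihost,
                              struct scic_sds_controller *scic)
    {
            u16 tag = isci_alloc_tag(ihost);        /* at port start */
            struct scu_task_context *tc;

            if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
                    return;
            tc = &scic->task_context_table[ISCI_TAG_TCI(tag)];
            tc->task_index = ISCI_TAG_TCI(tag);     /* built in place */
            /* ... post/abort dummy requests against this TC ... */
            isci_free_tag(ihost, tag);              /* at port teardown */
    }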
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 45c01f80bf83..a44e541914f5 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -108,7 +108,7 @@ struct scic_sds_port {
 	u8 active_phy_mask;
 
 	u16 reserved_rni;
-	u16 reserved_tci;
+	u16 reserved_tag;
 
 	/**
 	 * This field contains the count of the io requests started on this port
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 08a7340b33bf..55859d5331b1 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -61,42 +61,50 @@
 #include "scu_event_codes.h"
 #include "sas.h"
 
-/**
- * This method returns the sgl element pair for the specificed sgl_pair index.
- * @sci_req: This parameter specifies the IO request for which to retrieve
- *    the Scatter-Gather List element pair.
- * @sgl_pair_index: This parameter specifies the index into the SGL element
- *    pair to be retrieved.
- *
- * This method returns a pointer to an struct scu_sgl_element_pair.
- */
-static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
-	struct scic_sds_request *sci_req,
-	u32 sgl_pair_index
-	) {
-	struct scu_task_context *task_context;
+static struct scu_sgl_element_pair *to_sgl_element_pair(struct scic_sds_request *sci_req,
+							int idx)
+{
+	if (idx == 0)
+		return &sci_req->tc->sgl_pair_ab;
+	else if (idx == 1)
+		return &sci_req->tc->sgl_pair_cd;
+	else if (idx < 0)
+		return NULL;
+	else
+		return &sci_req->sg_table[idx - 2];
+}
 
-	task_context = (struct scu_task_context *)sci_req->task_context_buffer;
+static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic,
+					  struct scic_sds_request *sci_req, u32 idx)
+{
+	u32 offset;
 
-	if (sgl_pair_index == 0) {
-		return &task_context->sgl_pair_ab;
-	} else if (sgl_pair_index == 1) {
-		return &task_context->sgl_pair_cd;
+	if (idx == 0) {
+		offset = (void *) &sci_req->tc->sgl_pair_ab -
+			 (void *) &scic->task_context_table[0];
+		return scic->task_context_dma + offset;
+	} else if (idx == 1) {
+		offset = (void *) &sci_req->tc->sgl_pair_cd -
+			 (void *) &scic->task_context_table[0];
+		return scic->task_context_dma + offset;
 	}
 
-	return &sci_req->sg_table[sgl_pair_index - 2];
+	return scic_io_request_get_dma_addr(sci_req, &sci_req->sg_table[idx - 2]);
+}
+
+static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
+{
+	e->length = sg_dma_len(sg);
+	e->address_upper = upper_32_bits(sg_dma_address(sg));
+	e->address_lower = lower_32_bits(sg_dma_address(sg));
+	e->address_modifier = 0;
 }
 
-/**
- * This function will build the SGL list for an IO request.
- * @sci_req: This parameter specifies the IO request for which to build
- *    the Scatter-Gather List.
- *
- */
 static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
 {
 	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
 	struct isci_host *isci_host = isci_request->isci_host;
+	struct scic_sds_controller *scic = &isci_host->sci;
 	struct sas_task *task = isci_request_access_task(isci_request);
 	struct scatterlist *sg = NULL;
 	dma_addr_t dma_addr;
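
to_sgl_element_pair_dma() is where the new task_context_dma handle pays off: for the 'ab'/'cd' pairs embedded in the TC table it turns a virtual offset within the table into a bus address, falling back to scic_io_request_get_dma_addr() only for the per-request sg_table overflow pairs. The offset arithmetic in isolation, as a sketch (the pointer math is valid because the whole table is one coherent DMA allocation):

    #include <linux/types.h>

    static dma_addr_t ex_pair_dma(void *table_virt, dma_addr_t table_dma,
                                  void *pair_virt)
    {
            /* virtual and bus offsets coincide inside one coherent region */
            return table_dma + ((char *)pair_virt - (char *)table_virt);
    }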
@@ -108,25 +116,19 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
 		sg = task->scatter;
 
 		while (sg) {
-			scu_sg = scic_sds_request_get_sgl_element_pair(
-				sds_request,
-				sg_idx);
-
-			SCU_SGL_COPY(scu_sg->A, sg);
-
+			scu_sg = to_sgl_element_pair(sds_request, sg_idx);
+			init_sgl_element(&scu_sg->A, sg);
 			sg = sg_next(sg);
-
 			if (sg) {
-				SCU_SGL_COPY(scu_sg->B, sg);
+				init_sgl_element(&scu_sg->B, sg);
 				sg = sg_next(sg);
 			} else
-				SCU_SGL_ZERO(scu_sg->B);
+				memset(&scu_sg->B, 0, sizeof(scu_sg->B));
 
 			if (prev_sg) {
-				dma_addr =
-					scic_io_request_get_dma_addr(
-						sds_request,
-						scu_sg);
+				dma_addr = to_sgl_element_pair_dma(scic,
+								   sds_request,
+								   sg_idx);
 
 				prev_sg->next_pair_upper =
 					upper_32_bits(dma_addr);
@@ -138,8 +140,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
 			sg_idx++;
 		}
 	} else {	/* handle when no sg */
-		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
-							       sg_idx);
+		scu_sg = to_sgl_element_pair(sds_request, sg_idx);
 
 		dma_addr = dma_map_single(&isci_host->pdev->dev,
 					  task->scatter,
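
Each scu_sgl_element_pair carries two data elements plus a next-pair link, and the builder above chains pair N to pair N+1 by physical address as it walks the scatterlist. A reduced sketch of that chaining step (structure abridged to the link fields):

    #include <linux/kernel.h>   /* upper_32_bits(), lower_32_bits() */
    #include <linux/types.h>

    struct ex_sgl_pair {        /* abridged scu_sgl_element_pair */
            u32 next_pair_upper;
            u32 next_pair_lower;
    };

    static void ex_chain(struct ex_sgl_pair *prev, dma_addr_t next_dma)
    {
            prev->next_pair_upper = upper_32_bits(next_dma);
            prev->next_pair_lower = lower_32_bits(next_dma);
    }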
@@ -246,35 +247,12 @@ static void scu_ssp_reqeust_construct_task_context(
 	/* task_context->type.ssp.tag = sci_req->io_tag; */
 	task_context->task_phase = 0x01;
 
-	if (sds_request->was_tag_assigned_by_user) {
-		/*
-		 * Build the task context now since we have already read
-		 * the data
-		 */
-		sds_request->post_context =
-			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-			 (scic_sds_controller_get_protocol_engine_group(
-							controller) <<
-			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-			 (scic_sds_port_get_index(target_port) <<
-			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
-			 ISCI_TAG_TCI(sds_request->io_tag));
-	} else {
-		/*
-		 * Build the task context now since we have already read
-		 * the data
-		 *
-		 * I/O tag index is not assigned because we have to wait
-		 * until we get a TCi
-		 */
-		sds_request->post_context =
-			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-			 (scic_sds_controller_get_protocol_engine_group(
-							owning_controller) <<
-			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-			 (scic_sds_port_get_index(target_port) <<
-			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
-	}
+	sds_request->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+				     (scic_sds_controller_get_protocol_engine_group(controller) <<
+				      SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+				     (scic_sds_port_get_index(target_port) <<
+				      SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+				     ISCI_TAG_TCI(sds_request->io_tag));
 
 	/*
 	 * Copy the physical address for the command buffer to the
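
Because the tag now exists before construction, post_context can be assembled in a single expression: request type, protocol engine group, logical port, and TCI are OR'd into one dword. A sketch of the packing with placeholder shift values (the real SCU_CONTEXT_COMMAND_* constants are hardware-defined and not reproduced here):

    #define EX_POST_TC      (1u << 28)  /* placeholder request type */
    #define EX_PEG_SHIFT    26          /* placeholder */
    #define EX_PORT_SHIFT   12          /* placeholder */

    static u32 ex_post_context(u32 peg, u32 port, u16 tci)
    {
            return EX_POST_TC | (peg << EX_PEG_SHIFT) |
                   (port << EX_PORT_SHIFT) | tci;
    }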
@@ -302,14 +280,11 @@ static void scu_ssp_reqeust_construct_task_context(
  * @sci_req:
  *
  */
-static void scu_ssp_io_request_construct_task_context(
-	struct scic_sds_request *sci_req,
-	enum dma_data_direction dir,
-	u32 len)
+static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *sci_req,
+						      enum dma_data_direction dir,
+						      u32 len)
 {
-	struct scu_task_context *task_context;
-
-	task_context = scic_sds_request_get_task_context(sci_req);
+	struct scu_task_context *task_context = sci_req->tc;
 
 	scu_ssp_reqeust_construct_task_context(sci_req, task_context);
 
@@ -347,12 +322,9 @@ static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *s
  *    constructed.
  *
  */
-static void scu_ssp_task_request_construct_task_context(
-	struct scic_sds_request *sci_req)
+static void scu_ssp_task_request_construct_task_context(struct scic_sds_request *sci_req)
 {
-	struct scu_task_context *task_context;
-
-	task_context = scic_sds_request_get_task_context(sci_req);
+	struct scu_task_context *task_context = sci_req->tc;
 
 	scu_ssp_reqeust_construct_task_context(sci_req, task_context);
 
@@ -421,35 +393,12 @@ static void scu_sata_reqeust_construct_task_context(
 	/* Set the first word of the H2D REG FIS */
 	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
 
-	if (sci_req->was_tag_assigned_by_user) {
-		/*
-		 * Build the task context now since we have already read
-		 * the data
-		 */
-		sci_req->post_context =
-			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-			 (scic_sds_controller_get_protocol_engine_group(
-							controller) <<
-			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-			 (scic_sds_port_get_index(target_port) <<
-			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
-			 ISCI_TAG_TCI(sci_req->io_tag));
-	} else {
-		/*
-		 * Build the task context now since we have already read
-		 * the data.
-		 * I/O tag index is not assigned because we have to wait
-		 * until we get a TCi.
-		 */
-		sci_req->post_context =
-			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-			 (scic_sds_controller_get_protocol_engine_group(
-							controller) <<
-			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-			 (scic_sds_port_get_index(target_port) <<
-			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
-	}
-
+	sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+				 (scic_sds_controller_get_protocol_engine_group(controller) <<
+				  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+				 (scic_sds_port_get_index(target_port) <<
+				  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+				 ISCI_TAG_TCI(sci_req->io_tag));
 	/*
 	 * Copy the physical address for the command buffer to the SCU Task
 	 * Context. We must offset the command buffer by 4 bytes because the
@@ -467,22 +416,9 @@ static void scu_sata_reqeust_construct_task_context(
 	task_context->response_iu_lower = 0;
 }
 
-
-
-/**
- * scu_stp_raw_request_construct_task_context -
- * @sci_req: This parameter specifies the STP request object for which to
- *    construct a RAW command frame task context.
- * @task_context: This parameter specifies the SCU specific task context buffer
- *    to construct.
- *
- * This method performs the operations common to all SATA/STP requests
- * utilizing the raw frame method. none
- */
-static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
-						       struct scu_task_context *task_context)
+static void scu_stp_raw_request_construct_task_context(struct scic_sds_request *sci_req)
 {
-	struct scic_sds_request *sci_req = to_sci_req(stp_req);
+	struct scu_task_context *task_context = sci_req->tc;
 
 	scu_sata_reqeust_construct_task_context(sci_req, task_context);
 
@@ -500,8 +436,7 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
 	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
 	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;
 
-	scu_stp_raw_request_construct_task_context(stp_req,
-						   sci_req->task_context_buffer);
+	scu_stp_raw_request_construct_task_context(sci_req);
 
 	pio->current_transfer_bytes = 0;
 	pio->ending_error = 0;
@@ -512,13 +447,10 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
 
 	if (copy_rx_frame) {
 		scic_sds_request_build_sgl(sci_req);
-		/* Since the IO request copy of the TC contains the same data as
-		 * the actual TC this pointer is vaild for either.
-		 */
-		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
+		pio->request_current.sgl_index = 0;
 	} else {
 		/* The user does not want the data copied to the SGL buffer location */
-		pio->request_current.sgl_pair = NULL;
+		pio->request_current.sgl_index = -1;
 	}
 
 	return SCI_SUCCESS;
@@ -541,7 +473,7 @@ static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sc
 							      u32 len,
 							      enum dma_data_direction dir)
 {
-	struct scu_task_context *task_context = sci_req->task_context_buffer;
+	struct scu_task_context *task_context = sci_req->tc;
 
 	/* Build the STP task context structure */
 	scu_sata_reqeust_construct_task_context(sci_req, task_context);
@@ -587,8 +519,7 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
 
 		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
 		    tmf->tmf_code == isci_tmf_sata_srst_low) {
-			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
-								   sci_req->task_context_buffer);
+			scu_stp_raw_request_construct_task_context(sci_req);
 			return SCI_SUCCESS;
 		} else {
 			dev_err(scic_to_dev(sci_req->owning_controller),
@@ -611,8 +542,7 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req,
 
 	/* non data */
 	if (task->data_dir == DMA_NONE) {
-		scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
-							   sci_req->task_context_buffer);
+		scu_stp_raw_request_construct_task_context(sci_req);
 		return SCI_SUCCESS;
 	}
 
@@ -701,8 +631,7 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re
 
 	if (tmf->tmf_code == isci_tmf_sata_srst_high ||
 	    tmf->tmf_code == isci_tmf_sata_srst_low) {
-		scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
-							   sci_req->task_context_buffer);
+		scu_stp_raw_request_construct_task_context(sci_req);
 	} else {
 		dev_err(scic_to_dev(sci_req->owning_controller),
 			"%s: Request 0x%p received un-handled SAT "
@@ -749,9 +678,9 @@ static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
 
 enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
 {
-	struct scic_sds_controller *scic = sci_req->owning_controller;
-	struct scu_task_context *task_context;
 	enum sci_base_request_states state;
+	struct scu_task_context *tc = sci_req->tc;
+	struct scic_sds_controller *scic = sci_req->owning_controller;
 
 	state = sci_req->sm.current_state_id;
 	if (state != SCI_REQ_CONSTRUCTED) {
@@ -761,61 +690,39 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) | |||
761 | return SCI_FAILURE_INVALID_STATE; | 690 | return SCI_FAILURE_INVALID_STATE; |
762 | } | 691 | } |
763 | 692 | ||
764 | /* if necessary, allocate a TCi for the io request object and then will, | 693 | tc->task_index = ISCI_TAG_TCI(sci_req->io_tag); |
765 | * if necessary, copy the constructed TC data into the actual TC buffer. | ||
766 | * If everything is successful the post context field is updated with | ||
767 | * the TCi so the controller can post the request to the hardware. | ||
768 | */ | ||
769 | if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) | ||
770 | sci_req->io_tag = scic_controller_allocate_io_tag(scic); | ||
771 | |||
772 | /* Record the IO Tag in the request */ | ||
773 | if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) { | ||
774 | task_context = sci_req->task_context_buffer; | ||
775 | |||
776 | task_context->task_index = ISCI_TAG_TCI(sci_req->io_tag); | ||
777 | |||
778 | switch (task_context->protocol_type) { | ||
779 | case SCU_TASK_CONTEXT_PROTOCOL_SMP: | ||
780 | case SCU_TASK_CONTEXT_PROTOCOL_SSP: | ||
781 | /* SSP/SMP Frame */ | ||
782 | task_context->type.ssp.tag = sci_req->io_tag; | ||
783 | task_context->type.ssp.target_port_transfer_tag = | ||
784 | 0xFFFF; | ||
785 | break; | ||
786 | 694 | ||
787 | case SCU_TASK_CONTEXT_PROTOCOL_STP: | 695 | switch (tc->protocol_type) { |
788 | /* STP/SATA Frame | 696 | case SCU_TASK_CONTEXT_PROTOCOL_SMP: |
789 | * task_context->type.stp.ncq_tag = sci_req->ncq_tag; | 697 | case SCU_TASK_CONTEXT_PROTOCOL_SSP: |
790 | */ | 698 | /* SSP/SMP Frame */ |
791 | break; | 699 | tc->type.ssp.tag = sci_req->io_tag; |
792 | 700 | tc->type.ssp.target_port_transfer_tag = 0xFFFF; | |
793 | case SCU_TASK_CONTEXT_PROTOCOL_NONE: | 701 | break; |
794 | /* / @todo When do we set no protocol type? */ | ||
795 | break; | ||
796 | 702 | ||
797 | default: | 703 | case SCU_TASK_CONTEXT_PROTOCOL_STP: |
798 | /* This should never happen since we build the IO | 704 | /* STP/SATA Frame |
799 | * requests */ | 705 | * tc->type.stp.ncq_tag = sci_req->ncq_tag; |
800 | break; | 706 | */ |
801 | } | 707 | break; |
802 | 708 | ||
803 | /* | 709 | case SCU_TASK_CONTEXT_PROTOCOL_NONE: |
804 | * Check to see if we need to copy the task context buffer | 710 | /* / @todo When do we set no protocol type? */ |
805 | * or have been building into the task context buffer */ | 711 | break; |
806 | if (sci_req->was_tag_assigned_by_user == false) | ||
807 | scic_sds_controller_copy_task_context(scic, sci_req); | ||
808 | 712 | ||
809 | /* Add to the post_context the io tag value */ | 713 | default: |
810 | sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); | 714 | /* This should never happen since we build the IO |
715 | * requests */ | ||
716 | break; | ||
717 | } | ||
811 | 718 | ||
812 | /* Everything is good go ahead and change state */ | 719 | /* Add to the post_context the io tag value */ |
813 | sci_change_state(&sci_req->sm, SCI_REQ_STARTED); | 720 | sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); |
814 | 721 | ||
815 | return SCI_SUCCESS; | 722 | /* Everything is good go ahead and change state */ |
816 | } | 723 | sci_change_state(&sci_req->sm, SCI_REQ_STARTED); |
817 | 724 | ||
818 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; | 725 | return SCI_SUCCESS; |
819 | } | 726 | } |
820 | 727 | ||
821 | enum sci_status | 728 | enum sci_status |
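Editorial note: with a valid tag now guaranteed before scic_sds_request_start() runs, the function only stamps the TCI into the pre-assigned task context instead of allocating a tag and copying a staged TC. A minimal sketch of the tag layout this relies on; the 12-bit shift and 256-slot pool are illustrative assumptions, only "low bits select the task context slot" is implied by the ISCI_TAG_TCI() use above:

    /* Illustrative tag layout: a sequence count in the high bits for
     * stale-tag detection, the task context index (TCI) in the low bits.
     * Field widths here are assumptions, not values taken from this diff.
     */
    #include <linux/types.h>

    #define SKETCH_MAX_IO_REQUESTS	256

    static inline u16 sketch_tag_make(u8 seq, u16 tci)
    {
    	return ((u16)seq << 12) | tci;	/* sequence | slot index */
    }

    static inline u16 sketch_tag_tci(u16 tag)
    {
    	return tag & (SKETCH_MAX_IO_REQUESTS - 1);	/* recover the slot */
    }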
@@ -880,9 +787,6 @@ enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) | |||
880 | "isci: request completion from wrong state (%d)\n", state)) | 787 | "isci: request completion from wrong state (%d)\n", state)) |
881 | return SCI_FAILURE_INVALID_STATE; | 788 | return SCI_FAILURE_INVALID_STATE; |
882 | 789 | ||
883 | if (!sci_req->was_tag_assigned_by_user) | ||
884 | scic_controller_free_io_tag(scic, sci_req->io_tag); | ||
885 | |||
886 | if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) | 790 | if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) |
887 | scic_sds_controller_release_frame(scic, | 791 | scic_sds_controller_release_frame(scic, |
888 | sci_req->saved_rx_frame_index); | 792 | sci_req->saved_rx_frame_index); |
@@ -1244,51 +1148,40 @@ void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req, | |||
1244 | * @note This could be made to return an error to the user if the user | 1148 | * @note This could be made to return an error to the user if the user |
1245 | * attempts to set the NCQ tag in the wrong state. | 1149 | * attempts to set the NCQ tag in the wrong state. |
1246 | */ | 1150 | */ |
1247 | req->task_context_buffer->type.stp.ncq_tag = ncq_tag; | 1151 | req->tc->type.stp.ncq_tag = ncq_tag; |
1248 | } | 1152 | } |
1249 | 1153 | ||
1250 | /** | 1154 | static struct scu_sgl_element *pio_sgl_next(struct scic_sds_stp_request *stp_req) |
1251 | * | ||
1252 | * @sci_req: | ||
1253 | * | ||
1254 | * Get the next SGL element from the request. - Check on which SGL element pair | ||
1255 | we are working - if working on SGL pair element A - advance to element B - | ||
1256 | * else - check to see if there are more SGL element pairs for this IO request | ||
1257 | * - if there are more SGL element pairs - advance to the next pair and return | ||
1258 | * element A struct scu_sgl_element* | ||
1259 | */ | ||
1260 | static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req) | ||
1261 | { | 1155 | { |
1262 | struct scu_sgl_element *current_sgl; | 1156 | struct scu_sgl_element *sgl; |
1157 | struct scu_sgl_element_pair *sgl_pair; | ||
1263 | struct scic_sds_request *sci_req = to_sci_req(stp_req); | 1158 | struct scic_sds_request *sci_req = to_sci_req(stp_req); |
1264 | struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; | 1159 | struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current; |
1265 | 1160 | ||
1266 | if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { | 1161 | sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index); |
1267 | if (pio_sgl->sgl_pair->B.address_lower == 0 && | 1162 | if (!sgl_pair) |
1268 | pio_sgl->sgl_pair->B.address_upper == 0) { | 1163 | sgl = NULL; |
1269 | current_sgl = NULL; | 1164 | else if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) { |
1165 | if (sgl_pair->B.address_lower == 0 && | ||
1166 | sgl_pair->B.address_upper == 0) { | ||
1167 | sgl = NULL; | ||
1270 | } else { | 1168 | } else { |
1271 | pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; | 1169 | pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B; |
1272 | current_sgl = &pio_sgl->sgl_pair->B; | 1170 | sgl = &sgl_pair->B; |
1273 | } | 1171 | } |
1274 | } else { | 1172 | } else { |
1275 | if (pio_sgl->sgl_pair->next_pair_lower == 0 && | 1173 | if (sgl_pair->next_pair_lower == 0 && |
1276 | pio_sgl->sgl_pair->next_pair_upper == 0) { | 1174 | sgl_pair->next_pair_upper == 0) { |
1277 | current_sgl = NULL; | 1175 | sgl = NULL; |
1278 | } else { | 1176 | } else { |
1279 | u64 phys_addr; | 1177 | pio_sgl->sgl_index++; |
1280 | |||
1281 | phys_addr = pio_sgl->sgl_pair->next_pair_upper; | ||
1282 | phys_addr <<= 32; | ||
1283 | phys_addr |= pio_sgl->sgl_pair->next_pair_lower; | ||
1284 | |||
1285 | pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr); | ||
1286 | pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; | 1178 | pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A; |
1287 | current_sgl = &pio_sgl->sgl_pair->A; | 1179 | sgl_pair = to_sgl_element_pair(sci_req, pio_sgl->sgl_index); |
1180 | sgl = &sgl_pair->A; | ||
1288 | } | 1181 | } |
1289 | } | 1182 | } |
1290 | 1183 | ||
1291 | return current_sgl; | 1184 | return sgl; |
1292 | } | 1185 | } |
1293 | 1186 | ||
1294 | static enum sci_status | 1187 | static enum sci_status |
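Editorial note: the rewritten iterator above resolves pairs by index through to_sgl_element_pair(), whose body lies outside this hunk. One plausible shape for it, assuming the member names sgl_pair_cd and sg_table (only sgl_pair_ab appears elsewhere in this patch):

    /* Hypothetical sketch of to_sgl_element_pair(): indexes 0 and 1 hit
     * the pairs embedded in the task context, larger indexes fall through
     * to a request-local table, and a negative index means "no sgl".
     */
    static struct scu_sgl_element_pair *
    to_sgl_element_pair_sketch(struct scic_sds_request *sci_req, int idx)
    {
    	if (idx < 0)
    		return NULL;			/* sentinel: no data phase */
    	if (idx == 0)
    		return &sci_req->tc->sgl_pair_ab;	/* embedded 'ab' pair */
    	if (idx == 1)
    		return &sci_req->tc->sgl_pair_cd;	/* assumed 'cd' pair */
    	return &sci_req->sg_table[idx - 2];	/* assumed spill table */
    }

Either way the lookup is positional, which is what lets the pio cursor store a plain integer instead of a pointer into a staging buffer.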
@@ -1328,21 +1221,19 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( | |||
1328 | struct scic_sds_request *sci_req, | 1221 | struct scic_sds_request *sci_req, |
1329 | u32 length) | 1222 | u32 length) |
1330 | { | 1223 | { |
1331 | struct scic_sds_controller *scic = sci_req->owning_controller; | ||
1332 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; | 1224 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; |
1333 | struct scu_task_context *task_context; | 1225 | struct scu_task_context *task_context = sci_req->tc; |
1226 | struct scu_sgl_element_pair *sgl_pair; | ||
1334 | struct scu_sgl_element *current_sgl; | 1227 | struct scu_sgl_element *current_sgl; |
1335 | 1228 | ||
1336 | /* Recycle the TC and reconstruct it to send out a DATA FIS containing | 1229 | /* Recycle the TC and reconstruct it to send out a DATA FIS containing |
1337 | * the data from current_sgl+offset for the input length | 1230 | * the data from current_sgl+offset for the input length |
1338 | */ | 1231 | */ |
1339 | task_context = scic_sds_controller_get_task_context_buffer(scic, | 1232 | sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index); |
1340 | sci_req->io_tag); | ||
1341 | |||
1342 | if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) | 1233 | if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) |
1343 | current_sgl = &stp_req->type.pio.request_current.sgl_pair->A; | 1234 | current_sgl = &sgl_pair->A; |
1344 | else | 1235 | else |
1345 | current_sgl = &stp_req->type.pio.request_current.sgl_pair->B; | 1236 | current_sgl = &sgl_pair->B; |
1346 | 1237 | ||
1347 | /* update the TC */ | 1238 | /* update the TC */ |
1348 | task_context->command_iu_upper = current_sgl->address_upper; | 1239 | task_context->command_iu_upper = current_sgl->address_upper; |
@@ -1362,18 +1253,21 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc | |||
1362 | u32 remaining_bytes_in_current_sgl = 0; | 1253 | u32 remaining_bytes_in_current_sgl = 0; |
1363 | enum sci_status status = SCI_SUCCESS; | 1254 | enum sci_status status = SCI_SUCCESS; |
1364 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; | 1255 | struct scic_sds_stp_request *stp_req = &sci_req->stp.req; |
1256 | struct scu_sgl_element_pair *sgl_pair; | ||
1365 | 1257 | ||
1366 | sgl_offset = stp_req->type.pio.request_current.sgl_offset; | 1258 | sgl_offset = stp_req->type.pio.request_current.sgl_offset; |
1259 | sgl_pair = to_sgl_element_pair(sci_req, stp_req->type.pio.request_current.sgl_index); | ||
1260 | if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) | ||
1261 | return SCI_FAILURE; | ||
1367 | 1262 | ||
1368 | if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { | 1263 | if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) { |
1369 | current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A); | 1264 | current_sgl = &sgl_pair->A; |
1370 | remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset; | 1265 | remaining_bytes_in_current_sgl = sgl_pair->A.length - sgl_offset; |
1371 | } else { | 1266 | } else { |
1372 | current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B); | 1267 | current_sgl = &sgl_pair->B; |
1373 | remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset; | 1268 | remaining_bytes_in_current_sgl = sgl_pair->B.length - sgl_offset; |
1374 | } | 1269 | } |
1375 | 1270 | ||
1376 | |||
1377 | if (stp_req->type.pio.pio_transfer_bytes > 0) { | 1271 | if (stp_req->type.pio.pio_transfer_bytes > 0) { |
1378 | if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { | 1272 | if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) { |
1379 | /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ | 1273 | /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */ |
@@ -1382,7 +1276,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct sc | |||
1382 | stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; | 1276 | stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl; |
1383 | 1277 | ||
1384 | /* update the current sgl, sgl_offset and save for future */ | 1278 | /* update the current sgl, sgl_offset and save for future */ |
1385 | current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req); | 1279 | current_sgl = pio_sgl_next(stp_req); |
1386 | sgl_offset = 0; | 1280 | sgl_offset = 0; |
1387 | } | 1281 | } |
1388 | } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { | 1282 | } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) { |
@@ -1945,7 +1839,7 @@ scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, | |||
1945 | return status; | 1839 | return status; |
1946 | } | 1840 | } |
1947 | 1841 | ||
1948 | if (stp_req->type.pio.request_current.sgl_pair == NULL) { | 1842 | if (stp_req->type.pio.request_current.sgl_index < 0) { |
1949 | sci_req->saved_rx_frame_index = frame_index; | 1843 | sci_req->saved_rx_frame_index = frame_index; |
1950 | stp_req->type.pio.pio_transfer_bytes = 0; | 1844 | stp_req->type.pio.pio_transfer_bytes = 0; |
1951 | } else { | 1845 | } else { |
@@ -2977,8 +2871,6 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2977 | * task to recognize the already completed case. | 2871 | * task to recognize the already completed case. |
2978 | */ | 2872 | */ |
2979 | request->terminated = true; | 2873 | request->terminated = true; |
2980 | |||
2981 | isci_host_can_dequeue(isci_host, 1); | ||
2982 | } | 2874 | } |
2983 | 2875 | ||
2984 | static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) | 2876 | static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) |
@@ -3039,7 +2931,7 @@ static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine | |||
3039 | struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); | 2931 | struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); |
3040 | 2932 | ||
3041 | /* Setting the abort bit in the Task Context is required by the silicon. */ | 2933 | /* Setting the abort bit in the Task Context is required by the silicon. */ |
3042 | sci_req->task_context_buffer->abort = 1; | 2934 | sci_req->tc->abort = 1; |
3043 | } | 2935 | } |
3044 | 2936 | ||
3045 | static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) | 2937 | static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) |
@@ -3069,7 +2961,7 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completio | |||
3069 | static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) | 2961 | static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) |
3070 | { | 2962 | { |
3071 | struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); | 2963 | struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm); |
3072 | struct scu_task_context *task_context; | 2964 | struct scu_task_context *tc = sci_req->tc; |
3073 | struct host_to_dev_fis *h2d_fis; | 2965 | struct host_to_dev_fis *h2d_fis; |
3074 | enum sci_status status; | 2966 | enum sci_status status; |
3075 | 2967 | ||
@@ -3078,9 +2970,7 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet | |||
3078 | h2d_fis->control = 0; | 2970 | h2d_fis->control = 0; |
3079 | 2971 | ||
3080 | /* Clear the TC control bit */ | 2972 | /* Clear the TC control bit */ |
3081 | task_context = scic_sds_controller_get_task_context_buffer( | 2973 | tc->control_frame = 0; |
3082 | sci_req->owning_controller, sci_req->io_tag); | ||
3083 | task_context->control_frame = 0; | ||
3084 | 2974 | ||
3085 | status = scic_controller_continue_io(sci_req); | 2975 | status = scic_controller_continue_io(sci_req); |
3086 | WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); | 2976 | WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); |
@@ -3141,18 +3031,10 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, | |||
3141 | sci_req->sci_status = SCI_SUCCESS; | 3031 | sci_req->sci_status = SCI_SUCCESS; |
3142 | sci_req->scu_status = 0; | 3032 | sci_req->scu_status = 0; |
3143 | sci_req->post_context = 0xFFFFFFFF; | 3033 | sci_req->post_context = 0xFFFFFFFF; |
3034 | sci_req->tc = &scic->task_context_table[ISCI_TAG_TCI(io_tag)]; | ||
3144 | 3035 | ||
3145 | sci_req->is_task_management_request = false; | 3036 | sci_req->is_task_management_request = false; |
3146 | 3037 | WARN_ONCE(io_tag == SCI_CONTROLLER_INVALID_IO_TAG, "straggling invalid tag usage\n"); | |
3147 | if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) { | ||
3148 | sci_req->was_tag_assigned_by_user = false; | ||
3149 | sci_req->task_context_buffer = &sci_req->tc; | ||
3150 | } else { | ||
3151 | sci_req->was_tag_assigned_by_user = true; | ||
3152 | |||
3153 | sci_req->task_context_buffer = | ||
3154 | scic_sds_controller_get_task_context_buffer(scic, io_tag); | ||
3155 | } | ||
3156 | } | 3038 | } |
3157 | 3039 | ||
3158 | static enum sci_status | 3040 | static enum sci_status |
@@ -3178,8 +3060,7 @@ scic_io_request_construct(struct scic_sds_controller *scic, | |||
3178 | else | 3060 | else |
3179 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 3061 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3180 | 3062 | ||
3181 | memset(sci_req->task_context_buffer, 0, | 3063 | memset(sci_req->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); |
3182 | offsetof(struct scu_task_context, sgl_pair_ab)); | ||
3183 | 3064 | ||
3184 | return status; | 3065 | return status; |
3185 | } | 3066 | } |
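Editorial note: the memset above deliberately stops at sgl_pair_ab; the header portion of the task context is cleared for reconstruction while the embedded SGL pairs, which are populated separately, keep their contents. The idiom in isolation, using a placeholder struct rather than the real scu_task_context layout:

    #include <stddef.h>
    #include <string.h>

    struct tc_sketch {
    	unsigned int header[32];	/* protocol/control fields, rebuilt */
    	unsigned int sgl_pair_ab[4];	/* built elsewhere, must survive */
    };

    static void tc_clear_header(struct tc_sketch *tc)
    {
    	/* zeroes header[] only; everything from sgl_pair_ab on is kept */
    	memset(tc, 0, offsetof(struct tc_sketch, sgl_pair_ab));
    }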
@@ -3197,7 +3078,7 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, | |||
3197 | if (dev->dev_type == SAS_END_DEV || | 3078 | if (dev->dev_type == SAS_END_DEV || |
3198 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | 3079 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { |
3199 | sci_req->is_task_management_request = true; | 3080 | sci_req->is_task_management_request = true; |
3200 | memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context)); | 3081 | memset(sci_req->tc, 0, sizeof(struct scu_task_context)); |
3201 | } else | 3082 | } else |
3202 | status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 3083 | status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3203 | 3084 | ||
@@ -3299,7 +3180,7 @@ scic_io_request_construct_smp(struct device *dev, | |||
3299 | 3180 | ||
3300 | /* byte swap the smp request. */ | 3181 | /* byte swap the smp request. */ |
3301 | 3182 | ||
3302 | task_context = scic_sds_request_get_task_context(sci_req); | 3183 | task_context = sci_req->tc; |
3303 | 3184 | ||
3304 | sci_dev = scic_sds_request_get_device(sci_req); | 3185 | sci_dev = scic_sds_request_get_device(sci_req); |
3305 | sci_port = scic_sds_request_get_port(sci_req); | 3186 | sci_port = scic_sds_request_get_port(sci_req); |
@@ -3354,33 +3235,12 @@ scic_io_request_construct_smp(struct device *dev, | |||
3354 | */ | 3235 | */ |
3355 | task_context->task_phase = 0; | 3236 | task_context->task_phase = 0; |
3356 | 3237 | ||
3357 | if (sci_req->was_tag_assigned_by_user) { | 3238 | sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
3358 | /* | 3239 | (scic_sds_controller_get_protocol_engine_group(scic) << |
3359 | * Build the task context now since we have already read | 3240 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
3360 | * the data | 3241 | (scic_sds_port_get_index(sci_port) << |
3361 | */ | 3242 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
3362 | sci_req->post_context = | 3243 | ISCI_TAG_TCI(sci_req->io_tag)); |
3363 | (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | ||
3364 | (scic_sds_controller_get_protocol_engine_group(scic) << | ||
3365 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | ||
3366 | (scic_sds_port_get_index(sci_port) << | ||
3367 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | ||
3368 | ISCI_TAG_TCI(sci_req->io_tag)); | ||
3369 | } else { | ||
3370 | /* | ||
3371 | * Build the task context now since we have already read | ||
3372 | * the data. | ||
3373 | * I/O tag index is not assigned because we have to wait | ||
3374 | * until we get a TCi. | ||
3375 | */ | ||
3376 | sci_req->post_context = | ||
3377 | (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | ||
3378 | (scic_sds_controller_get_protocol_engine_group(scic) << | ||
3379 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | ||
3380 | (scic_sds_port_get_index(sci_port) << | ||
3381 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)); | ||
3382 | } | ||
3383 | |||
3384 | /* | 3244 | /* |
3385 | * Copy the physical address for the command buffer to the SCU Task | 3245 | * Copy the physical address for the command buffer to the SCU Task |
3386 | * Context command buffer should not contain command header. | 3246 | * Context command buffer should not contain command header. |
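Editorial note: because the TCI is known at construction, the SMP post_context is now composed once, unconditionally, rather than branching on who owned the tag. The same composition factored into a standalone helper, as a sketch; the macro values are defined elsewhere in the driver, only the field order comes from this hunk:

    static u32 sketch_make_post_context(u32 pe_group, u32 port_index, u16 io_tag)
    {
    	return SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
    	       (pe_group << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
    	       (port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
    	       ISCI_TAG_TCI(io_tag);
    }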
@@ -3431,10 +3291,10 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) | |||
3431 | * | 3291 | * |
3432 | * SCI_SUCCESS on successful completion, or specific failure code. | 3292 | * SCI_SUCCESS on successful completion, or specific failure code. |
3433 | */ | 3293 | */ |
3434 | static enum sci_status isci_io_request_build( | 3294 | static enum sci_status isci_io_request_build(struct isci_host *isci_host, |
3435 | struct isci_host *isci_host, | 3295 | struct isci_request *request, |
3436 | struct isci_request *request, | 3296 | struct isci_remote_device *isci_device, |
3437 | struct isci_remote_device *isci_device) | 3297 | u16 tag) |
3438 | { | 3298 | { |
3439 | enum sci_status status = SCI_SUCCESS; | 3299 | enum sci_status status = SCI_SUCCESS; |
3440 | struct sas_task *task = isci_request_access_task(request); | 3300 | struct sas_task *task = isci_request_access_task(request); |
@@ -3471,8 +3331,7 @@ static enum sci_status isci_io_request_build( | |||
3471 | * we will let the core allocate the IO tag. | 3331 | * we will let the core allocate the IO tag. |
3472 | */ | 3332 | */ |
3473 | status = scic_io_request_construct(&isci_host->sci, sci_device, | 3333 | status = scic_io_request_construct(&isci_host->sci, sci_device, |
3474 | SCI_CONTROLLER_INVALID_IO_TAG, | 3334 | tag, &request->sci); |
3475 | &request->sci); | ||
3476 | 3335 | ||
3477 | if (status != SCI_SUCCESS) { | 3336 | if (status != SCI_SUCCESS) { |
3478 | dev_warn(&isci_host->pdev->dev, | 3337 | dev_warn(&isci_host->pdev->dev, |
@@ -3564,7 +3423,7 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, | |||
3564 | } | 3423 | } |
3565 | 3424 | ||
3566 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, | 3425 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, |
3567 | struct sas_task *task, gfp_t gfp_flags) | 3426 | struct sas_task *task, u16 tag, gfp_t gfp_flags) |
3568 | { | 3427 | { |
3569 | enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 3428 | enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3570 | struct isci_request *ireq; | 3429 | struct isci_request *ireq; |
@@ -3576,7 +3435,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3576 | if (!ireq) | 3435 | if (!ireq) |
3577 | goto out; | 3436 | goto out; |
3578 | 3437 | ||
3579 | status = isci_io_request_build(ihost, ireq, idev); | 3438 | status = isci_io_request_build(ihost, ireq, idev, tag); |
3580 | if (status != SCI_SUCCESS) { | 3439 | if (status != SCI_SUCCESS) { |
3581 | dev_warn(&ihost->pdev->dev, | 3440 | dev_warn(&ihost->pdev->dev, |
3582 | "%s: request_construct failed - status = 0x%x\n", | 3441 | "%s: request_construct failed - status = 0x%x\n", |
@@ -3599,18 +3458,16 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3599 | */ | 3458 | */ |
3600 | status = scic_controller_start_task(&ihost->sci, | 3459 | status = scic_controller_start_task(&ihost->sci, |
3601 | &idev->sci, | 3460 | &idev->sci, |
3602 | &ireq->sci, | 3461 | &ireq->sci); |
3603 | SCI_CONTROLLER_INVALID_IO_TAG); | ||
3604 | } else { | 3462 | } else { |
3605 | status = SCI_FAILURE; | 3463 | status = SCI_FAILURE; |
3606 | } | 3464 | } |
3607 | } else { | 3465 | } else { |
3608 | |||
3609 | /* send the request, let the core assign the IO TAG. */ | 3466 | /* send the request, let the core assign the IO TAG. */ |
3610 | status = scic_controller_start_io(&ihost->sci, &idev->sci, | 3467 | status = scic_controller_start_io(&ihost->sci, &idev->sci, |
3611 | &ireq->sci, | 3468 | &ireq->sci); |
3612 | SCI_CONTROLLER_INVALID_IO_TAG); | ||
3613 | } | 3469 | } |
3470 | |||
3614 | if (status != SCI_SUCCESS && | 3471 | if (status != SCI_SUCCESS && |
3615 | status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { | 3472 | status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { |
3616 | dev_warn(&ihost->pdev->dev, | 3473 | dev_warn(&ihost->pdev->dev, |
@@ -3647,23 +3504,23 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3647 | if (status == | 3504 | if (status == |
3648 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { | 3505 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { |
3649 | /* Signal libsas that we need the SCSI error | 3506 | /* Signal libsas that we need the SCSI error |
3650 | * handler thread to work on this I/O and that | 3507 | * handler thread to work on this I/O and that |
3651 | * we want a device reset. | 3508 | * we want a device reset. |
3652 | */ | 3509 | */ |
3653 | spin_lock_irqsave(&task->task_state_lock, flags); | 3510 | spin_lock_irqsave(&task->task_state_lock, flags); |
3654 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | 3511 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; |
3655 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 3512 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
3656 | 3513 | ||
3657 | /* Cause this task to be scheduled in the SCSI error | 3514 | /* Cause this task to be scheduled in the SCSI error |
3658 | * handler thread. | 3515 | * handler thread. |
3659 | */ | 3516 | */ |
3660 | isci_execpath_callback(ihost, task, | 3517 | isci_execpath_callback(ihost, task, |
3661 | sas_task_abort); | 3518 | sas_task_abort); |
3662 | 3519 | ||
3663 | /* Change the status, since we are holding | 3520 | /* Change the status, since we are holding |
3664 | * the I/O until it is managed by the SCSI | 3521 | * the I/O until it is managed by the SCSI |
3665 | * error handler. | 3522 | * error handler. |
3666 | */ | 3523 | */ |
3667 | status = SCI_SUCCESS; | 3524 | status = SCI_SUCCESS; |
3668 | } | 3525 | } |
3669 | 3526 | ||
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 9130f22a63b8..8c77c4cbe04a 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h | |||
@@ -136,7 +136,7 @@ struct scic_sds_stp_request { | |||
136 | u8 ending_error; | 136 | u8 ending_error; |
137 | 137 | ||
138 | struct scic_sds_request_pio_sgl { | 138 | struct scic_sds_request_pio_sgl { |
139 | struct scu_sgl_element_pair *sgl_pair; | 139 | int sgl_index; |
140 | u8 sgl_set; | 140 | u8 sgl_set; |
141 | u32 sgl_offset; | 141 | u32 sgl_offset; |
142 | } request_current; | 142 | } request_current; |
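Editorial note: swapping the cached sgl_pair pointer for an index makes the pio cursor position-independent, and a negative index doubles as the "no sgl" sentinel tested in the frame handler earlier in this patch. A usage sketch for initializing the cursor; the helper name is hypothetical:

    static void pio_sgl_cursor_init(struct scic_sds_request_pio_sgl *cur,
    				bool has_data)
    {
    	cur->sgl_index = has_data ? 0 : -1;	/* -1: no data phase */
    	cur->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
    	cur->sgl_offset = 0;
    }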
@@ -172,12 +172,6 @@ struct scic_sds_request { | |||
172 | struct scic_sds_remote_device *target_device; | 172 | struct scic_sds_remote_device *target_device; |
173 | 173 | ||
174 | /* | 174 | /* |
175 | * This field is utilized to determine if the SCI user is managing | ||
176 | * the IO tag for this request or if the core is managing it. | ||
177 | */ | ||
178 | bool was_tag_assigned_by_user; | ||
179 | |||
180 | /* | ||
181 | * This field indicates the IO tag for this request. The IO tag is | 175 | * This field indicates the IO tag for this request. The IO tag is |
182 | * comprised of the task_index and a sequence count. The sequence count | 176 | * comprised of the task_index and a sequence count. The sequence count |
183 | * is utilized to help identify tasks from one life to another. | 177 | * is utilized to help identify tasks from one life to another. |
@@ -209,8 +203,7 @@ struct scic_sds_request { | |||
209 | */ | 203 | */ |
210 | u32 post_context; | 204 | u32 post_context; |
211 | 205 | ||
212 | struct scu_task_context *task_context_buffer; | 206 | struct scu_task_context *tc; |
213 | struct scu_task_context tc ____cacheline_aligned; | ||
214 | 207 | ||
215 | /* could be larger with sg chaining */ | 208 | /* could be larger with sg chaining */ |
216 | #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) | 209 | #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) |
@@ -465,35 +458,6 @@ enum sci_base_request_states { | |||
465 | (request)->sci_status = (sci_status_code); \ | 458 | (request)->sci_status = (sci_status_code); \ |
466 | } | 459 | } |
467 | 460 | ||
468 | /** | ||
469 | * SCU_SGL_ZERO() - | ||
470 | * | ||
471 | * This macro zeros the hardware SGL element data | ||
472 | */ | ||
473 | #define SCU_SGL_ZERO(scu_sge) \ | ||
474 | { \ | ||
475 | (scu_sge).length = 0; \ | ||
476 | (scu_sge).address_lower = 0; \ | ||
477 | (scu_sge).address_upper = 0; \ | ||
478 | (scu_sge).address_modifier = 0; \ | ||
479 | } | ||
480 | |||
481 | /** | ||
482 | * SCU_SGL_COPY() - | ||
483 | * | ||
484 | * This macro copies the SGL element data from the host OS to the hardware SGL | ||
485 | * element data | ||
486 | */ | ||
487 | #define SCU_SGL_COPY(scu_sge, os_sge) \ | ||
488 | { \ | ||
489 | (scu_sge).length = sg_dma_len(sg); \ | ||
490 | (scu_sge).address_upper = \ | ||
491 | upper_32_bits(sg_dma_address(sg)); \ | ||
492 | (scu_sge).address_lower = \ | ||
493 | lower_32_bits(sg_dma_address(sg)); \ | ||
494 | (scu_sge).address_modifier = 0; \ | ||
495 | } | ||
496 | |||
497 | enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); | 461 | enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); |
498 | enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); | 462 | enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); |
499 | enum sci_status | 463 | enum sci_status |
@@ -510,22 +474,6 @@ extern enum sci_status | |||
510 | scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); | 474 | scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); |
511 | 475 | ||
512 | /* XXX open code in caller */ | 476 | /* XXX open code in caller */ |
513 | static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, | ||
514 | dma_addr_t phys_addr) | ||
515 | { | ||
516 | struct isci_request *ireq = sci_req_to_ireq(sci_req); | ||
517 | dma_addr_t offset; | ||
518 | |||
519 | BUG_ON(phys_addr < ireq->request_daddr); | ||
520 | |||
521 | offset = phys_addr - ireq->request_daddr; | ||
522 | |||
523 | BUG_ON(offset >= sizeof(*ireq)); | ||
524 | |||
525 | return (char *)ireq + offset; | ||
526 | } | ||
527 | |||
528 | /* XXX open code in caller */ | ||
529 | static inline dma_addr_t | 477 | static inline dma_addr_t |
530 | scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr) | 478 | scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr) |
531 | { | 479 | { |
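Editorial note: with the phys-to-virt reverse lookup deleted, only the forward mapping whose signature appears above survives. A sketch of what such a helper presumably does, assuming request_daddr is the request's base bus address; the body is inferred from the signature, not quoted:

    static inline dma_addr_t
    sketch_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr)
    {
    	struct isci_request *ireq = sci_req_to_ireq(sci_req);
    	char *base = (char *)ireq;
    	char *addr = virt_addr;

    	BUG_ON(addr < base);
    	BUG_ON((size_t)(addr - base) >= sizeof(*ireq));
    	return ireq->request_daddr + (addr - base);
    }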
@@ -672,7 +620,7 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, | |||
672 | struct isci_tmf *isci_tmf, | 620 | struct isci_tmf *isci_tmf, |
673 | gfp_t gfp_flags); | 621 | gfp_t gfp_flags); |
674 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, | 622 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, |
675 | struct sas_task *task, gfp_t gfp_flags); | 623 | struct sas_task *task, u16 tag, gfp_t gfp_flags); |
676 | void isci_terminate_pending_requests(struct isci_host *ihost, | 624 | void isci_terminate_pending_requests(struct isci_host *ihost, |
677 | struct isci_remote_device *idev); | 625 | struct isci_remote_device *idev); |
678 | enum sci_status | 626 | enum sci_status |
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 157e9978183a..22f6fe171111 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include "request.h" | 63 | #include "request.h" |
64 | #include "sata.h" | 64 | #include "sata.h" |
65 | #include "task.h" | 65 | #include "task.h" |
66 | #include "host.h" | ||
66 | 67 | ||
67 | /** | 68 | /** |
68 | * isci_task_refuse() - complete the request to the upper layer driver in | 69 | * isci_task_refuse() - complete the request to the upper layer driver in |
@@ -156,25 +157,19 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) | |||
156 | { | 157 | { |
157 | struct isci_host *ihost = dev_to_ihost(task->dev); | 158 | struct isci_host *ihost = dev_to_ihost(task->dev); |
158 | struct isci_remote_device *idev; | 159 | struct isci_remote_device *idev; |
159 | enum sci_status status; | ||
160 | unsigned long flags; | 160 | unsigned long flags; |
161 | bool io_ready; | 161 | bool io_ready; |
162 | int ret; | 162 | u16 tag; |
163 | 163 | ||
164 | dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); | 164 | dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); |
165 | 165 | ||
166 | /* Check if we have room for more tasks */ | ||
167 | ret = isci_host_can_queue(ihost, num); | ||
168 | |||
169 | if (ret) { | ||
170 | dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__); | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | for_each_sas_task(num, task) { | 166 | for_each_sas_task(num, task) { |
167 | enum sci_status status = SCI_FAILURE; | ||
168 | |||
175 | spin_lock_irqsave(&ihost->scic_lock, flags); | 169 | spin_lock_irqsave(&ihost->scic_lock, flags); |
176 | idev = isci_lookup_device(task->dev); | 170 | idev = isci_lookup_device(task->dev); |
177 | io_ready = isci_device_io_ready(idev, task); | 171 | io_ready = isci_device_io_ready(idev, task); |
172 | tag = isci_alloc_tag(ihost); | ||
178 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 173 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
179 | 174 | ||
180 | dev_dbg(&ihost->pdev->dev, | 175 | dev_dbg(&ihost->pdev->dev, |
@@ -185,15 +180,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) | |||
185 | if (!idev) { | 180 | if (!idev) { |
186 | isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, | 181 | isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, |
187 | SAS_DEVICE_UNKNOWN); | 182 | SAS_DEVICE_UNKNOWN); |
188 | isci_host_can_dequeue(ihost, 1); | 183 | } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) { |
189 | } else if (!io_ready) { | ||
190 | |||
191 | /* Indicate QUEUE_FULL so that the scsi midlayer | 184 | /* Indicate QUEUE_FULL so that the scsi midlayer |
192 | * retries. | 185 | * retries. |
193 | */ | 186 | */ |
194 | isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, | 187 | isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, |
195 | SAS_QUEUE_FULL); | 188 | SAS_QUEUE_FULL); |
196 | isci_host_can_dequeue(ihost, 1); | ||
197 | } else { | 189 | } else { |
198 | /* There is a device and it's ready for I/O. */ | 190 | /* There is a device and it's ready for I/O. */ |
199 | spin_lock_irqsave(&task->task_state_lock, flags); | 191 | spin_lock_irqsave(&task->task_state_lock, flags); |
@@ -206,13 +198,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) | |||
206 | isci_task_refuse(ihost, task, | 198 | isci_task_refuse(ihost, task, |
207 | SAS_TASK_UNDELIVERED, | 199 | SAS_TASK_UNDELIVERED, |
208 | SAM_STAT_TASK_ABORTED); | 200 | SAM_STAT_TASK_ABORTED); |
209 | isci_host_can_dequeue(ihost, 1); | ||
210 | } else { | 201 | } else { |
211 | task->task_state_flags |= SAS_TASK_AT_INITIATOR; | 202 | task->task_state_flags |= SAS_TASK_AT_INITIATOR; |
212 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 203 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
213 | 204 | ||
214 | /* build and send the request. */ | 205 | /* build and send the request. */ |
215 | status = isci_request_execute(ihost, idev, task, gfp_flags); | 206 | status = isci_request_execute(ihost, idev, task, tag, gfp_flags); |
216 | 207 | ||
217 | if (status != SCI_SUCCESS) { | 208 | if (status != SCI_SUCCESS) { |
218 | 209 | ||
@@ -231,10 +222,17 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) | |||
231 | isci_task_refuse(ihost, task, | 222 | isci_task_refuse(ihost, task, |
232 | SAS_TASK_COMPLETE, | 223 | SAS_TASK_COMPLETE, |
233 | SAS_QUEUE_FULL); | 224 | SAS_QUEUE_FULL); |
234 | isci_host_can_dequeue(ihost, 1); | ||
235 | } | 225 | } |
236 | } | 226 | } |
237 | } | 227 | } |
228 | if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) { | ||
229 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
230 | /* command never hit the device, so just free | ||
231 | * the tci and skip the sequence increment | ||
232 | */ | ||
233 | isci_tci_free(ihost, ISCI_TAG_TCI(tag)); | ||
234 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
235 | } | ||
238 | isci_put_device(idev); | 236 | isci_put_device(idev); |
239 | } | 237 | } |
240 | return 0; | 238 | return 0; |
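Editorial note: the new cleanup block encodes an asymmetry worth spelling out. A command that never reached the hardware only returns its TCI to the pool, while normal completion should also bump the slot's sequence so stale copies of the tag are detectable. A sketch of that completion-side counterpart; the helper name and the sequence-array member are assumptions about code outside this hunk:

    static void sketch_free_tag(struct isci_host *ihost, u16 tag)
    {
    	u16 tci = ISCI_TAG_TCI(tag);

    	ihost->io_request_sequence[tci]++;	/* assumed member: retire tag */
    	isci_tci_free(ihost, tci);		/* slot back on the pool */
    }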
@@ -242,7 +240,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) | |||
242 | 240 | ||
243 | static struct isci_request *isci_task_request_build(struct isci_host *ihost, | 241 | static struct isci_request *isci_task_request_build(struct isci_host *ihost, |
244 | struct isci_remote_device *idev, | 242 | struct isci_remote_device *idev, |
245 | struct isci_tmf *isci_tmf) | 243 | u16 tag, struct isci_tmf *isci_tmf) |
246 | { | 244 | { |
247 | enum sci_status status = SCI_FAILURE; | 245 | enum sci_status status = SCI_FAILURE; |
248 | struct isci_request *ireq = NULL; | 246 | struct isci_request *ireq = NULL; |
@@ -259,8 +257,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
259 | return NULL; | 257 | return NULL; |
260 | 258 | ||
261 | /* let the core do its construct. */ | 259 | /* let the core do its construct. */ |
262 | status = scic_task_request_construct(&ihost->sci, &idev->sci, | 260 | status = scic_task_request_construct(&ihost->sci, &idev->sci, tag, |
263 | SCI_CONTROLLER_INVALID_IO_TAG, | ||
264 | &ireq->sci); | 261 | &ireq->sci); |
265 | 262 | ||
266 | if (status != SCI_SUCCESS) { | 263 | if (status != SCI_SUCCESS) { |
@@ -290,8 +287,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
290 | return ireq; | 287 | return ireq; |
291 | errout: | 288 | errout: |
292 | isci_request_free(ihost, ireq); | 289 | isci_request_free(ihost, ireq); |
293 | ireq = NULL; | 290 | return NULL; |
294 | return ireq; | ||
295 | } | 291 | } |
296 | 292 | ||
297 | int isci_task_execute_tmf(struct isci_host *ihost, | 293 | int isci_task_execute_tmf(struct isci_host *ihost, |
@@ -305,6 +301,14 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
305 | int ret = TMF_RESP_FUNC_FAILED; | 301 | int ret = TMF_RESP_FUNC_FAILED; |
306 | unsigned long flags; | 302 | unsigned long flags; |
307 | unsigned long timeleft; | 303 | unsigned long timeleft; |
304 | u16 tag; | ||
305 | |||
306 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
307 | tag = isci_alloc_tag(ihost); | ||
308 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
309 | |||
310 | if (tag == SCI_CONTROLLER_INVALID_IO_TAG) | ||
311 | return ret; | ||
308 | 312 | ||
309 | /* sanity check, return TMF_RESP_FUNC_FAILED | 313 | /* sanity check, return TMF_RESP_FUNC_FAILED |
310 | * if the device is not there and ready. | 314 | * if the device is not there and ready. |
@@ -316,7 +320,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
316 | "%s: isci_device = %p not ready (%#lx)\n", | 320 | "%s: isci_device = %p not ready (%#lx)\n", |
317 | __func__, | 321 | __func__, |
318 | isci_device, isci_device ? isci_device->flags : 0); | 322 | isci_device, isci_device ? isci_device->flags : 0); |
319 | return TMF_RESP_FUNC_FAILED; | 323 | goto err_tci; |
320 | } else | 324 | } else |
321 | dev_dbg(&ihost->pdev->dev, | 325 | dev_dbg(&ihost->pdev->dev, |
322 | "%s: isci_device = %p\n", | 326 | "%s: isci_device = %p\n", |
@@ -327,22 +331,16 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
327 | /* Assign the pointer to the TMF's completion kernel wait structure. */ | 331 | /* Assign the pointer to the TMF's completion kernel wait structure. */ |
328 | tmf->complete = &completion; | 332 | tmf->complete = &completion; |
329 | 333 | ||
330 | ireq = isci_task_request_build(ihost, isci_device, tmf); | 334 | ireq = isci_task_request_build(ihost, isci_device, tag, tmf); |
331 | if (!ireq) { | 335 | if (!ireq) |
332 | dev_warn(&ihost->pdev->dev, | 336 | goto err_tci; |
333 | "%s: isci_task_request_build failed\n", | ||
334 | __func__); | ||
335 | return TMF_RESP_FUNC_FAILED; | ||
336 | } | ||
337 | 337 | ||
338 | spin_lock_irqsave(&ihost->scic_lock, flags); | 338 | spin_lock_irqsave(&ihost->scic_lock, flags); |
339 | 339 | ||
340 | /* start the TMF io. */ | 340 | /* start the TMF io. */ |
341 | status = scic_controller_start_task( | 341 | status = scic_controller_start_task(&ihost->sci, |
342 | &ihost->sci, | 342 | sci_device, |
343 | sci_device, | 343 | &ireq->sci); |
344 | &ireq->sci, | ||
345 | SCI_CONTROLLER_INVALID_IO_TAG); | ||
346 | 344 | ||
347 | if (status != SCI_TASK_SUCCESS) { | 345 | if (status != SCI_TASK_SUCCESS) { |
348 | dev_warn(&ihost->pdev->dev, | 346 | dev_warn(&ihost->pdev->dev, |
@@ -351,8 +349,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
351 | status, | 349 | status, |
352 | ireq); | 350 | ireq); |
353 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 351 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
354 | isci_request_free(ihost, ireq); | 352 | goto err_ireq; |
355 | return ret; | ||
356 | } | 353 | } |
357 | 354 | ||
358 | if (tmf->cb_state_func != NULL) | 355 | if (tmf->cb_state_func != NULL) |
@@ -403,6 +400,15 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
403 | ireq); | 400 | ireq); |
404 | 401 | ||
405 | return ret; | 402 | return ret; |
403 | |||
404 | err_ireq: | ||
405 | isci_request_free(ihost, ireq); | ||
406 | err_tci: | ||
407 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
408 | isci_tci_free(ihost, ISCI_TAG_TCI(tag)); | ||
409 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
410 | |||
411 | return ret; | ||
406 | } | 412 | } |
407 | 413 | ||
408 | void isci_task_build_tmf( | 414 | void isci_task_build_tmf( |
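Editorial note: taken together, the task.c changes give both submission paths the same tag discipline: allocate under scic_lock, thread the tag through build and execute, and unwind it on any failure short of the hardware. A condensed happy-path sketch using only names visible in this diff; error handling is elided:

    static int sketch_submit(struct isci_host *ihost,
    			 struct isci_remote_device *idev,
    			 struct sas_task *task, gfp_t gfp_flags)
    {
    	unsigned long flags;
    	u16 tag;

    	spin_lock_irqsave(&ihost->scic_lock, flags);
    	tag = isci_alloc_tag(ihost);	/* TCI plus sequence, or invalid */
    	spin_unlock_irqrestore(&ihost->scic_lock, flags);

    	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
    		return -EBUSY;		/* surfaces as QUEUE_FULL upstream */

    	return isci_request_execute(ihost, idev, task, tag, gfp_flags);
    }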