author     Jeff Skirvin <jeffrey.d.skirvin@intel.com>   2012-03-09 01:41:54 -0500
committer  Dan Williams <dan.j.williams@intel.com>      2012-05-17 17:33:38 -0400
commit     14aaa9f0a318bd04cbb9d822524b817e95d8b343 (patch)
tree       5f6292ec671d0993c1df51586dea4fb7750d1cd9
parent     d80ecd5726ce49b5da457d562804b40f0183e8f7 (diff)
isci: Redesign device suspension, abort, cleanup.
This commit changes the means by which outstanding I/Os are handled
for cleanup.
This commit will likely be broken into smaller pieces in a later revision.
Among the changes:
- All completion structures have been removed from the TMF and
abort paths.
- One completed-I/O list is now used, with the request's completed-in-target
flag selecting the normal or error callback path (a condensed sketch of this
dispatch follows the diffstat below).
Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--  drivers/scsi/isci/host.c                 |  86
-rw-r--r--  drivers/scsi/isci/host.h                 |   1
-rw-r--r--  drivers/scsi/isci/init.c                 |   1
-rw-r--r--  drivers/scsi/isci/remote_device.c        | 255
-rw-r--r--  drivers/scsi/isci/remote_device.h        |  15
-rw-r--r--  drivers/scsi/isci/remote_node_context.c  |  23
-rw-r--r--  drivers/scsi/isci/remote_node_context.h  |   4
-rw-r--r--  drivers/scsi/isci/request.c              | 509
-rw-r--r--  drivers/scsi/isci/request.h              | 108
-rw-r--r--  drivers/scsi/isci/task.c                 | 758
-rw-r--r--  drivers/scsi/isci/task.h                 | 132
11 files changed, 418 insertions, 1474 deletions
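The second change listed above is the heart of the host.c rework: completed requests now flow through a single list, and per-request flag bits decide whether libsas gets the normal task_done() callback or the task is pushed into the error handler via sas_task_abort(). The following stand-alone C model condenses that dispatch decision as it appears in the new isci_host_completion_routine() hunk below; the struct, the flag macros, and the stub callbacks are hypothetical stand-ins, not the driver's actual types.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's per-request flag bits. */
#define IREQ_ABORT_PATH_ACTIVE   (1u << 0)
#define IREQ_COMPLETE_IN_TARGET  (1u << 1)
#define SAS_TASK_STATE_ABORTED   (1u << 2)

struct model_request {
	unsigned int req_flags;   /* models request->flags */
	unsigned int task_flags;  /* models task->task_state_flags */
};

/* Stub callbacks standing in for task->task_done() and sas_task_abort(). */
static void task_done(void)      { puts("normal completion to libsas"); }
static void sas_task_abort(void) { puts("kick the task into the error handler"); }

/* Mirrors the dispatch in the reworked completion loop: requests already
 * claimed by an abort path (or already marked aborted) get no callback here;
 * otherwise the "complete in target" bit picks the normal or error path.
 */
static void complete_one(const struct model_request *req)
{
	if ((req->req_flags & IREQ_ABORT_PATH_ACTIVE) ||
	    (req->task_flags & SAS_TASK_STATE_ABORTED))
		return;                 /* the abort path finishes this request */

	if (req->req_flags & IREQ_COMPLETE_IN_TARGET)
		task_done();
	else
		sas_task_abort();
}

int main(void)
{
	struct model_request ok  = { .req_flags = IREQ_COMPLETE_IN_TARGET };
	struct model_request err = { .req_flags = 0 };

	complete_one(&ok);   /* -> normal completion */
	complete_one(&err);  /* -> error path */
	return 0;
}

The point of the single list is that this decision is made once, in the completion tasklet, instead of being pre-computed into separate requests_to_complete and requests_to_errorback lists while holding scic_lock.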
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d241b5722eb3..25d537e2f5c4 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1089,33 +1089,25 @@ void isci_host_completion_routine(unsigned long data)
1089 | { | 1089 | { |
1090 | struct isci_host *ihost = (struct isci_host *)data; | 1090 | struct isci_host *ihost = (struct isci_host *)data; |
1091 | struct list_head completed_request_list; | 1091 | struct list_head completed_request_list; |
1092 | struct list_head errored_request_list; | ||
1093 | struct list_head *current_position; | 1092 | struct list_head *current_position; |
1094 | struct list_head *next_position; | 1093 | struct list_head *next_position; |
1095 | struct isci_request *request; | 1094 | struct isci_request *request; |
1096 | struct isci_request *next_request; | ||
1097 | struct sas_task *task; | 1095 | struct sas_task *task; |
1098 | u16 active; | 1096 | u16 active; |
1099 | 1097 | ||
1100 | INIT_LIST_HEAD(&completed_request_list); | 1098 | INIT_LIST_HEAD(&completed_request_list); |
1101 | INIT_LIST_HEAD(&errored_request_list); | ||
1102 | 1099 | ||
1103 | spin_lock_irq(&ihost->scic_lock); | 1100 | spin_lock_irq(&ihost->scic_lock); |
1104 | 1101 | ||
1105 | sci_controller_completion_handler(ihost); | 1102 | sci_controller_completion_handler(ihost); |
1106 | 1103 | ||
1107 | /* Take the lists of completed I/Os from the host. */ | 1104 | /* Take the lists of completed I/Os from the host. */ |
1108 | |||
1109 | list_splice_init(&ihost->requests_to_complete, | 1105 | list_splice_init(&ihost->requests_to_complete, |
1110 | &completed_request_list); | 1106 | &completed_request_list); |
1111 | 1107 | ||
1112 | /* Take the list of errored I/Os from the host. */ | ||
1113 | list_splice_init(&ihost->requests_to_errorback, | ||
1114 | &errored_request_list); | ||
1115 | |||
1116 | spin_unlock_irq(&ihost->scic_lock); | 1108 | spin_unlock_irq(&ihost->scic_lock); |
1117 | 1109 | ||
1118 | /* Process any completions in the lists. */ | 1110 | /* Process any completions in the list. */ |
1119 | list_for_each_safe(current_position, next_position, | 1111 | list_for_each_safe(current_position, next_position, |
1120 | &completed_request_list) { | 1112 | &completed_request_list) { |
1121 | 1113 | ||
@@ -1123,23 +1115,30 @@ void isci_host_completion_routine(unsigned long data) | |||
1123 | completed_node); | 1115 | completed_node); |
1124 | task = isci_request_access_task(request); | 1116 | task = isci_request_access_task(request); |
1125 | 1117 | ||
1126 | /* Normal notification (task_done) */ | ||
1127 | dev_dbg(&ihost->pdev->dev, | ||
1128 | "%s: Normal - request/task = %p/%p\n", | ||
1129 | __func__, | ||
1130 | request, | ||
1131 | task); | ||
1132 | 1118 | ||
1133 | /* Return the task to libsas */ | 1119 | /* Return the task to libsas */ |
1134 | if (task != NULL) { | 1120 | if (task != NULL) { |
1135 | 1121 | ||
1136 | task->lldd_task = NULL; | 1122 | task->lldd_task = NULL; |
1137 | if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | 1123 | if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &request->flags) && |
1138 | 1124 | !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | |
1139 | /* If the task is already in the abort path, | 1125 | if (test_bit(IREQ_COMPLETE_IN_TARGET, |
1140 | * the task_done callback cannot be called. | 1126 | &request->flags)) { |
1141 | */ | 1127 | |
1142 | task->task_done(task); | 1128 | /* Normal notification (task_done) */ |
1129 | dev_dbg(&ihost->pdev->dev, "%s: Normal" | ||
1130 | " - request/task = %p/%p\n", | ||
1131 | __func__, request, task); | ||
1132 | |||
1133 | task->task_done(task); | ||
1134 | } else { | ||
1135 | dev_warn(&ihost->pdev->dev, | ||
1136 | "%s: Error - request/task" | ||
1137 | " = %p/%p\n", | ||
1138 | __func__, request, task); | ||
1139 | |||
1140 | sas_task_abort(task); | ||
1141 | } | ||
1143 | } | 1142 | } |
1144 | } | 1143 | } |
1145 | 1144 | ||
@@ -1147,44 +1146,6 @@ void isci_host_completion_routine(unsigned long data) | |||
1147 | isci_free_tag(ihost, request->io_tag); | 1146 | isci_free_tag(ihost, request->io_tag); |
1148 | spin_unlock_irq(&ihost->scic_lock); | 1147 | spin_unlock_irq(&ihost->scic_lock); |
1149 | } | 1148 | } |
1150 | list_for_each_entry_safe(request, next_request, &errored_request_list, | ||
1151 | completed_node) { | ||
1152 | |||
1153 | task = isci_request_access_task(request); | ||
1154 | |||
1155 | /* Use sas_task_abort */ | ||
1156 | dev_warn(&ihost->pdev->dev, | ||
1157 | "%s: Error - request/task = %p/%p\n", | ||
1158 | __func__, | ||
1159 | request, | ||
1160 | task); | ||
1161 | |||
1162 | if (task != NULL) { | ||
1163 | |||
1164 | /* Put the task into the abort path if it's not there | ||
1165 | * already. | ||
1166 | */ | ||
1167 | if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) | ||
1168 | sas_task_abort(task); | ||
1169 | |||
1170 | } else { | ||
1171 | /* This is a case where the request has completed with a | ||
1172 | * status such that it needed further target servicing, | ||
1173 | * but the sas_task reference has already been removed | ||
1174 | * from the request. Since it was errored, it was not | ||
1175 | * being aborted, so there is nothing to do except free | ||
1176 | * it. | ||
1177 | */ | ||
1178 | |||
1179 | spin_lock_irq(&ihost->scic_lock); | ||
1180 | /* Remove the request from the remote device's list | ||
1181 | * of pending requests. | ||
1182 | */ | ||
1183 | list_del_init(&request->dev_node); | ||
1184 | isci_free_tag(ihost, request->io_tag); | ||
1185 | spin_unlock_irq(&ihost->scic_lock); | ||
1186 | } | ||
1187 | } | ||
1188 | 1149 | ||
1189 | /* the coalesence timeout doubles at each encoding step, so | 1150 | /* the coalesence timeout doubles at each encoding step, so |
1190 | * update it based on the ilog2 value of the outstanding requests | 1151 | * update it based on the ilog2 value of the outstanding requests |
@@ -2345,7 +2306,6 @@ static int sci_controller_dma_alloc(struct isci_host *ihost) | |||
2345 | 2306 | ||
2346 | ireq->tc = &ihost->task_context_table[i]; | 2307 | ireq->tc = &ihost->task_context_table[i]; |
2347 | ireq->owning_controller = ihost; | 2308 | ireq->owning_controller = ihost; |
2348 | spin_lock_init(&ireq->state_lock); | ||
2349 | ireq->request_daddr = dma; | 2309 | ireq->request_daddr = dma; |
2350 | ireq->isci_host = ihost; | 2310 | ireq->isci_host = ihost; |
2351 | ihost->reqs[i] = ireq; | 2311 | ihost->reqs[i] = ireq; |
@@ -2697,6 +2657,10 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost, | |||
2697 | return SCI_FAILURE_INVALID_STATE; | 2657 | return SCI_FAILURE_INVALID_STATE; |
2698 | } | 2658 | } |
2699 | status = sci_io_request_terminate(ireq); | 2659 | status = sci_io_request_terminate(ireq); |
2660 | |||
2661 | dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n", | ||
2662 | __func__, status, ireq, ireq->flags); | ||
2663 | |||
2700 | if ((status == SCI_SUCCESS) && | 2664 | if ((status == SCI_SUCCESS) && |
2701 | !test_bit(IREQ_PENDING_ABORT, &ireq->flags) && | 2665 | !test_bit(IREQ_PENDING_ABORT, &ireq->flags) && |
2702 | !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) { | 2666 | !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) { |
@@ -2739,6 +2703,8 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost, | |||
2739 | 2703 | ||
2740 | index = ISCI_TAG_TCI(ireq->io_tag); | 2704 | index = ISCI_TAG_TCI(ireq->io_tag); |
2741 | clear_bit(IREQ_ACTIVE, &ireq->flags); | 2705 | clear_bit(IREQ_ACTIVE, &ireq->flags); |
2706 | if (test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) | ||
2707 | wake_up_all(&ihost->eventq); | ||
2742 | return SCI_SUCCESS; | 2708 | return SCI_SUCCESS; |
2743 | default: | 2709 | default: |
2744 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", | 2710 | dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", |
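The host.c hunks above also introduce the synchronization that replaces the old per-request completion structures: when a request that an abort path has claimed finishes, sci_controller_complete_io() now clears IREQ_ACTIVE and calls wake_up_all(&ihost->eventq), while the terminating thread (in the remote_device.c changes further down) sets IREQ_ABORT_PATH_ACTIVE, issues the terminate, and sleeps in wait_event() until IREQ_ACTIVE clears. A minimal userspace model of that handshake follows, with a pthread mutex/condvar standing in for scic_lock and eventq; every name in it is a stand-in, not driver code.

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t lock   = PTHREAD_MUTEX_INITIALIZER;  /* models scic_lock */
static pthread_cond_t  eventq = PTHREAD_COND_INITIALIZER;   /* models ihost->eventq */
static bool req_active = true;          /* models IREQ_ACTIVE */
static bool abort_path_active;          /* models IREQ_ABORT_PATH_ACTIVE */

/* Completion side: clear "active" and wake waiters only when an abort path
 * has claimed the request (mirrors the new sci_controller_complete_io()).
 */
static void *complete_io(void *arg)
{
	(void)arg;
	sleep(1);                       /* pretend the hardware finishes later */
	pthread_mutex_lock(&lock);
	req_active = false;
	if (abort_path_active)
		pthread_cond_broadcast(&eventq);   /* wake_up_all(&ihost->eventq) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Abort side: claim the request, "terminate" it, then wait for it to
	 * go inactive (mirrors the terminate-and-wait in remote_device.c).
	 */
	pthread_mutex_lock(&lock);
	abort_path_active = true;       /* set_bit(IREQ_ABORT_PATH_ACTIVE, ...) */
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, complete_io, NULL);

	pthread_mutex_lock(&lock);
	while (req_active)              /* wait_event(eventq, !IREQ_ACTIVE) */
		pthread_cond_wait(&eventq, &lock);
	pthread_mutex_unlock(&lock);

	puts("request no longer active; abort path may proceed");
	pthread_join(t, NULL);
	return 0;
}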
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 7272a0a375f2..eaa13c0be09a 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -205,7 +205,6 @@ struct isci_host {
205 | wait_queue_head_t eventq; | 205 | wait_queue_head_t eventq; |
206 | struct tasklet_struct completion_tasklet; | 206 | struct tasklet_struct completion_tasklet; |
207 | struct list_head requests_to_complete; | 207 | struct list_head requests_to_complete; |
208 | struct list_head requests_to_errorback; | ||
209 | spinlock_t scic_lock; | 208 | spinlock_t scic_lock; |
210 | struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; | 209 | struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; |
211 | struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; | 210 | struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; |
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 9e1c83e425ed..39f12703b893 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -556,7 +556,6 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
556 | } | 556 | } |
557 | 557 | ||
558 | INIT_LIST_HEAD(&ihost->requests_to_complete); | 558 | INIT_LIST_HEAD(&ihost->requests_to_complete); |
559 | INIT_LIST_HEAD(&ihost->requests_to_errorback); | ||
560 | for (i = 0; i < SCI_MAX_PORTS; i++) { | 559 | for (i = 0; i < SCI_MAX_PORTS; i++) { |
561 | struct isci_port *iport = &ihost->ports[i]; | 560 | struct isci_port *iport = &ihost->ports[i]; |
562 | 561 | ||
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 3048e02aeb7b..c47304cea45d 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -81,49 +81,6 @@ static enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev
81 | } | 81 | } |
82 | 82 | ||
83 | /** | 83 | /** |
84 | * isci_remote_device_not_ready() - This function is called by the ihost when | ||
85 | * the remote device is not ready. We mark the isci device as ready (not | ||
86 | * "ready_for_io") and signal the waiting proccess. | ||
87 | * @isci_host: This parameter specifies the isci host object. | ||
88 | * @isci_device: This parameter specifies the remote device | ||
89 | * | ||
90 | * sci_lock is held on entrance to this function. | ||
91 | */ | ||
92 | static void isci_remote_device_not_ready(struct isci_host *ihost, | ||
93 | struct isci_remote_device *idev, u32 reason) | ||
94 | { | ||
95 | struct isci_request *ireq; | ||
96 | |||
97 | dev_dbg(&ihost->pdev->dev, | ||
98 | "%s: isci_device = %p\n", __func__, idev); | ||
99 | |||
100 | switch (reason) { | ||
101 | case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED: | ||
102 | set_bit(IDEV_GONE, &idev->flags); | ||
103 | break; | ||
104 | case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: | ||
105 | set_bit(IDEV_IO_NCQERROR, &idev->flags); | ||
106 | |||
107 | /* Suspend the remote device so the I/O can be terminated. */ | ||
108 | sci_remote_device_suspend(idev); | ||
109 | |||
110 | /* Kill all outstanding requests for the device. */ | ||
111 | list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) { | ||
112 | |||
113 | dev_dbg(&ihost->pdev->dev, | ||
114 | "%s: isci_device = %p request = %p\n", | ||
115 | __func__, idev, ireq); | ||
116 | |||
117 | sci_controller_terminate_request(ihost, idev, ireq); | ||
118 | } | ||
119 | /* Fall through into the default case... */ | ||
120 | default: | ||
121 | clear_bit(IDEV_IO_READY, &idev->flags); | ||
122 | break; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | /** | ||
127 | * isci_remote_device_ready() - This function is called by the ihost when the | 84 | * isci_remote_device_ready() - This function is called by the ihost when the |
128 | * remote device is ready. We mark the isci device as ready and signal the | 85 | * remote device is ready. We mark the isci device as ready and signal the |
129 | * waiting proccess. | 86 | * waiting proccess. |
@@ -142,49 +99,121 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote | |||
142 | wake_up(&ihost->eventq); | 99 | wake_up(&ihost->eventq); |
143 | } | 100 | } |
144 | 101 | ||
145 | static int isci_remote_device_suspendcheck(struct isci_remote_device *idev) | 102 | static enum sci_status sci_remote_device_terminate_req( |
103 | struct isci_host *ihost, | ||
104 | struct isci_remote_device *idev, | ||
105 | int check_abort, | ||
106 | struct isci_request *ireq) | ||
107 | { | ||
108 | dev_dbg(&ihost->pdev->dev, | ||
109 | "%s: idev=%p; flags=%lx; req=%p; req target=%p\n", | ||
110 | __func__, idev, idev->flags, ireq, ireq->target_device); | ||
111 | |||
112 | if (!test_bit(IREQ_ACTIVE, &ireq->flags) || | ||
113 | (ireq->target_device != idev) || | ||
114 | (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags))) | ||
115 | return SCI_SUCCESS; | ||
116 | |||
117 | set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); | ||
118 | |||
119 | return sci_controller_terminate_request(ihost, idev, ireq); | ||
120 | } | ||
121 | |||
122 | static enum sci_status sci_remote_device_terminate_reqs_checkabort( | ||
123 | struct isci_remote_device *idev, | ||
124 | int chk) | ||
146 | { | 125 | { |
147 | return test_bit(IDEV_TXRX_SUSPENDED, &idev->flags) | 126 | struct isci_host *ihost = idev->owning_port->owning_controller; |
148 | || !test_bit(IDEV_ALLOCATED, &idev->flags); | 127 | enum sci_status status = SCI_SUCCESS; |
128 | u32 i; | ||
129 | |||
130 | for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { | ||
131 | struct isci_request *ireq = ihost->reqs[i]; | ||
132 | enum sci_status s; | ||
133 | |||
134 | s = sci_remote_device_terminate_req(ihost, idev, chk, ireq); | ||
135 | if (s != SCI_SUCCESS) | ||
136 | status = s; | ||
137 | } | ||
138 | return status; | ||
149 | } | 139 | } |
150 | 140 | ||
151 | enum sci_status isci_remote_device_suspend( | 141 | enum sci_status isci_remote_device_terminate_requests( |
152 | struct isci_host *ihost, | 142 | struct isci_host *ihost, |
153 | struct isci_remote_device *idev) | 143 | struct isci_remote_device *idev, |
144 | struct isci_request *ireq) | ||
154 | { | 145 | { |
155 | enum sci_status status; | 146 | enum sci_status status = SCI_SUCCESS; |
156 | unsigned long flags; | 147 | unsigned long flags; |
157 | 148 | ||
158 | spin_lock_irqsave(&ihost->scic_lock, flags); | 149 | spin_lock_irqsave(&ihost->scic_lock, flags); |
159 | if (isci_get_device(idev->domain_dev) == NULL) { | 150 | if (isci_get_device(idev) == NULL) { |
151 | dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n", | ||
152 | __func__, idev); | ||
160 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 153 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
161 | status = SCI_FAILURE; | 154 | status = SCI_FAILURE; |
162 | } else { | 155 | } else { |
163 | status = sci_remote_device_suspend(idev); | 156 | dev_dbg(&ihost->pdev->dev, |
164 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 157 | "%s: idev=%p, ireq=%p; started_request_count=%d, " |
165 | if (status == SCI_SUCCESS) { | 158 | "about to wait\n", |
166 | dev_dbg(&ihost->pdev->dev, | 159 | __func__, idev, ireq, idev->started_request_count); |
167 | "%s: idev=%p, about to wait\n", | 160 | if (ireq) { |
168 | __func__, idev); | 161 | /* Terminate a specific TC. */ |
169 | wait_event(ihost->eventq, | 162 | sci_remote_device_terminate_req(ihost, idev, 0, ireq); |
170 | isci_remote_device_suspendcheck(idev)); | 163 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
171 | status = test_bit(IDEV_TXRX_SUSPENDED, &idev->flags) | 164 | wait_event(ihost->eventq, !test_bit(IREQ_ACTIVE, |
172 | ? SCI_SUCCESS : SCI_FAILURE; | 165 | &ireq->flags)); |
173 | dev_dbg(&ihost->pdev->dev, | ||
174 | "%s: idev=%p, wait done, device is %s\n", | ||
175 | __func__, idev, | ||
176 | test_bit(IDEV_TXRX_SUSPENDED, &idev->flags) | ||
177 | ? "<suspended>" : "<deallocated!>"); | ||
178 | 166 | ||
179 | } else | 167 | } else { |
180 | dev_dbg(scirdev_to_dev(idev), | 168 | /* Terminate all TCs. */ |
181 | "%s: sci_remote_device_suspend failed, " | 169 | sci_remote_device_terminate_requests(idev); |
182 | "status = %d\n", __func__, status); | 170 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
171 | wait_event(ihost->eventq, | ||
172 | idev->started_request_count == 0); | ||
173 | } | ||
174 | dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n", | ||
175 | __func__, idev); | ||
183 | isci_put_device(idev); | 176 | isci_put_device(idev); |
184 | } | 177 | } |
185 | return status; | 178 | return status; |
186 | } | 179 | } |
187 | 180 | ||
181 | /** | ||
182 | * isci_remote_device_not_ready() - This function is called by the ihost when | ||
183 | * the remote device is not ready. We mark the isci device as ready (not | ||
184 | * "ready_for_io") and signal the waiting proccess. | ||
185 | * @isci_host: This parameter specifies the isci host object. | ||
186 | * @isci_device: This parameter specifies the remote device | ||
187 | * | ||
188 | * sci_lock is held on entrance to this function. | ||
189 | */ | ||
190 | static void isci_remote_device_not_ready(struct isci_host *ihost, | ||
191 | struct isci_remote_device *idev, | ||
192 | u32 reason) | ||
193 | { | ||
194 | dev_dbg(&ihost->pdev->dev, | ||
195 | "%s: isci_device = %p\n", __func__, idev); | ||
196 | |||
197 | switch (reason) { | ||
198 | case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED: | ||
199 | set_bit(IDEV_GONE, &idev->flags); | ||
200 | break; | ||
201 | case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: | ||
202 | set_bit(IDEV_IO_NCQERROR, &idev->flags); | ||
203 | |||
204 | /* Suspend the remote device so the I/O can be terminated. */ | ||
205 | sci_remote_device_suspend(idev); | ||
206 | |||
207 | /* Kill all outstanding requests for the device. */ | ||
208 | sci_remote_device_terminate_requests(idev); | ||
209 | |||
210 | /* Fall through into the default case... */ | ||
211 | default: | ||
212 | clear_bit(IDEV_IO_READY, &idev->flags); | ||
213 | break; | ||
214 | } | ||
215 | } | ||
216 | |||
188 | /* called once the remote node context is ready to be freed. | 217 | /* called once the remote node context is ready to be freed. |
189 | * The remote device can now report that its stop operation is complete. none | 218 | * The remote device can now report that its stop operation is complete. none |
190 | */ | 219 | */ |
@@ -196,36 +225,10 @@ static void rnc_destruct_done(void *_dev) | |||
196 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); | 225 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); |
197 | } | 226 | } |
198 | 227 | ||
199 | static enum sci_status sci_remote_device_terminate_requests_checkabort( | ||
200 | struct isci_remote_device *idev, | ||
201 | int check_abort_pending) | ||
202 | { | ||
203 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
204 | enum sci_status status = SCI_SUCCESS; | ||
205 | u32 i; | ||
206 | |||
207 | for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { | ||
208 | struct isci_request *ireq = ihost->reqs[i]; | ||
209 | enum sci_status s; | ||
210 | |||
211 | if (!test_bit(IREQ_ACTIVE, &ireq->flags) || | ||
212 | (ireq->target_device != idev) || | ||
213 | (check_abort_pending && !test_bit(IREQ_PENDING_ABORT, | ||
214 | &ireq->flags))) | ||
215 | continue; | ||
216 | |||
217 | s = sci_controller_terminate_request(ihost, idev, ireq); | ||
218 | if (s != SCI_SUCCESS) | ||
219 | status = s; | ||
220 | } | ||
221 | |||
222 | return status; | ||
223 | } | ||
224 | |||
225 | enum sci_status sci_remote_device_terminate_requests( | 228 | enum sci_status sci_remote_device_terminate_requests( |
226 | struct isci_remote_device *idev) | 229 | struct isci_remote_device *idev) |
227 | { | 230 | { |
228 | return sci_remote_device_terminate_requests_checkabort(idev, 0); | 231 | return sci_remote_device_terminate_reqs_checkabort(idev, 0); |
229 | } | 232 | } |
230 | 233 | ||
231 | enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, | 234 | enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, |
@@ -771,10 +774,6 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
771 | if (status != SCI_SUCCESS) | 774 | if (status != SCI_SUCCESS) |
772 | return status; | 775 | return status; |
773 | 776 | ||
774 | status = sci_remote_node_context_start_task(&idev->rnc, ireq); | ||
775 | if (status != SCI_SUCCESS) | ||
776 | goto out; | ||
777 | |||
778 | status = sci_request_start(ireq); | 777 | status = sci_request_start(ireq); |
779 | if (status != SCI_SUCCESS) | 778 | if (status != SCI_SUCCESS) |
780 | goto out; | 779 | goto out; |
@@ -796,8 +795,9 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
796 | sci_remote_node_context_suspend( | 795 | sci_remote_node_context_suspend( |
797 | &idev->rnc, SCI_SOFTWARE_SUSPENSION, | 796 | &idev->rnc, SCI_SOFTWARE_SUSPENSION, |
798 | SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT, NULL, NULL); | 797 | SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT, NULL, NULL); |
799 | sci_remote_node_context_resume( | 798 | |
800 | &idev->rnc, sci_remote_device_continue_request, idev); | 799 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, |
800 | sci_remote_device_continue_request, idev); | ||
801 | 801 | ||
802 | out: | 802 | out: |
803 | sci_remote_device_start_request(idev, ireq, status); | 803 | sci_remote_device_start_request(idev, ireq, status); |
@@ -811,7 +811,9 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | |||
811 | if (status != SCI_SUCCESS) | 811 | if (status != SCI_SUCCESS) |
812 | return status; | 812 | return status; |
813 | 813 | ||
814 | status = sci_remote_node_context_start_task(&idev->rnc, ireq); | 814 | /* Resume the RNC as needed: */ |
815 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, | ||
816 | NULL, NULL); | ||
815 | if (status != SCI_SUCCESS) | 817 | if (status != SCI_SUCCESS) |
816 | break; | 818 | break; |
817 | 819 | ||
@@ -1322,20 +1324,6 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, | |||
1322 | return status; | 1324 | return status; |
1323 | } | 1325 | } |
1324 | 1326 | ||
1325 | void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev) | ||
1326 | { | ||
1327 | DECLARE_COMPLETION_ONSTACK(aborted_task_completion); | ||
1328 | |||
1329 | dev_dbg(&ihost->pdev->dev, | ||
1330 | "%s: idev = %p\n", __func__, idev); | ||
1331 | |||
1332 | /* Cleanup all requests pending for this device. */ | ||
1333 | isci_terminate_pending_requests(ihost, idev); | ||
1334 | |||
1335 | dev_dbg(&ihost->pdev->dev, | ||
1336 | "%s: idev = %p, done\n", __func__, idev); | ||
1337 | } | ||
1338 | |||
1339 | /** | 1327 | /** |
1340 | * This function builds the isci_remote_device when a libsas dev_found message | 1328 | * This function builds the isci_remote_device when a libsas dev_found message |
1341 | * is received. | 1329 | * is received. |
@@ -1495,32 +1483,28 @@ int isci_remote_device_found(struct domain_device *dev) | |||
1495 | return status == SCI_SUCCESS ? 0 : -ENODEV; | 1483 | return status == SCI_SUCCESS ? 0 : -ENODEV; |
1496 | } | 1484 | } |
1497 | 1485 | ||
1498 | enum sci_status isci_remote_device_reset( | 1486 | enum sci_status isci_remote_device_suspend_terminate( |
1499 | struct isci_host *ihost, | 1487 | struct isci_host *ihost, |
1500 | struct isci_remote_device *idev) | 1488 | struct isci_remote_device *idev, |
1489 | struct isci_request *ireq) | ||
1501 | { | 1490 | { |
1502 | unsigned long flags; | 1491 | unsigned long flags; |
1503 | enum sci_status status; | 1492 | enum sci_status status; |
1504 | 1493 | ||
1505 | /* Put the device into a reset state so the suspension will not | 1494 | /* Put the device into suspension. */ |
1506 | * automatically resume. | ||
1507 | */ | ||
1508 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1495 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1509 | status = sci_remote_device_reset(idev); | 1496 | sci_remote_device_suspend(idev); |
1510 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1497 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1511 | if (status != SCI_SUCCESS) { | 1498 | |
1512 | dev_dbg(&ihost->pdev->dev, | 1499 | /* Terminate and wait for the completions. */ |
1513 | "%s: sci_remote_device_reset(%p) returned %d!\n", | 1500 | status = isci_remote_device_terminate_requests(ihost, idev, ireq); |
1514 | __func__, idev, status); | 1501 | if (status != SCI_SUCCESS) |
1515 | return status; | ||
1516 | } | ||
1517 | /* Wait for the device suspend. */ | ||
1518 | status = isci_remote_device_suspend(ihost, idev); | ||
1519 | if (status != SCI_SUCCESS) { | ||
1520 | dev_dbg(&ihost->pdev->dev, | 1502 | dev_dbg(&ihost->pdev->dev, |
1521 | "%s: isci_remote_device_suspend(%p) returned %d!\n", | 1503 | "%s: isci_remote_device_terminate_requests(%p) " |
1504 | "returned %d!\n", | ||
1522 | __func__, idev, status); | 1505 | __func__, idev, status); |
1523 | } | 1506 | |
1507 | /* NOTE: RNC resumption is left to the caller! */ | ||
1524 | return status; | 1508 | return status; |
1525 | } | 1509 | } |
1526 | 1510 | ||
@@ -1533,7 +1517,7 @@ int isci_remote_device_is_safe_to_abort( | |||
1533 | enum sci_status sci_remote_device_abort_requests_pending_abort( | 1517 | enum sci_status sci_remote_device_abort_requests_pending_abort( |
1534 | struct isci_remote_device *idev) | 1518 | struct isci_remote_device *idev) |
1535 | { | 1519 | { |
1536 | return sci_remote_device_terminate_requests_checkabort(idev, 1); | 1520 | return sci_remote_device_terminate_reqs_checkabort(idev, 1); |
1537 | } | 1521 | } |
1538 | 1522 | ||
1539 | enum sci_status isci_remote_device_reset_complete( | 1523 | enum sci_status isci_remote_device_reset_complete( |
@@ -1545,7 +1529,6 @@ enum sci_status isci_remote_device_reset_complete( | |||
1545 | 1529 | ||
1546 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1530 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1547 | status = sci_remote_device_reset_complete(idev); | 1531 | status = sci_remote_device_reset_complete(idev); |
1548 | sci_remote_device_resume(idev, NULL, NULL); | ||
1549 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1532 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1550 | 1533 | ||
1551 | return status; | 1534 | return status; |
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index a6a376e200ef..da43698e9eba 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -85,7 +85,6 @@ struct isci_remote_device {
85 | #define IDEV_GONE 3 | 85 | #define IDEV_GONE 3 |
86 | #define IDEV_IO_READY 4 | 86 | #define IDEV_IO_READY 4 |
87 | #define IDEV_IO_NCQERROR 5 | 87 | #define IDEV_IO_NCQERROR 5 |
88 | #define IDEV_TXRX_SUSPENDED 6 | ||
89 | unsigned long flags; | 88 | unsigned long flags; |
90 | struct kref kref; | 89 | struct kref kref; |
91 | struct isci_port *isci_port; | 90 | struct isci_port *isci_port; |
@@ -107,10 +106,8 @@ struct isci_remote_device { | |||
107 | 106 | ||
108 | /* device reference routines must be called under sci_lock */ | 107 | /* device reference routines must be called under sci_lock */ |
109 | static inline struct isci_remote_device *isci_get_device( | 108 | static inline struct isci_remote_device *isci_get_device( |
110 | struct domain_device *dev) | 109 | struct isci_remote_device *idev) |
111 | { | 110 | { |
112 | struct isci_remote_device *idev = dev->lldd_dev; | ||
113 | |||
114 | if (idev) | 111 | if (idev) |
115 | kref_get(&idev->kref); | 112 | kref_get(&idev->kref); |
116 | return idev; | 113 | return idev; |
@@ -378,4 +375,14 @@ enum sci_status isci_remote_device_reset( | |||
378 | enum sci_status isci_remote_device_reset_complete( | 375 | enum sci_status isci_remote_device_reset_complete( |
379 | struct isci_host *ihost, | 376 | struct isci_host *ihost, |
380 | struct isci_remote_device *idev); | 377 | struct isci_remote_device *idev); |
378 | |||
379 | enum sci_status isci_remote_device_suspend_terminate( | ||
380 | struct isci_host *ihost, | ||
381 | struct isci_remote_device *idev, | ||
382 | struct isci_request *ireq); | ||
383 | |||
384 | enum sci_status isci_remote_device_terminate_requests( | ||
385 | struct isci_host *ihost, | ||
386 | struct isci_remote_device *idev, | ||
387 | struct isci_request *ireq); | ||
381 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ | 388 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ |
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 7a8347e51767..feeca17f0f13 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -317,8 +317,6 @@ static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_
317 | struct isci_remote_device *idev = rnc_to_dev(rnc); | 317 | struct isci_remote_device *idev = rnc_to_dev(rnc); |
318 | struct isci_host *ihost = idev->owning_port->owning_controller; | 318 | struct isci_host *ihost = idev->owning_port->owning_controller; |
319 | 319 | ||
320 | set_bit(IDEV_TXRX_SUSPENDED, &idev->flags); | ||
321 | |||
322 | /* Terminate outstanding requests pending abort. */ | 320 | /* Terminate outstanding requests pending abort. */ |
323 | sci_remote_device_abort_requests_pending_abort(idev); | 321 | sci_remote_device_abort_requests_pending_abort(idev); |
324 | 322 | ||
@@ -326,16 +324,6 @@ static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_ | |||
326 | sci_remote_node_context_continue_state_transitions(rnc); | 324 | sci_remote_node_context_continue_state_transitions(rnc); |
327 | } | 325 | } |
328 | 326 | ||
329 | static void sci_remote_node_context_tx_rx_suspended_state_exit( | ||
330 | struct sci_base_state_machine *sm) | ||
331 | { | ||
332 | struct sci_remote_node_context *rnc | ||
333 | = container_of(sm, typeof(*rnc), sm); | ||
334 | struct isci_remote_device *idev = rnc_to_dev(rnc); | ||
335 | |||
336 | clear_bit(IDEV_TXRX_SUSPENDED, &idev->flags); | ||
337 | } | ||
338 | |||
339 | static void sci_remote_node_context_await_suspend_state_exit( | 327 | static void sci_remote_node_context_await_suspend_state_exit( |
340 | struct sci_base_state_machine *sm) | 328 | struct sci_base_state_machine *sm) |
341 | { | 329 | { |
@@ -366,8 +354,6 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = { | |||
366 | }, | 354 | }, |
367 | [SCI_RNC_TX_RX_SUSPENDED] = { | 355 | [SCI_RNC_TX_RX_SUSPENDED] = { |
368 | .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, | 356 | .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, |
369 | .exit_state | ||
370 | = sci_remote_node_context_tx_rx_suspended_state_exit, | ||
371 | }, | 357 | }, |
372 | [SCI_RNC_AWAIT_SUSPENSION] = { | 358 | [SCI_RNC_AWAIT_SUSPENSION] = { |
373 | .exit_state = sci_remote_node_context_await_suspend_state_exit, | 359 | .exit_state = sci_remote_node_context_await_suspend_state_exit, |
@@ -671,8 +657,11 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context | |||
671 | } | 657 | } |
672 | } | 658 | } |
673 | 659 | ||
674 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, | 660 | enum sci_status sci_remote_node_context_start_task( |
675 | struct isci_request *ireq) | 661 | struct sci_remote_node_context *sci_rnc, |
662 | struct isci_request *ireq, | ||
663 | scics_sds_remote_node_context_callback cb_fn, | ||
664 | void *cb_p) | ||
676 | { | 665 | { |
677 | enum scis_sds_remote_node_context_states state; | 666 | enum scis_sds_remote_node_context_states state; |
678 | 667 | ||
@@ -684,7 +673,7 @@ enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_contex | |||
684 | return SCI_SUCCESS; | 673 | return SCI_SUCCESS; |
685 | case SCI_RNC_TX_SUSPENDED: | 674 | case SCI_RNC_TX_SUSPENDED: |
686 | case SCI_RNC_TX_RX_SUSPENDED: | 675 | case SCI_RNC_TX_RX_SUSPENDED: |
687 | sci_remote_node_context_resume(sci_rnc, NULL, NULL); | 676 | sci_remote_node_context_resume(sci_rnc, cb_fn, cb_p); |
688 | return SCI_SUCCESS; | 677 | return SCI_SUCCESS; |
689 | default: | 678 | default: |
690 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 679 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index 5ddf88b53133..2870af14edab 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -211,7 +211,9 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s
211 | scics_sds_remote_node_context_callback cb_fn, | 211 | scics_sds_remote_node_context_callback cb_fn, |
212 | void *cb_p); | 212 | void *cb_p); |
213 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, | 213 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, |
214 | struct isci_request *ireq); | 214 | struct isci_request *ireq, |
215 | scics_sds_remote_node_context_callback cb_fn, | ||
216 | void *cb_p); | ||
215 | enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, | 217 | enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, |
216 | struct isci_request *ireq); | 218 | struct isci_request *ireq); |
217 | int sci_remote_node_context_is_safe_to_abort( | 219 | int sci_remote_node_context_is_safe_to_abort( |
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 1f314d0d71d5..f4e80f31423c 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2491,9 +2491,6 @@ static void isci_request_process_response_iu(
2491 | * @request: This parameter is the completed isci_request object. | 2491 | * @request: This parameter is the completed isci_request object. |
2492 | * @response_ptr: This parameter specifies the service response for the I/O. | 2492 | * @response_ptr: This parameter specifies the service response for the I/O. |
2493 | * @status_ptr: This parameter specifies the exec status for the I/O. | 2493 | * @status_ptr: This parameter specifies the exec status for the I/O. |
2494 | * @complete_to_host_ptr: This parameter specifies the action to be taken by | ||
2495 | * the LLDD with respect to completing this request or forcing an abort | ||
2496 | * condition on the I/O. | ||
2497 | * @open_rej_reason: This parameter specifies the encoded reason for the | 2494 | * @open_rej_reason: This parameter specifies the encoded reason for the |
2498 | * abandon-class reject. | 2495 | * abandon-class reject. |
2499 | * | 2496 | * |
@@ -2504,14 +2501,12 @@ static void isci_request_set_open_reject_status( | |||
2504 | struct sas_task *task, | 2501 | struct sas_task *task, |
2505 | enum service_response *response_ptr, | 2502 | enum service_response *response_ptr, |
2506 | enum exec_status *status_ptr, | 2503 | enum exec_status *status_ptr, |
2507 | enum isci_completion_selection *complete_to_host_ptr, | ||
2508 | enum sas_open_rej_reason open_rej_reason) | 2504 | enum sas_open_rej_reason open_rej_reason) |
2509 | { | 2505 | { |
2510 | /* Task in the target is done. */ | 2506 | /* Task in the target is done. */ |
2511 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2507 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2512 | *response_ptr = SAS_TASK_UNDELIVERED; | 2508 | *response_ptr = SAS_TASK_UNDELIVERED; |
2513 | *status_ptr = SAS_OPEN_REJECT; | 2509 | *status_ptr = SAS_OPEN_REJECT; |
2514 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2515 | task->task_status.open_rej_reason = open_rej_reason; | 2510 | task->task_status.open_rej_reason = open_rej_reason; |
2516 | } | 2511 | } |
2517 | 2512 | ||
@@ -2521,9 +2516,6 @@ static void isci_request_set_open_reject_status( | |||
2521 | * @request: This parameter is the completed isci_request object. | 2516 | * @request: This parameter is the completed isci_request object. |
2522 | * @response_ptr: This parameter specifies the service response for the I/O. | 2517 | * @response_ptr: This parameter specifies the service response for the I/O. |
2523 | * @status_ptr: This parameter specifies the exec status for the I/O. | 2518 | * @status_ptr: This parameter specifies the exec status for the I/O. |
2524 | * @complete_to_host_ptr: This parameter specifies the action to be taken by | ||
2525 | * the LLDD with respect to completing this request or forcing an abort | ||
2526 | * condition on the I/O. | ||
2527 | * | 2519 | * |
2528 | * none. | 2520 | * none. |
2529 | */ | 2521 | */ |
@@ -2532,8 +2524,7 @@ static void isci_request_handle_controller_specific_errors( | |||
2532 | struct isci_request *request, | 2524 | struct isci_request *request, |
2533 | struct sas_task *task, | 2525 | struct sas_task *task, |
2534 | enum service_response *response_ptr, | 2526 | enum service_response *response_ptr, |
2535 | enum exec_status *status_ptr, | 2527 | enum exec_status *status_ptr) |
2536 | enum isci_completion_selection *complete_to_host_ptr) | ||
2537 | { | 2528 | { |
2538 | unsigned int cstatus; | 2529 | unsigned int cstatus; |
2539 | 2530 | ||
@@ -2574,9 +2565,6 @@ static void isci_request_handle_controller_specific_errors( | |||
2574 | *status_ptr = SAS_ABORTED_TASK; | 2565 | *status_ptr = SAS_ABORTED_TASK; |
2575 | 2566 | ||
2576 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2567 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2577 | |||
2578 | *complete_to_host_ptr = | ||
2579 | isci_perform_normal_io_completion; | ||
2580 | } else { | 2568 | } else { |
2581 | /* Task in the target is not done. */ | 2569 | /* Task in the target is not done. */ |
2582 | *response_ptr = SAS_TASK_UNDELIVERED; | 2570 | *response_ptr = SAS_TASK_UNDELIVERED; |
@@ -2587,9 +2575,6 @@ static void isci_request_handle_controller_specific_errors( | |||
2587 | *status_ptr = SAM_STAT_TASK_ABORTED; | 2575 | *status_ptr = SAM_STAT_TASK_ABORTED; |
2588 | 2576 | ||
2589 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2577 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2590 | |||
2591 | *complete_to_host_ptr = | ||
2592 | isci_perform_error_io_completion; | ||
2593 | } | 2578 | } |
2594 | 2579 | ||
2595 | break; | 2580 | break; |
@@ -2618,8 +2603,6 @@ static void isci_request_handle_controller_specific_errors( | |||
2618 | *status_ptr = SAS_ABORTED_TASK; | 2603 | *status_ptr = SAS_ABORTED_TASK; |
2619 | 2604 | ||
2620 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2605 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2621 | |||
2622 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2623 | break; | 2606 | break; |
2624 | 2607 | ||
2625 | 2608 | ||
@@ -2630,7 +2613,7 @@ static void isci_request_handle_controller_specific_errors( | |||
2630 | 2613 | ||
2631 | isci_request_set_open_reject_status( | 2614 | isci_request_set_open_reject_status( |
2632 | request, task, response_ptr, status_ptr, | 2615 | request, task, response_ptr, status_ptr, |
2633 | complete_to_host_ptr, SAS_OREJ_WRONG_DEST); | 2616 | SAS_OREJ_WRONG_DEST); |
2634 | break; | 2617 | break; |
2635 | 2618 | ||
2636 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | 2619 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
@@ -2640,56 +2623,56 @@ static void isci_request_handle_controller_specific_errors( | |||
2640 | */ | 2623 | */ |
2641 | isci_request_set_open_reject_status( | 2624 | isci_request_set_open_reject_status( |
2642 | request, task, response_ptr, status_ptr, | 2625 | request, task, response_ptr, status_ptr, |
2643 | complete_to_host_ptr, SAS_OREJ_RESV_AB0); | 2626 | SAS_OREJ_RESV_AB0); |
2644 | break; | 2627 | break; |
2645 | 2628 | ||
2646 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | 2629 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2647 | 2630 | ||
2648 | isci_request_set_open_reject_status( | 2631 | isci_request_set_open_reject_status( |
2649 | request, task, response_ptr, status_ptr, | 2632 | request, task, response_ptr, status_ptr, |
2650 | complete_to_host_ptr, SAS_OREJ_RESV_AB1); | 2633 | SAS_OREJ_RESV_AB1); |
2651 | break; | 2634 | break; |
2652 | 2635 | ||
2653 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | 2636 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2654 | 2637 | ||
2655 | isci_request_set_open_reject_status( | 2638 | isci_request_set_open_reject_status( |
2656 | request, task, response_ptr, status_ptr, | 2639 | request, task, response_ptr, status_ptr, |
2657 | complete_to_host_ptr, SAS_OREJ_RESV_AB2); | 2640 | SAS_OREJ_RESV_AB2); |
2658 | break; | 2641 | break; |
2659 | 2642 | ||
2660 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | 2643 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2661 | 2644 | ||
2662 | isci_request_set_open_reject_status( | 2645 | isci_request_set_open_reject_status( |
2663 | request, task, response_ptr, status_ptr, | 2646 | request, task, response_ptr, status_ptr, |
2664 | complete_to_host_ptr, SAS_OREJ_RESV_AB3); | 2647 | SAS_OREJ_RESV_AB3); |
2665 | break; | 2648 | break; |
2666 | 2649 | ||
2667 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | 2650 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2668 | 2651 | ||
2669 | isci_request_set_open_reject_status( | 2652 | isci_request_set_open_reject_status( |
2670 | request, task, response_ptr, status_ptr, | 2653 | request, task, response_ptr, status_ptr, |
2671 | complete_to_host_ptr, SAS_OREJ_BAD_DEST); | 2654 | SAS_OREJ_BAD_DEST); |
2672 | break; | 2655 | break; |
2673 | 2656 | ||
2674 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | 2657 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: |
2675 | 2658 | ||
2676 | isci_request_set_open_reject_status( | 2659 | isci_request_set_open_reject_status( |
2677 | request, task, response_ptr, status_ptr, | 2660 | request, task, response_ptr, status_ptr, |
2678 | complete_to_host_ptr, SAS_OREJ_STP_NORES); | 2661 | SAS_OREJ_STP_NORES); |
2679 | break; | 2662 | break; |
2680 | 2663 | ||
2681 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | 2664 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: |
2682 | 2665 | ||
2683 | isci_request_set_open_reject_status( | 2666 | isci_request_set_open_reject_status( |
2684 | request, task, response_ptr, status_ptr, | 2667 | request, task, response_ptr, status_ptr, |
2685 | complete_to_host_ptr, SAS_OREJ_EPROTO); | 2668 | SAS_OREJ_EPROTO); |
2686 | break; | 2669 | break; |
2687 | 2670 | ||
2688 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | 2671 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: |
2689 | 2672 | ||
2690 | isci_request_set_open_reject_status( | 2673 | isci_request_set_open_reject_status( |
2691 | request, task, response_ptr, status_ptr, | 2674 | request, task, response_ptr, status_ptr, |
2692 | complete_to_host_ptr, SAS_OREJ_CONN_RATE); | 2675 | SAS_OREJ_CONN_RATE); |
2693 | break; | 2676 | break; |
2694 | 2677 | ||
2695 | case SCU_TASK_DONE_LL_R_ERR: | 2678 | case SCU_TASK_DONE_LL_R_ERR: |
@@ -2721,95 +2704,12 @@ static void isci_request_handle_controller_specific_errors( | |||
2721 | *response_ptr = SAS_TASK_UNDELIVERED; | 2704 | *response_ptr = SAS_TASK_UNDELIVERED; |
2722 | *status_ptr = SAM_STAT_TASK_ABORTED; | 2705 | *status_ptr = SAM_STAT_TASK_ABORTED; |
2723 | 2706 | ||
2724 | if (task->task_proto == SAS_PROTOCOL_SMP) { | 2707 | if (task->task_proto == SAS_PROTOCOL_SMP) |
2725 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2708 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2726 | 2709 | else | |
2727 | *complete_to_host_ptr = isci_perform_normal_io_completion; | ||
2728 | } else { | ||
2729 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2710 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2730 | |||
2731 | *complete_to_host_ptr = isci_perform_error_io_completion; | ||
2732 | } | ||
2733 | break; | ||
2734 | } | ||
2735 | } | ||
2736 | |||
2737 | /** | ||
2738 | * isci_task_save_for_upper_layer_completion() - This function saves the | ||
2739 | * request for later completion to the upper layer driver. | ||
2740 | * @host: This parameter is a pointer to the host on which the the request | ||
2741 | * should be queued (either as an error or success). | ||
2742 | * @request: This parameter is the completed request. | ||
2743 | * @response: This parameter is the response code for the completed task. | ||
2744 | * @status: This parameter is the status code for the completed task. | ||
2745 | * | ||
2746 | * none. | ||
2747 | */ | ||
2748 | static void isci_task_save_for_upper_layer_completion( | ||
2749 | struct isci_host *host, | ||
2750 | struct isci_request *request, | ||
2751 | enum service_response response, | ||
2752 | enum exec_status status, | ||
2753 | enum isci_completion_selection task_notification_selection) | ||
2754 | { | ||
2755 | struct sas_task *task = isci_request_access_task(request); | ||
2756 | |||
2757 | task_notification_selection | ||
2758 | = isci_task_set_completion_status(task, response, status, | ||
2759 | task_notification_selection); | ||
2760 | |||
2761 | /* Tasks aborted specifically by a call to the lldd_abort_task | ||
2762 | * function should not be completed to the host in the regular path. | ||
2763 | */ | ||
2764 | switch (task_notification_selection) { | ||
2765 | |||
2766 | case isci_perform_normal_io_completion: | ||
2767 | /* Normal notification (task_done) */ | ||
2768 | |||
2769 | /* Add to the completed list. */ | ||
2770 | list_add(&request->completed_node, | ||
2771 | &host->requests_to_complete); | ||
2772 | |||
2773 | /* Take the request off the device's pending request list. */ | ||
2774 | list_del_init(&request->dev_node); | ||
2775 | break; | ||
2776 | |||
2777 | case isci_perform_aborted_io_completion: | ||
2778 | /* No notification to libsas because this request is | ||
2779 | * already in the abort path. | ||
2780 | */ | ||
2781 | /* Wake up whatever process was waiting for this | ||
2782 | * request to complete. | ||
2783 | */ | ||
2784 | WARN_ON(request->io_request_completion == NULL); | ||
2785 | |||
2786 | if (request->io_request_completion != NULL) { | ||
2787 | |||
2788 | /* Signal whoever is waiting that this | ||
2789 | * request is complete. | ||
2790 | */ | ||
2791 | complete(request->io_request_completion); | ||
2792 | } | ||
2793 | break; | ||
2794 | |||
2795 | case isci_perform_error_io_completion: | ||
2796 | /* Use sas_task_abort */ | ||
2797 | /* Add to the aborted list. */ | ||
2798 | list_add(&request->completed_node, | ||
2799 | &host->requests_to_errorback); | ||
2800 | break; | ||
2801 | |||
2802 | default: | ||
2803 | /* Add to the error to libsas list. */ | ||
2804 | list_add(&request->completed_node, | ||
2805 | &host->requests_to_errorback); | ||
2806 | break; | 2711 | break; |
2807 | } | 2712 | } |
2808 | dev_dbg(&host->pdev->dev, | ||
2809 | "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n", | ||
2810 | __func__, task_notification_selection, task, | ||
2811 | (task) ? task->task_status.resp : 0, response, | ||
2812 | (task) ? task->task_status.stat : 0, status); | ||
2813 | } | 2713 | } |
2814 | 2714 | ||
2815 | static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) | 2715 | static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) |
@@ -2844,9 +2744,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2844 | struct isci_remote_device *idev = request->target_device; | 2744 | struct isci_remote_device *idev = request->target_device; |
2845 | enum service_response response = SAS_TASK_UNDELIVERED; | 2745 | enum service_response response = SAS_TASK_UNDELIVERED; |
2846 | enum exec_status status = SAS_ABORTED_TASK; | 2746 | enum exec_status status = SAS_ABORTED_TASK; |
2847 | enum isci_request_status request_status; | ||
2848 | enum isci_completion_selection complete_to_host | ||
2849 | = isci_perform_normal_io_completion; | ||
2850 | 2747 | ||
2851 | dev_dbg(&ihost->pdev->dev, | 2748 | dev_dbg(&ihost->pdev->dev, |
2852 | "%s: request = %p, task = %p,\n" | 2749 | "%s: request = %p, task = %p,\n" |
@@ -2857,282 +2754,158 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2857 | task->data_dir, | 2754 | task->data_dir, |
2858 | completion_status); | 2755 | completion_status); |
2859 | 2756 | ||
2860 | spin_lock(&request->state_lock); | 2757 | /* The request is done from an SCU HW perspective. */ |
2861 | request_status = request->status; | ||
2862 | |||
2863 | /* Decode the request status. Note that if the request has been | ||
2864 | * aborted by a task management function, we don't care | ||
2865 | * what the status is. | ||
2866 | */ | ||
2867 | switch (request_status) { | ||
2868 | |||
2869 | case aborted: | ||
2870 | /* "aborted" indicates that the request was aborted by a task | ||
2871 | * management function, since once a task management request is | ||
2872 | * perfomed by the device, the request only completes because | ||
2873 | * of the subsequent driver terminate. | ||
2874 | * | ||
2875 | * Aborted also means an external thread is explicitly managing | ||
2876 | * this request, so that we do not complete it up the stack. | ||
2877 | * | ||
2878 | * The target is still there (since the TMF was successful). | ||
2879 | */ | ||
2880 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2881 | response = SAS_TASK_COMPLETE; | ||
2882 | 2758 | ||
2883 | /* See if the device has been/is being stopped. Note | 2759 | /* This is an active request being completed from the core. */ |
2884 | * that we ignore the quiesce state, since we are | 2760 | switch (completion_status) { |
2885 | * concerned about the actual device state. | ||
2886 | */ | ||
2887 | if (!idev) | ||
2888 | status = SAS_DEVICE_UNKNOWN; | ||
2889 | else | ||
2890 | status = SAS_ABORTED_TASK; | ||
2891 | 2761 | ||
2892 | complete_to_host = isci_perform_aborted_io_completion; | 2762 | case SCI_IO_FAILURE_RESPONSE_VALID: |
2893 | /* This was an aborted request. */ | 2763 | dev_dbg(&ihost->pdev->dev, |
2764 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", | ||
2765 | __func__, request, task); | ||
2894 | 2766 | ||
2895 | spin_unlock(&request->state_lock); | 2767 | if (sas_protocol_ata(task->task_proto)) { |
2896 | break; | 2768 | isci_process_stp_response(task, &request->stp.rsp); |
2769 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { | ||
2897 | 2770 | ||
2898 | case aborting: | 2771 | /* crack the iu response buffer. */ |
2899 | /* aborting means that the task management function tried and | 2772 | resp_iu = &request->ssp.rsp; |
2900 | * failed to abort the request. We need to note the request | 2773 | isci_request_process_response_iu(task, resp_iu, |
2901 | * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the | 2774 | &ihost->pdev->dev); |
2902 | * target as down. | ||
2903 | * | ||
2904 | * Aborting also means an external thread is explicitly managing | ||
2905 | * this request, so that we do not complete it up the stack. | ||
2906 | */ | ||
2907 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2908 | response = SAS_TASK_UNDELIVERED; | ||
2909 | 2775 | ||
2910 | if (!idev) | 2776 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { |
2911 | /* The device has been /is being stopped. Note that | ||
2912 | * we ignore the quiesce state, since we are | ||
2913 | * concerned about the actual device state. | ||
2914 | */ | ||
2915 | status = SAS_DEVICE_UNKNOWN; | ||
2916 | else | ||
2917 | status = SAS_PHY_DOWN; | ||
2918 | 2777 | ||
2919 | complete_to_host = isci_perform_aborted_io_completion; | 2778 | dev_err(&ihost->pdev->dev, |
2779 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " | ||
2780 | "SAS_PROTOCOL_SMP protocol\n", | ||
2781 | __func__); | ||
2920 | 2782 | ||
2921 | /* This was an aborted request. */ | 2783 | } else |
2784 | dev_err(&ihost->pdev->dev, | ||
2785 | "%s: unknown protocol\n", __func__); | ||
2922 | 2786 | ||
2923 | spin_unlock(&request->state_lock); | 2787 | /* use the task status set in the task struct by the |
2788 | * isci_request_process_response_iu call. | ||
2789 | */ | ||
2790 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
2791 | response = task->task_status.resp; | ||
2792 | status = task->task_status.stat; | ||
2924 | break; | 2793 | break; |
2925 | 2794 | ||
2926 | case terminating: | 2795 | case SCI_IO_SUCCESS: |
2796 | case SCI_IO_SUCCESS_IO_DONE_EARLY: | ||
2927 | 2797 | ||
2928 | /* This was an terminated request. This happens when | 2798 | response = SAS_TASK_COMPLETE; |
2929 | * the I/O is being terminated because of an action on | 2799 | status = SAM_STAT_GOOD; |
2930 | * the device (reset, tear down, etc.), and the I/O needs | ||
2931 | * to be completed up the stack. | ||
2932 | */ | ||
2933 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2800 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2934 | response = SAS_TASK_UNDELIVERED; | ||
2935 | 2801 | ||
2936 | /* See if the device has been/is being stopped. Note | 2802 | if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { |
2937 | * that we ignore the quiesce state, since we are | ||
2938 | * concerned about the actual device state. | ||
2939 | */ | ||
2940 | if (!idev) | ||
2941 | status = SAS_DEVICE_UNKNOWN; | ||
2942 | else | ||
2943 | status = SAS_ABORTED_TASK; | ||
2944 | |||
2945 | complete_to_host = isci_perform_aborted_io_completion; | ||
2946 | |||
2947 | /* This was a terminated request. */ | ||
2948 | |||
2949 | spin_unlock(&request->state_lock); | ||
2950 | break; | ||
2951 | 2803 | ||
2952 | case dead: | 2804 | /* This was an SSP / STP / SATA transfer. |
2953 | /* This was a terminated request that timed-out during the | 2805 | * There is a possibility that less data than |
2954 | * termination process. There is no task to complete to | 2806 | * the maximum was transferred. |
2955 | * libsas. | 2807 | */ |
2956 | */ | 2808 | u32 transferred_length = sci_req_tx_bytes(request); |
2957 | complete_to_host = isci_perform_normal_io_completion; | ||
2958 | spin_unlock(&request->state_lock); | ||
2959 | break; | ||
2960 | |||
2961 | default: | ||
2962 | |||
2963 | /* The request is done from an SCU HW perspective. */ | ||
2964 | request->status = completed; | ||
2965 | 2809 | ||
2966 | spin_unlock(&request->state_lock); | 2810 | task->task_status.residual |
2811 | = task->total_xfer_len - transferred_length; | ||
2967 | 2812 | ||
2968 | /* This is an active request being completed from the core. */ | 2813 | /* If there were residual bytes, call this an |
2969 | switch (completion_status) { | 2814 | * underrun. |
2815 | */ | ||
2816 | if (task->task_status.residual != 0) | ||
2817 | status = SAS_DATA_UNDERRUN; | ||
2970 | 2818 | ||
2971 | case SCI_IO_FAILURE_RESPONSE_VALID: | ||
2972 | dev_dbg(&ihost->pdev->dev, | 2819 | dev_dbg(&ihost->pdev->dev, |
2973 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", | 2820 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", |
2974 | __func__, | 2821 | __func__, status); |
2975 | request, | ||
2976 | task); | ||
2977 | |||
2978 | if (sas_protocol_ata(task->task_proto)) { | ||
2979 | isci_process_stp_response(task, &request->stp.rsp); | ||
2980 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { | ||
2981 | |||
2982 | /* crack the iu response buffer. */ | ||
2983 | resp_iu = &request->ssp.rsp; | ||
2984 | isci_request_process_response_iu(task, resp_iu, | ||
2985 | &ihost->pdev->dev); | ||
2986 | |||
2987 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { | ||
2988 | 2822 | ||
2989 | dev_err(&ihost->pdev->dev, | 2823 | } else |
2990 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " | 2824 | dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", |
2991 | "SAS_PROTOCOL_SMP protocol\n", | 2825 | __func__); |
2992 | __func__); | 2826 | break; |
2993 | |||
2994 | } else | ||
2995 | dev_err(&ihost->pdev->dev, | ||
2996 | "%s: unknown protocol\n", __func__); | ||
2997 | |||
2998 | /* use the task status set in the task struct by the | ||
2999 | * isci_request_process_response_iu call. | ||
3000 | */ | ||
3001 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
3002 | response = task->task_status.resp; | ||
3003 | status = task->task_status.stat; | ||
3004 | break; | ||
3005 | |||
3006 | case SCI_IO_SUCCESS: | ||
3007 | case SCI_IO_SUCCESS_IO_DONE_EARLY: | ||
3008 | |||
3009 | response = SAS_TASK_COMPLETE; | ||
3010 | status = SAM_STAT_GOOD; | ||
3011 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
3012 | |||
3013 | if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { | ||
3014 | |||
3015 | /* This was an SSP / STP / SATA transfer. | ||
3016 | * There is a possibility that less data than | ||
3017 | * the maximum was transferred. | ||
3018 | */ | ||
3019 | u32 transferred_length = sci_req_tx_bytes(request); | ||
3020 | |||
3021 | task->task_status.residual | ||
3022 | = task->total_xfer_len - transferred_length; | ||
3023 | 2827 | ||
3024 | /* If there were residual bytes, call this an | 2828 | case SCI_IO_FAILURE_TERMINATED: |
3025 | * underrun. | ||
3026 | */ | ||
3027 | if (task->task_status.residual != 0) | ||
3028 | status = SAS_DATA_UNDERRUN; | ||
3029 | 2829 | ||
3030 | dev_dbg(&ihost->pdev->dev, | 2830 | dev_dbg(&ihost->pdev->dev, |
3031 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", | 2831 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", |
3032 | __func__, | 2832 | __func__, request, task); |
3033 | status); | ||
3034 | 2833 | ||
3035 | } else | 2834 | /* The request was terminated explicitly. */ |
3036 | dev_dbg(&ihost->pdev->dev, | 2835 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
3037 | "%s: SCI_IO_SUCCESS\n", | 2836 | response = SAS_TASK_UNDELIVERED; |
3038 | __func__); | ||
3039 | 2837 | ||
3040 | break; | 2838 | /* See if the device has been/is being stopped. Note |
2839 | * that we ignore the quiesce state, since we are | ||
2840 | * concerned about the actual device state. | ||
2841 | */ | ||
2842 | if (!idev) | ||
2843 | status = SAS_DEVICE_UNKNOWN; | ||
2844 | else | ||
2845 | status = SAS_ABORTED_TASK; | ||
2846 | break; | ||
3041 | 2847 | ||
3042 | case SCI_IO_FAILURE_TERMINATED: | 2848 | case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: |
3043 | dev_dbg(&ihost->pdev->dev, | ||
3044 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", | ||
3045 | __func__, | ||
3046 | request, | ||
3047 | task); | ||
3048 | 2849 | ||
3049 | /* The request was terminated explicitly. No handling | 2850 | isci_request_handle_controller_specific_errors(idev, request, |
3050 | * is needed in the SCSI error handler path. | 2851 | task, &response, |
3051 | */ | 2852 | &status); |
3052 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2853 | break; |
3053 | response = SAS_TASK_UNDELIVERED; | ||
3054 | 2854 | ||
3055 | /* See if the device has been/is being stopped. Note | 2855 | case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: |
3056 | * that we ignore the quiesce state, since we are | 2856 | /* This is a special case, in that the I/O completion |
3057 | * concerned about the actual device state. | 2857 | * is telling us that the device needs a reset. |
3058 | */ | 2858 | * In order for the device reset condition to be |
3059 | if (!idev) | 2859 | * noticed, the I/O has to be handled in the error |
3060 | status = SAS_DEVICE_UNKNOWN; | 2860 | * handler. Set the reset flag and cause the |
3061 | else | 2861 | * SCSI error thread to be scheduled. |
3062 | status = SAS_ABORTED_TASK; | 2862 | */ |
2863 | spin_lock_irqsave(&task->task_state_lock, task_flags); | ||
2864 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | ||
2865 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
3063 | 2866 | ||
3064 | complete_to_host = isci_perform_normal_io_completion; | 2867 | /* Fail the I/O. */ |
3065 | break; | 2868 | response = SAS_TASK_UNDELIVERED; |
2869 | status = SAM_STAT_TASK_ABORTED; | ||
3066 | 2870 | ||
3067 | case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: | 2871 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
2872 | break; | ||
3068 | 2873 | ||
3069 | isci_request_handle_controller_specific_errors( | 2874 | case SCI_FAILURE_RETRY_REQUIRED: |
3070 | idev, request, task, &response, &status, | ||
3071 | &complete_to_host); | ||
3072 | 2875 | ||
3073 | break; | 2876 | /* Fail the I/O so it can be retried. */ |
2877 | response = SAS_TASK_UNDELIVERED; | ||
2878 | if (!idev) | ||
2879 | status = SAS_DEVICE_UNKNOWN; | ||
2880 | else | ||
2881 | status = SAS_ABORTED_TASK; | ||
3074 | 2882 | ||
3075 | case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: | 2883 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
3076 | /* This is a special case, in that the I/O completion | 2884 | break; |
3077 | * is telling us that the device needs a reset. | ||
3078 | * In order for the device reset condition to be | ||
3079 | * noticed, the I/O has to be handled in the error | ||
3080 | * handler. Set the reset flag and cause the | ||
3081 | * SCSI error thread to be scheduled. | ||
3082 | */ | ||
3083 | spin_lock_irqsave(&task->task_state_lock, task_flags); | ||
3084 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | ||
3085 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
3086 | 2885 | ||
3087 | /* Fail the I/O. */ | ||
3088 | response = SAS_TASK_UNDELIVERED; | ||
3089 | status = SAM_STAT_TASK_ABORTED; | ||
3090 | 2886 | ||
3091 | complete_to_host = isci_perform_error_io_completion; | 2887 | default: |
3092 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2888 | /* Catch any otherwise unhandled error codes here. */ |
3093 | break; | 2889 | dev_dbg(&ihost->pdev->dev, |
2890 | "%s: invalid completion code: 0x%x - " | ||
2891 | "isci_request = %p\n", | ||
2892 | __func__, completion_status, request); | ||
3094 | 2893 | ||
3095 | case SCI_FAILURE_RETRY_REQUIRED: | 2894 | response = SAS_TASK_UNDELIVERED; |
3096 | 2895 | ||
3097 | /* Fail the I/O so it can be retried. */ | 2896 | /* See if the device has been/is being stopped. Note |
3098 | response = SAS_TASK_UNDELIVERED; | 2897 | * that we ignore the quiesce state, since we are |
3099 | if (!idev) | 2898 | * concerned about the actual device state. |
3100 | status = SAS_DEVICE_UNKNOWN; | 2899 | */ |
3101 | else | 2900 | if (!idev) |
3102 | status = SAS_ABORTED_TASK; | 2901 | status = SAS_DEVICE_UNKNOWN; |
2902 | else | ||
2903 | status = SAS_ABORTED_TASK; | ||
3103 | 2904 | ||
3104 | complete_to_host = isci_perform_normal_io_completion; | 2905 | if (SAS_PROTOCOL_SMP == task->task_proto) |
3105 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | 2906 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); |
3106 | break; | 2907 | else |
3107 | 2908 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | |
3108 | |||
3109 | default: | ||
3110 | /* Catch any otherwise unhandled error codes here. */ | ||
3111 | dev_dbg(&ihost->pdev->dev, | ||
3112 | "%s: invalid completion code: 0x%x - " | ||
3113 | "isci_request = %p\n", | ||
3114 | __func__, completion_status, request); | ||
3115 | |||
3116 | response = SAS_TASK_UNDELIVERED; | ||
3117 | |||
3118 | /* See if the device has been/is being stopped. Note | ||
3119 | * that we ignore the quiesce state, since we are | ||
3120 | * concerned about the actual device state. | ||
3121 | */ | ||
3122 | if (!idev) | ||
3123 | status = SAS_DEVICE_UNKNOWN; | ||
3124 | else | ||
3125 | status = SAS_ABORTED_TASK; | ||
3126 | |||
3127 | if (SAS_PROTOCOL_SMP == task->task_proto) { | ||
3128 | set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
3129 | complete_to_host = isci_perform_normal_io_completion; | ||
3130 | } else { | ||
3131 | clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); | ||
3132 | complete_to_host = isci_perform_error_io_completion; | ||
3133 | } | ||
3134 | break; | ||
3135 | } | ||
3136 | break; | 2909 | break; |
3137 | } | 2910 | } |
3138 | 2911 | ||
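The SCI_IO_SUCCESS_IO_DONE_EARLY handling above is the one place the new code computes a residual: whatever the controller did not transfer is reported to libsas as an underrun. The standalone sketch below (illustrative helper name and values, not part of the driver) shows just that arithmetic.

#include <stdio.h>

/* Residual = bytes requested minus bytes the controller actually moved;
 * any non-zero residual is reported upward as SAS_DATA_UNDERRUN.
 */
static unsigned int residual_bytes(unsigned int total_xfer_len,
                                   unsigned int transferred_length)
{
        return total_xfer_len - transferred_length;
}

int main(void)
{
        unsigned int residual = residual_bytes(4096, 3584);

        printf("residual=%u -> %s\n", residual,
               residual ? "SAS_DATA_UNDERRUN" : "SAM_STAT_GOOD");
        return 0;
}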
@@ -3167,10 +2940,24 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
3167 | break; | 2940 | break; |
3168 | } | 2941 | } |
3169 | 2942 | ||
3170 | /* Put the completed request on the correct list */ | 2943 | spin_lock_irqsave(&task->task_state_lock, task_flags); |
3171 | isci_task_save_for_upper_layer_completion(ihost, request, response, | 2944 | |
3172 | status, complete_to_host | 2945 | task->task_status.resp = response; |
3173 | ); | 2946 | task->task_status.stat = status; |
2947 | |||
2948 | if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { | ||
2949 | /* Normal notification (task_done) */ | ||
2950 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
2951 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
2952 | SAS_TASK_STATE_PENDING); | ||
2953 | } | ||
2954 | spin_unlock_irqrestore(&task->task_state_lock, task_flags); | ||
2955 | |||
2956 | /* Add to the completed list. */ | ||
2957 | list_add(&request->completed_node, &ihost->requests_to_complete); | ||
2958 | |||
2959 | /* Take the request off the device's pending request list. */ | ||
2960 | list_del_init(&request->dev_node); | ||
3174 | 2961 | ||
3175 | /* complete the io request to the core. */ | 2962 | /* complete the io request to the core. */ |
3176 | sci_controller_complete_io(ihost, request->target_device, request); | 2963 | sci_controller_complete_io(ihost, request->target_device, request); |
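The tail that follows the big completion-status switch is what replaces the old per-request status enum, state lock, and errored-request list: the SAS response and status are written into the sas_task, the IREQ_COMPLETE_IN_TARGET bit alone decides whether the task is marked done for a normal task_done() callback, and the request lands on the single requests_to_complete list. A condensed sketch of that tail, reusing the structures and kernel primitives shown in the hunk (not a drop-in function):

static void isci_completion_tail_sketch(struct isci_host *ihost,
                                        struct isci_request *request,
                                        struct sas_task *task,
                                        enum service_response response,
                                        enum exec_status status)
{
        unsigned long task_flags;

        spin_lock_irqsave(&task->task_state_lock, task_flags);
        task->task_status.resp = response;
        task->task_status.stat = status;

        /* IREQ_COMPLETE_IN_TARGET selects the normal (task_done) path;
         * a request still owned by the abort path leaves the task
         * pending so the SCSI error handler finishes it instead.
         */
        if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
                task->task_state_flags |= SAS_TASK_STATE_DONE;
                task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
                                            SAS_TASK_STATE_PENDING);
        }
        spin_unlock_irqrestore(&task->task_state_lock, task_flags);

        /* One completed-I/O list, drained later by the host tasklet. */
        list_add(&request->completed_node, &ihost->requests_to_complete);
        list_del_init(&request->dev_node);
}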
@@ -3626,7 +3413,6 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t | |||
3626 | ireq->num_sg_entries = 0; | 3413 | ireq->num_sg_entries = 0; |
3627 | INIT_LIST_HEAD(&ireq->completed_node); | 3414 | INIT_LIST_HEAD(&ireq->completed_node); |
3628 | INIT_LIST_HEAD(&ireq->dev_node); | 3415 | INIT_LIST_HEAD(&ireq->dev_node); |
3629 | isci_request_change_state(ireq, allocated); | ||
3630 | 3416 | ||
3631 | return ireq; | 3417 | return ireq; |
3632 | } | 3418 | } |
@@ -3721,15 +3507,12 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3721 | */ | 3507 | */ |
3722 | list_add(&ireq->dev_node, &idev->reqs_in_process); | 3508 | list_add(&ireq->dev_node, &idev->reqs_in_process); |
3723 | 3509 | ||
3724 | if (status == SCI_SUCCESS) { | 3510 | if (status != SCI_SUCCESS) { |
3725 | isci_request_change_state(ireq, started); | ||
3726 | } else { | ||
3727 | /* The request did not really start in the | 3511 | /* The request did not really start in the |
3728 | * hardware, so clear the request handle | 3512 | * hardware, so clear the request handle |
3729 | * here so no terminations will be done. | 3513 | * here so no terminations will be done. |
3730 | */ | 3514 | */ |
3731 | set_bit(IREQ_TERMINATED, &ireq->flags); | 3515 | set_bit(IREQ_TERMINATED, &ireq->flags); |
3732 | isci_request_change_state(ireq, completed); | ||
3733 | } | 3516 | } |
3734 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 3517 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
3735 | 3518 | ||
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 8d55f78010aa..f3116a51235f 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h | |||
@@ -61,23 +61,6 @@ | |||
61 | #include "scu_task_context.h" | 61 | #include "scu_task_context.h" |
62 | 62 | ||
63 | /** | 63 | /** |
64 | * struct isci_request_status - This enum defines the possible states of an I/O | ||
65 | * request. | ||
66 | * | ||
67 | * | ||
68 | */ | ||
69 | enum isci_request_status { | ||
70 | unallocated = 0x00, | ||
71 | allocated = 0x01, | ||
72 | started = 0x02, | ||
73 | completed = 0x03, | ||
74 | aborting = 0x04, | ||
75 | aborted = 0x05, | ||
76 | terminating = 0x06, | ||
77 | dead = 0x07 | ||
78 | }; | ||
79 | |||
80 | /** | ||
81 | * isci_stp_request - extra request infrastructure to handle pio/atapi protocol | 64 | * isci_stp_request - extra request infrastructure to handle pio/atapi protocol |
82 | * @pio_len - number of bytes requested at PIO setup | 65 | * @pio_len - number of bytes requested at PIO setup |
83 | * @status - pio setup ending status value to tell us if we need | 66 | * @status - pio setup ending status value to tell us if we need |
@@ -97,13 +80,13 @@ struct isci_stp_request { | |||
97 | }; | 80 | }; |
98 | 81 | ||
99 | struct isci_request { | 82 | struct isci_request { |
100 | enum isci_request_status status; | ||
101 | #define IREQ_COMPLETE_IN_TARGET 0 | 83 | #define IREQ_COMPLETE_IN_TARGET 0 |
102 | #define IREQ_TERMINATED 1 | 84 | #define IREQ_TERMINATED 1 |
103 | #define IREQ_TMF 2 | 85 | #define IREQ_TMF 2 |
104 | #define IREQ_ACTIVE 3 | 86 | #define IREQ_ACTIVE 3 |
105 | #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */ | 87 | #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */ |
106 | #define IREQ_TC_ABORT_POSTED 5 | 88 | #define IREQ_TC_ABORT_POSTED 5 |
89 | #define IREQ_ABORT_PATH_ACTIVE 6 | ||
107 | unsigned long flags; | 90 | unsigned long flags; |
108 | /* XXX kill ttype and ttype_ptr, allocate full sas_task */ | 91 | /* XXX kill ttype and ttype_ptr, allocate full sas_task */ |
109 | union ttype_ptr_union { | 92 | union ttype_ptr_union { |
@@ -115,7 +98,6 @@ struct isci_request { | |||
115 | struct list_head completed_node; | 98 | struct list_head completed_node; |
116 | /* For use in the reqs_in_process list: */ | 99 | /* For use in the reqs_in_process list: */ |
117 | struct list_head dev_node; | 100 | struct list_head dev_node; |
118 | spinlock_t state_lock; | ||
119 | dma_addr_t request_daddr; | 101 | dma_addr_t request_daddr; |
120 | dma_addr_t zero_scatter_daddr; | 102 | dma_addr_t zero_scatter_daddr; |
121 | unsigned int num_sg_entries; | 103 | unsigned int num_sg_entries; |
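With the isci_request_status enum and its state_lock gone, everything the driver tracks about a request's lifecycle is carried in the atomic ->flags word above, manipulated with the standard kernel bitops. The helpers below are purely illustrative (they are not added by this patch), and the reading of IREQ_ABORT_PATH_ACTIVE as "the abort/terminate path currently owns this request" is an inference from its name and use:

static void ireq_mark_complete_in_target(struct isci_request *ireq)
{
        /* Target finished the I/O; the normal callback path may run. */
        set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
}

static bool ireq_on_abort_path(struct isci_request *ireq)
{
        return test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
}

static void ireq_leave_abort_path(struct isci_request *ireq)
{
        clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
}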
@@ -304,92 +286,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) | |||
304 | return ireq->request_daddr + (requested_addr - base_addr); | 286 | return ireq->request_daddr + (requested_addr - base_addr); |
305 | } | 287 | } |
306 | 288 | ||
307 | /** | ||
308 | * isci_request_change_state() - This function sets the status of the request | ||
309 | * object. | ||
310 | * @request: This parameter points to the isci_request object | ||
311 | * @status: This Parameter is the new status of the object | ||
312 | * | ||
313 | */ | ||
314 | static inline enum isci_request_status | ||
315 | isci_request_change_state(struct isci_request *isci_request, | ||
316 | enum isci_request_status status) | ||
317 | { | ||
318 | enum isci_request_status old_state; | ||
319 | unsigned long flags; | ||
320 | |||
321 | dev_dbg(&isci_request->isci_host->pdev->dev, | ||
322 | "%s: isci_request = %p, state = 0x%x\n", | ||
323 | __func__, | ||
324 | isci_request, | ||
325 | status); | ||
326 | |||
327 | BUG_ON(isci_request == NULL); | ||
328 | |||
329 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
330 | old_state = isci_request->status; | ||
331 | isci_request->status = status; | ||
332 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
333 | |||
334 | return old_state; | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * isci_request_change_started_to_newstate() - This function sets the status of | ||
339 | * the request object. | ||
340 | * @request: This parameter points to the isci_request object | ||
341 | * @status: This Parameter is the new status of the object | ||
342 | * | ||
343 | * state previous to any change. | ||
344 | */ | ||
345 | static inline enum isci_request_status | ||
346 | isci_request_change_started_to_newstate(struct isci_request *isci_request, | ||
347 | struct completion *completion_ptr, | ||
348 | enum isci_request_status newstate) | ||
349 | { | ||
350 | enum isci_request_status old_state; | ||
351 | unsigned long flags; | ||
352 | |||
353 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
354 | |||
355 | old_state = isci_request->status; | ||
356 | |||
357 | if (old_state == started || old_state == aborting) { | ||
358 | BUG_ON(isci_request->io_request_completion != NULL); | ||
359 | |||
360 | isci_request->io_request_completion = completion_ptr; | ||
361 | isci_request->status = newstate; | ||
362 | } | ||
363 | |||
364 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
365 | |||
366 | dev_dbg(&isci_request->isci_host->pdev->dev, | ||
367 | "%s: isci_request = %p, old_state = 0x%x\n", | ||
368 | __func__, | ||
369 | isci_request, | ||
370 | old_state); | ||
371 | |||
372 | return old_state; | ||
373 | } | ||
374 | |||
375 | /** | ||
376 | * isci_request_change_started_to_aborted() - This function sets the status of | ||
377 | * the request object. | ||
378 | * @request: This parameter points to the isci_request object | ||
379 | * @completion_ptr: This parameter is saved as the kernel completion structure | ||
380 | * signalled when the old request completes. | ||
381 | * | ||
382 | * state previous to any change. | ||
383 | */ | ||
384 | static inline enum isci_request_status | ||
385 | isci_request_change_started_to_aborted(struct isci_request *isci_request, | ||
386 | struct completion *completion_ptr) | ||
387 | { | ||
388 | return isci_request_change_started_to_newstate(isci_request, | ||
389 | completion_ptr, | ||
390 | aborted); | ||
391 | } | ||
392 | |||
393 | #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) | 289 | #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) |
394 | 290 | ||
395 | #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) | 291 | #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) |
@@ -399,8 +295,6 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, | |||
399 | u16 tag); | 295 | u16 tag); |
400 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, | 296 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, |
401 | struct sas_task *task, u16 tag); | 297 | struct sas_task *task, u16 tag); |
402 | void isci_terminate_pending_requests(struct isci_host *ihost, | ||
403 | struct isci_remote_device *idev); | ||
404 | enum sci_status | 298 | enum sci_status |
405 | sci_task_request_construct(struct isci_host *ihost, | 299 | sci_task_request_construct(struct isci_host *ihost, |
406 | struct isci_remote_device *idev, | 300 | struct isci_remote_device *idev, |
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 26de06ef688e..29ce8815e799 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
@@ -78,54 +78,25 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, | |||
78 | enum exec_status status) | 78 | enum exec_status status) |
79 | 79 | ||
80 | { | 80 | { |
81 | enum isci_completion_selection disposition; | 81 | unsigned long flags; |
82 | 82 | ||
83 | disposition = isci_perform_normal_io_completion; | 83 | /* Normal notification (task_done) */ |
84 | disposition = isci_task_set_completion_status(task, response, status, | 84 | dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", |
85 | disposition); | 85 | __func__, task, response, status); |
86 | 86 | ||
87 | /* Tasks aborted specifically by a call to the lldd_abort_task | 87 | spin_lock_irqsave(&task->task_state_lock, flags); |
88 | * function should not be completed to the host in the regular path. | ||
89 | */ | ||
90 | switch (disposition) { | ||
91 | case isci_perform_normal_io_completion: | ||
92 | /* Normal notification (task_done) */ | ||
93 | dev_dbg(&ihost->pdev->dev, | ||
94 | "%s: Normal - task = %p, response=%d, " | ||
95 | "status=%d\n", | ||
96 | __func__, task, response, status); | ||
97 | |||
98 | task->lldd_task = NULL; | ||
99 | task->task_done(task); | ||
100 | break; | ||
101 | |||
102 | case isci_perform_aborted_io_completion: | ||
103 | /* | ||
104 | * No notification because this request is already in the | ||
105 | * abort path. | ||
106 | */ | ||
107 | dev_dbg(&ihost->pdev->dev, | ||
108 | "%s: Aborted - task = %p, response=%d, " | ||
109 | "status=%d\n", | ||
110 | __func__, task, response, status); | ||
111 | break; | ||
112 | 88 | ||
113 | case isci_perform_error_io_completion: | 89 | task->task_status.resp = response; |
114 | /* Use sas_task_abort */ | 90 | task->task_status.stat = status; |
115 | dev_dbg(&ihost->pdev->dev, | ||
116 | "%s: Error - task = %p, response=%d, " | ||
117 | "status=%d\n", | ||
118 | __func__, task, response, status); | ||
119 | sas_task_abort(task); | ||
120 | break; | ||
121 | 91 | ||
122 | default: | 92 | /* Normal notification (task_done) */ |
123 | dev_dbg(&ihost->pdev->dev, | 93 | task->task_state_flags |= SAS_TASK_STATE_DONE; |
124 | "%s: isci task notification default case!", | 94 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | |
125 | __func__); | 95 | SAS_TASK_STATE_PENDING); |
126 | sas_task_abort(task); | 96 | task->lldd_task = NULL; |
127 | break; | 97 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
128 | } | 98 | |
99 | task->task_done(task); | ||
129 | } | 100 | } |
130 | 101 | ||
131 | #define for_each_sas_task(num, task) \ | 102 | #define for_each_sas_task(num, task) \ |
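isci_task_refuse() now has exactly one disposition: record the response and status in the sas_task, mark it done, clear lldd_task, and call task_done() directly; the old three-way switch over completion selections is gone. A hypothetical caller (argument values are illustrative, mirroring how the execute path fails tasks when no remote device is found) would look like:

/* Illustrative only: refuse a task when the device lookup fails. */
if (!idev) {
        isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
                         SAS_DEVICE_UNKNOWN);
        return 0;
}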
@@ -289,60 +260,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
289 | return ireq; | 260 | return ireq; |
290 | } | 261 | } |
291 | 262 | ||
292 | /** | ||
293 | * isci_request_mark_zombie() - This function must be called with scic_lock held. | ||
294 | */ | ||
295 | static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq) | ||
296 | { | ||
297 | struct completion *tmf_completion = NULL; | ||
298 | struct completion *req_completion; | ||
299 | |||
300 | /* Set the request state to "dead". */ | ||
301 | ireq->status = dead; | ||
302 | |||
303 | req_completion = ireq->io_request_completion; | ||
304 | ireq->io_request_completion = NULL; | ||
305 | |||
306 | if (test_bit(IREQ_TMF, &ireq->flags)) { | ||
307 | /* Break links with the TMF request. */ | ||
308 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | ||
309 | |||
310 | /* In the case where a task request is dying, | ||
311 | * the thread waiting on the complete will sit and | ||
312 | * timeout unless we wake it now. Since the TMF | ||
313 | * has a default error status, complete it here | ||
314 | * to wake the waiting thread. | ||
315 | */ | ||
316 | if (tmf) { | ||
317 | tmf_completion = tmf->complete; | ||
318 | tmf->complete = NULL; | ||
319 | } | ||
320 | ireq->ttype_ptr.tmf_task_ptr = NULL; | ||
321 | dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n", | ||
322 | __func__, tmf->tmf_code, tmf->io_tag); | ||
323 | } else { | ||
324 | /* Break links with the sas_task - the callback is done | ||
325 | * elsewhere. | ||
326 | */ | ||
327 | struct sas_task *task = isci_request_access_task(ireq); | ||
328 | |||
329 | if (task) | ||
330 | task->lldd_task = NULL; | ||
331 | |||
332 | ireq->ttype_ptr.io_task_ptr = NULL; | ||
333 | } | ||
334 | |||
335 | dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n", | ||
336 | ireq->io_tag); | ||
337 | |||
338 | /* Don't force waiting threads to timeout. */ | ||
339 | if (req_completion) | ||
340 | complete(req_completion); | ||
341 | |||
342 | if (tmf_completion != NULL) | ||
343 | complete(tmf_completion); | ||
344 | } | ||
345 | |||
346 | static int isci_task_execute_tmf(struct isci_host *ihost, | 263 | static int isci_task_execute_tmf(struct isci_host *ihost, |
347 | struct isci_remote_device *idev, | 264 | struct isci_remote_device *idev, |
348 | struct isci_tmf *tmf, unsigned long timeout_ms) | 265 | struct isci_tmf *tmf, unsigned long timeout_ms) |
@@ -400,15 +317,12 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
400 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 317 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
401 | goto err_tci; | 318 | goto err_tci; |
402 | } | 319 | } |
403 | |||
404 | if (tmf->cb_state_func != NULL) | ||
405 | tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data); | ||
406 | |||
407 | isci_request_change_state(ireq, started); | ||
408 | |||
409 | /* add the request to the remote device request list. */ | 320 | /* add the request to the remote device request list. */ |
410 | list_add(&ireq->dev_node, &idev->reqs_in_process); | 321 | list_add(&ireq->dev_node, &idev->reqs_in_process); |
411 | 322 | ||
323 | /* The RNC must be unsuspended before the TMF can get a response. */ | ||
324 | sci_remote_device_resume(idev, NULL, NULL); | ||
325 | |||
412 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 326 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
413 | 327 | ||
414 | /* Wait for the TMF to complete, or a timeout. */ | 328 | /* Wait for the TMF to complete, or a timeout. */ |
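The ordering in the hunk above matters: while scic_lock is still held the TMF request is linked onto the device's reqs_in_process list and the remote node context is resumed, and only after the lock is dropped does the thread sleep waiting for the response. In sketch form (calls as shown in the hunk; error handling elided):

spin_lock_irqsave(&ihost->scic_lock, flags);
/* ... sci_controller_start_task() has accepted the request ... */
list_add(&ireq->dev_node, &idev->reqs_in_process);

/* The RNC must be running again, or the target's response frame can
 * never be delivered to this TMF request.
 */
sci_remote_device_resume(idev, NULL, NULL);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
/* Only now is it safe to sleep on the completion. */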
@@ -419,32 +333,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
419 | /* The TMF did not complete - this could be because | 333 | /* The TMF did not complete - this could be because |
420 | * of an unplug. Terminate the TMF request now. | 334 | * of an unplug. Terminate the TMF request now. |
421 | */ | 335 | */ |
422 | spin_lock_irqsave(&ihost->scic_lock, flags); | 336 | isci_remote_device_suspend_terminate(ihost, idev, ireq); |
423 | |||
424 | if (tmf->cb_state_func != NULL) | ||
425 | tmf->cb_state_func(isci_tmf_timed_out, tmf, | ||
426 | tmf->cb_data); | ||
427 | |||
428 | sci_controller_terminate_request(ihost, idev, ireq); | ||
429 | |||
430 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
431 | |||
432 | timeleft = wait_for_completion_timeout( | ||
433 | &completion, | ||
434 | msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)); | ||
435 | |||
436 | if (!timeleft) { | ||
437 | /* Strange condition - the termination of the TMF | ||
438 | * request timed-out. | ||
439 | */ | ||
440 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
441 | |||
442 | /* If the TMF status has not changed, kill it. */ | ||
443 | if (tmf->status == SCI_FAILURE_TIMEOUT) | ||
444 | isci_request_mark_zombie(ihost, ireq); | ||
445 | |||
446 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
447 | } | ||
448 | } | 337 | } |
449 | 338 | ||
450 | isci_print_tmf(ihost, tmf); | 339 | isci_print_tmf(ihost, tmf); |
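Timeout handling collapses to one call: if the wait expires, isci_remote_device_suspend_terminate() suspends the remote node context and terminates the still-outstanding TMF request, so the old terminate-then-wait-then-zombie sequence (and isci_request_mark_zombie() itself) is no longer needed. A sketch of the new timeout path, using the names from the hunk:

timeleft = wait_for_completion_timeout(&completion,
                                       msecs_to_jiffies(timeout_ms));
if (!timeleft) {
        /* The TMF did not complete - possibly an unplug.  One call
         * both suspends the RNC and flushes the outstanding request.
         */
        isci_remote_device_suspend_terminate(ihost, idev, ireq);
}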
@@ -476,317 +365,21 @@ static int isci_task_execute_tmf(struct isci_host *ihost, | |||
476 | } | 365 | } |
477 | 366 | ||
478 | static void isci_task_build_tmf(struct isci_tmf *tmf, | 367 | static void isci_task_build_tmf(struct isci_tmf *tmf, |
479 | enum isci_tmf_function_codes code, | 368 | enum isci_tmf_function_codes code) |
480 | void (*tmf_sent_cb)(enum isci_tmf_cb_state, | ||
481 | struct isci_tmf *, | ||
482 | void *), | ||
483 | void *cb_data) | ||
484 | { | 369 | { |
485 | memset(tmf, 0, sizeof(*tmf)); | 370 | memset(tmf, 0, sizeof(*tmf)); |
486 | 371 | tmf->tmf_code = code; | |
487 | tmf->tmf_code = code; | ||
488 | tmf->cb_state_func = tmf_sent_cb; | ||
489 | tmf->cb_data = cb_data; | ||
490 | } | 372 | } |
491 | 373 | ||
492 | static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, | 374 | static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, |
493 | enum isci_tmf_function_codes code, | 375 | enum isci_tmf_function_codes code, |
494 | void (*tmf_sent_cb)(enum isci_tmf_cb_state, | ||
495 | struct isci_tmf *, | ||
496 | void *), | ||
497 | struct isci_request *old_request) | 376 | struct isci_request *old_request) |
498 | { | 377 | { |
499 | isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request); | 378 | isci_task_build_tmf(tmf, code); |
500 | tmf->io_tag = old_request->io_tag; | 379 | tmf->io_tag = old_request->io_tag; |
501 | } | 380 | } |
502 | 381 | ||
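With the state-callback plumbing removed, building a TMF is just a memset plus a function code, and for an abort, the tag of the request being aborted. A usage sketch (mirroring the abort-task path later in this patch; the warning message is illustrative):

struct isci_tmf tmf;

isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, old_request);
ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_ABORT_TASK_TIMEOUT_MS);
if (ret != TMF_RESP_FUNC_COMPLETE)
        dev_warn(&ihost->pdev->dev, "abort TMF failed\n");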
503 | /** | 382 | /** |
504 | * isci_task_validate_request_to_abort() - This function checks the given I/O | ||
505 | * against the "started" state. If the request is still "started", its | ||
506 | * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD | ||
507 | * BEFORE CALLING THIS FUNCTION. | ||
508 | * @isci_request: This parameter specifies the request object to control. | ||
509 | * @isci_host: This parameter specifies the ISCI host object | ||
510 | * @isci_device: This is the device to which the request is pending. | ||
511 | * @aborted_io_completion: This is a completion structure that will be added to | ||
512 | * the request in case it is changed to aborting; this completion is | ||
513 | * triggered when the request is fully completed. | ||
514 | * | ||
515 | * Either "started" on successful change of the task status to "aborted", or | ||
516 | * "unallocated" if the task cannot be controlled. | ||
517 | */ | ||
518 | static enum isci_request_status isci_task_validate_request_to_abort( | ||
519 | struct isci_request *isci_request, | ||
520 | struct isci_host *isci_host, | ||
521 | struct isci_remote_device *isci_device, | ||
522 | struct completion *aborted_io_completion) | ||
523 | { | ||
524 | enum isci_request_status old_state = unallocated; | ||
525 | |||
526 | /* Only abort the task if it's in the | ||
527 | * device's request_in_process list | ||
528 | */ | ||
529 | if (isci_request && !list_empty(&isci_request->dev_node)) { | ||
530 | old_state = isci_request_change_started_to_aborted( | ||
531 | isci_request, aborted_io_completion); | ||
532 | |||
533 | } | ||
534 | |||
535 | return old_state; | ||
536 | } | ||
537 | |||
538 | static int isci_request_is_dealloc_managed(enum isci_request_status stat) | ||
539 | { | ||
540 | switch (stat) { | ||
541 | case aborted: | ||
542 | case aborting: | ||
543 | case terminating: | ||
544 | case completed: | ||
545 | case dead: | ||
546 | return true; | ||
547 | default: | ||
548 | return false; | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /** | ||
553 | * isci_terminate_request_core() - This function will terminate the given | ||
554 | * request, and wait for it to complete. This function must only be called | ||
555 | * from a thread that can wait. Note that the request is terminated and | ||
556 | * completed (back to the host, if started there). | ||
557 | * @ihost: This SCU. | ||
558 | * @idev: The target. | ||
559 | * @isci_request: The I/O request to be terminated. | ||
560 | * | ||
561 | */ | ||
562 | static void isci_terminate_request_core(struct isci_host *ihost, | ||
563 | struct isci_remote_device *idev, | ||
564 | struct isci_request *isci_request) | ||
565 | { | ||
566 | enum sci_status status = SCI_SUCCESS; | ||
567 | bool was_terminated = false; | ||
568 | bool needs_cleanup_handling = false; | ||
569 | unsigned long flags; | ||
570 | unsigned long termination_completed = 1; | ||
571 | struct completion *io_request_completion; | ||
572 | |||
573 | dev_dbg(&ihost->pdev->dev, | ||
574 | "%s: device = %p; request = %p\n", | ||
575 | __func__, idev, isci_request); | ||
576 | |||
577 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
578 | |||
579 | io_request_completion = isci_request->io_request_completion; | ||
580 | |||
581 | /* Note that we are not going to control | ||
582 | * the target to abort the request. | ||
583 | */ | ||
584 | set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags); | ||
585 | |||
586 | /* Make sure the request wasn't just sitting around signalling | ||
587 | * device condition (if the request handle is NULL, then the | ||
588 | * request completed but needed additional handling here). | ||
589 | */ | ||
590 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { | ||
591 | was_terminated = true; | ||
592 | needs_cleanup_handling = true; | ||
593 | status = sci_controller_terminate_request(ihost, | ||
594 | idev, | ||
595 | isci_request); | ||
596 | } | ||
597 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
598 | |||
599 | /* | ||
600 | * The only time the request to terminate will | ||
601 | * fail is when the io request is completed and | ||
602 | * being aborted. | ||
603 | */ | ||
604 | if (status != SCI_SUCCESS) { | ||
605 | dev_dbg(&ihost->pdev->dev, | ||
606 | "%s: sci_controller_terminate_request" | ||
607 | " returned = 0x%x\n", | ||
608 | __func__, status); | ||
609 | |||
610 | isci_request->io_request_completion = NULL; | ||
611 | |||
612 | } else { | ||
613 | if (was_terminated) { | ||
614 | dev_dbg(&ihost->pdev->dev, | ||
615 | "%s: before completion wait (%p/%p)\n", | ||
616 | __func__, isci_request, io_request_completion); | ||
617 | |||
618 | /* Wait here for the request to complete. */ | ||
619 | termination_completed | ||
620 | = wait_for_completion_timeout( | ||
621 | io_request_completion, | ||
622 | msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)); | ||
623 | |||
624 | if (!termination_completed) { | ||
625 | |||
626 | /* The request to terminate has timed out. */ | ||
627 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
628 | |||
629 | /* Check for state changes. */ | ||
630 | if (!test_bit(IREQ_TERMINATED, | ||
631 | &isci_request->flags)) { | ||
632 | |||
633 | /* The best we can do is to have the | ||
634 | * request die a silent death if it | ||
635 | * ever really completes. | ||
636 | */ | ||
637 | isci_request_mark_zombie(ihost, | ||
638 | isci_request); | ||
639 | needs_cleanup_handling = true; | ||
640 | } else | ||
641 | termination_completed = 1; | ||
642 | |||
643 | spin_unlock_irqrestore(&ihost->scic_lock, | ||
644 | flags); | ||
645 | |||
646 | if (!termination_completed) { | ||
647 | |||
648 | dev_dbg(&ihost->pdev->dev, | ||
649 | "%s: *** Timeout waiting for " | ||
650 | "termination(%p/%p)\n", | ||
651 | __func__, io_request_completion, | ||
652 | isci_request); | ||
653 | |||
654 | /* The request can no longer be referenced | ||
655 | * safely since it may go away if the | ||
656 | * termination ever really does complete. | ||
657 | */ | ||
658 | isci_request = NULL; | ||
659 | } | ||
660 | } | ||
661 | if (termination_completed) | ||
662 | dev_dbg(&ihost->pdev->dev, | ||
663 | "%s: after completion wait (%p/%p)\n", | ||
664 | __func__, isci_request, io_request_completion); | ||
665 | } | ||
666 | |||
667 | if (termination_completed) { | ||
668 | |||
669 | isci_request->io_request_completion = NULL; | ||
670 | |||
671 | /* Peek at the status of the request. This will tell | ||
672 | * us if there was special handling on the request such that it | ||
673 | * needs to be detached and freed here. | ||
674 | */ | ||
675 | spin_lock_irqsave(&isci_request->state_lock, flags); | ||
676 | |||
677 | needs_cleanup_handling | ||
678 | = isci_request_is_dealloc_managed( | ||
679 | isci_request->status); | ||
680 | |||
681 | spin_unlock_irqrestore(&isci_request->state_lock, flags); | ||
682 | |||
683 | } | ||
684 | if (needs_cleanup_handling) { | ||
685 | |||
686 | dev_dbg(&ihost->pdev->dev, | ||
687 | "%s: cleanup isci_device=%p, request=%p\n", | ||
688 | __func__, idev, isci_request); | ||
689 | |||
690 | if (isci_request != NULL) { | ||
691 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
692 | isci_free_tag(ihost, isci_request->io_tag); | ||
693 | isci_request_change_state(isci_request, unallocated); | ||
694 | list_del_init(&isci_request->dev_node); | ||
695 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
696 | } | ||
697 | } | ||
698 | } | ||
699 | } | ||
700 | |||
701 | /** | ||
702 | * isci_terminate_pending_requests() - This function will change all of the | ||
703 | * requests on the given device's state to "aborting", will terminate the | ||
704 | * requests, and wait for them to complete. This function must only be | ||
705 | * called from a thread that can wait. Note that the requests are all | ||
706 | * terminated and completed (back to the host, if started there). | ||
707 | * @isci_host: This parameter specifies SCU. | ||
708 | * @idev: This parameter specifies the target. | ||
709 | * | ||
710 | */ | ||
711 | void isci_terminate_pending_requests(struct isci_host *ihost, | ||
712 | struct isci_remote_device *idev) | ||
713 | { | ||
714 | struct completion request_completion; | ||
715 | enum isci_request_status old_state; | ||
716 | unsigned long flags; | ||
717 | LIST_HEAD(list); | ||
718 | |||
719 | isci_remote_device_suspend(ihost, idev); | ||
720 | |||
721 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
722 | list_splice_init(&idev->reqs_in_process, &list); | ||
723 | |||
724 | /* assumes that isci_terminate_request_core deletes from the list */ | ||
725 | while (!list_empty(&list)) { | ||
726 | struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node); | ||
727 | |||
728 | /* Change state to "terminating" if it is currently | ||
729 | * "started". | ||
730 | */ | ||
731 | old_state = isci_request_change_started_to_newstate(ireq, | ||
732 | &request_completion, | ||
733 | terminating); | ||
734 | switch (old_state) { | ||
735 | case started: | ||
736 | case completed: | ||
737 | case aborting: | ||
738 | break; | ||
739 | default: | ||
740 | /* termination in progress, or otherwise dispositioned. | ||
741 | * We know the request was on 'list' so should be safe | ||
742 | * to move it back to reqs_in_process | ||
743 | */ | ||
744 | list_move(&ireq->dev_node, &idev->reqs_in_process); | ||
745 | ireq = NULL; | ||
746 | break; | ||
747 | } | ||
748 | |||
749 | if (!ireq) | ||
750 | continue; | ||
751 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
752 | |||
753 | init_completion(&request_completion); | ||
754 | |||
755 | dev_dbg(&ihost->pdev->dev, | ||
756 | "%s: idev=%p request=%p; task=%p old_state=%d\n", | ||
757 | __func__, idev, ireq, | ||
758 | (!test_bit(IREQ_TMF, &ireq->flags) | ||
759 | ? isci_request_access_task(ireq) | ||
760 | : NULL), | ||
761 | old_state); | ||
762 | |||
763 | /* If the old_state is started: | ||
764 | * This request was not already being aborted. If it had been, | ||
765 | * then the aborting I/O (ie. the TMF request) would not be in | ||
766 | * the aborting state, and thus would be terminated here. Note | ||
767 | * that since the TMF completion's call to the kernel function | ||
768 | * "complete()" does not happen until the pending I/O request | ||
769 | * terminate fully completes, we do not have to implement a | ||
770 | * special wait here for already aborting requests - the | ||
771 | * termination of the TMF request will force the request | ||
772 | * to finish its already started terminate. | ||
773 | * | ||
774 | * If old_state == completed: | ||
775 | * This request completed from the SCU hardware perspective | ||
776 | * and now just needs cleaning up in terms of freeing the | ||
777 | * request and potentially calling up to libsas. | ||
778 | * | ||
779 | * If old_state == aborting: | ||
780 | * This request has already gone through a TMF timeout, but may | ||
781 | * not have been terminated; needs cleaning up at least. | ||
782 | */ | ||
783 | isci_terminate_request_core(ihost, idev, ireq); | ||
784 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
785 | } | ||
786 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | ||
787 | } | ||
788 | |||
789 | /** | ||
790 | * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain | 383 | * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
791 | * Template functions. | 384 | * Template functions. |
792 | * @lun: This parameter specifies the lun to be reset. | 385 | * @lun: This parameter specifies the lun to be reset. |
@@ -809,7 +402,7 @@ static int isci_task_send_lu_reset_sas( | |||
809 | * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or | 402 | * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or |
810 | * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). | 403 | * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). |
811 | */ | 404 | */ |
812 | isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL); | 405 | isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset); |
813 | 406 | ||
814 | #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ | 407 | #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ |
815 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); | 408 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); |
@@ -829,48 +422,41 @@ static int isci_task_send_lu_reset_sas( | |||
829 | int isci_task_lu_reset(struct domain_device *dev, u8 *lun) | 422 | int isci_task_lu_reset(struct domain_device *dev, u8 *lun) |
830 | { | 423 | { |
831 | struct isci_host *ihost = dev_to_ihost(dev); | 424 | struct isci_host *ihost = dev_to_ihost(dev); |
832 | struct isci_remote_device *isci_device; | 425 | struct isci_remote_device *idev; |
833 | unsigned long flags; | 426 | unsigned long flags; |
834 | int ret; | 427 | int ret; |
835 | 428 | ||
836 | spin_lock_irqsave(&ihost->scic_lock, flags); | 429 | spin_lock_irqsave(&ihost->scic_lock, flags); |
837 | isci_device = isci_lookup_device(dev); | 430 | idev = isci_lookup_device(dev); |
838 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 431 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
839 | 432 | ||
840 | dev_dbg(&ihost->pdev->dev, | 433 | dev_dbg(&ihost->pdev->dev, |
841 | "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", | 434 | "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", |
842 | __func__, dev, ihost, isci_device); | 435 | __func__, dev, ihost, idev); |
843 | 436 | ||
844 | if (!isci_device) { | 437 | if (!idev) { |
845 | /* If the device is gone, escalate to I_T_Nexus_Reset. */ | 438 | /* If the device is gone, escalate to I_T_Nexus_Reset. */ |
846 | dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__); | 439 | dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__); |
847 | 440 | ||
848 | ret = TMF_RESP_FUNC_FAILED; | 441 | ret = TMF_RESP_FUNC_FAILED; |
849 | goto out; | 442 | goto out; |
850 | } | 443 | } |
851 | if (isci_remote_device_suspend(ihost, isci_device) != SCI_SUCCESS) { | ||
852 | dev_dbg(&ihost->pdev->dev, | ||
853 | "%s: device = %p; failed to suspend\n", | ||
854 | __func__, isci_device); | ||
855 | ret = TMF_RESP_FUNC_FAILED; | ||
856 | goto out; | ||
857 | } | ||
858 | 444 | ||
859 | /* Send the task management part of the reset. */ | ||
860 | if (dev_is_sata(dev)) { | 445 | if (dev_is_sata(dev)) { |
861 | sas_ata_schedule_reset(dev); | 446 | sas_ata_schedule_reset(dev); |
862 | ret = TMF_RESP_FUNC_COMPLETE; | 447 | ret = TMF_RESP_FUNC_COMPLETE; |
863 | } else | 448 | } else { |
864 | ret = isci_task_send_lu_reset_sas(ihost, isci_device, lun); | 449 | /* Suspend the RNC, kill all TCs */ |
865 | 450 | if (isci_remote_device_suspend_terminate(ihost, idev, NULL) | |
866 | /* If the LUN reset worked, all the I/O can now be terminated. */ | 451 | != SCI_SUCCESS) { |
867 | if (ret == TMF_RESP_FUNC_COMPLETE) { | 452 | ret = TMF_RESP_FUNC_FAILED; |
868 | /* Terminate all I/O now. */ | 453 | goto out; |
869 | isci_terminate_pending_requests(ihost, isci_device); | 454 | } |
870 | isci_remote_device_resume(ihost, isci_device, NULL, NULL); | 455 | /* Send the task management part of the reset. */ |
456 | ret = isci_task_send_lu_reset_sas(ihost, idev, lun); | ||
871 | } | 457 | } |
872 | out: | 458 | out: |
873 | isci_put_device(isci_device); | 459 | isci_put_device(idev); |
874 | return ret; | 460 | return ret; |
875 | } | 461 | } |
876 | 462 | ||
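The LU reset path is now: look up the device; for SATA hand the reset to libata via sas_ata_schedule_reset(); for SSP first suspend the RNC and terminate every outstanding TC with isci_remote_device_suspend_terminate(), then send the LUN-reset TMF. The old post-reset isci_terminate_pending_requests()/resume step disappears because termination now happens up front. Condensed (device lookup and locking elided):

if (dev_is_sata(dev)) {
        sas_ata_schedule_reset(dev);
        ret = TMF_RESP_FUNC_COMPLETE;
} else {
        /* Suspend the RNC and kill every outstanding TC first. */
        if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
            != SCI_SUCCESS)
                ret = TMF_RESP_FUNC_FAILED;
        else
                ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
}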
@@ -891,63 +477,6 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) | |||
891 | /* Task Management Functions. Must be called from process context. */ | 477 | /* Task Management Functions. Must be called from process context. */ |
892 | 478 | ||
893 | /** | 479 | /** |
894 | * isci_abort_task_process_cb() - This is a helper function for the abort task | ||
895 | * TMF command. It manages the request state with respect to the successful | ||
896 | * transmission / completion of the abort task request. | ||
897 | * @cb_state: This parameter specifies when this function was called - after | ||
898 | * the TMF request has been started and after it has timed-out. | ||
899 | * @tmf: This parameter specifies the TMF in progress. | ||
900 | * | ||
901 | * | ||
902 | */ | ||
903 | static void isci_abort_task_process_cb( | ||
904 | enum isci_tmf_cb_state cb_state, | ||
905 | struct isci_tmf *tmf, | ||
906 | void *cb_data) | ||
907 | { | ||
908 | struct isci_request *old_request; | ||
909 | |||
910 | old_request = (struct isci_request *)cb_data; | ||
911 | |||
912 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
913 | "%s: tmf=%p, old_request=%p\n", | ||
914 | __func__, tmf, old_request); | ||
915 | |||
916 | switch (cb_state) { | ||
917 | |||
918 | case isci_tmf_started: | ||
919 | /* The TMF has been started. Nothing to do here, since the | ||
920 | * request state was already set to "aborted" by the abort | ||
921 | * task function. | ||
922 | */ | ||
923 | if ((old_request->status != aborted) | ||
924 | && (old_request->status != completed)) | ||
925 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
926 | "%s: Bad request status (%d): tmf=%p, old_request=%p\n", | ||
927 | __func__, old_request->status, tmf, old_request); | ||
928 | break; | ||
929 | |||
930 | case isci_tmf_timed_out: | ||
931 | |||
932 | /* Set the task's state to "aborting", since the abort task | ||
933 | * function thread set it to "aborted" (above) in anticipation | ||
934 | * of the task management request working correctly. Since the | ||
935 | * timeout has now fired, the TMF request failed. We set the | ||
936 | * state such that the request completion will indicate the | ||
937 | * device is no longer present. | ||
938 | */ | ||
939 | isci_request_change_state(old_request, aborting); | ||
940 | break; | ||
941 | |||
942 | default: | ||
943 | dev_dbg(&old_request->isci_host->pdev->dev, | ||
944 | "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n", | ||
945 | __func__, cb_state, tmf, old_request); | ||
946 | break; | ||
947 | } | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * isci_task_abort_task() - This function is one of the SAS Domain Template | 480 | * isci_task_abort_task() - This function is one of the SAS Domain Template |
952 | * functions. This function is called by libsas to abort a specified task. | 481 | * functions. This function is called by libsas to abort a specified task. |
953 | * @task: This parameter specifies the SAS task to abort. | 482 | * @task: This parameter specifies the SAS task to abort. |
@@ -956,22 +485,20 @@ static void isci_abort_task_process_cb( | |||
956 | */ | 485 | */ |
957 | int isci_task_abort_task(struct sas_task *task) | 486 | int isci_task_abort_task(struct sas_task *task) |
958 | { | 487 | { |
959 | struct isci_host *isci_host = dev_to_ihost(task->dev); | 488 | struct isci_host *ihost = dev_to_ihost(task->dev); |
960 | DECLARE_COMPLETION_ONSTACK(aborted_io_completion); | 489 | DECLARE_COMPLETION_ONSTACK(aborted_io_completion); |
961 | struct isci_request *old_request = NULL; | 490 | struct isci_request *old_request = NULL; |
962 | enum isci_request_status old_state; | 491 | struct isci_remote_device *idev = NULL; |
963 | struct isci_remote_device *isci_device = NULL; | ||
964 | struct isci_tmf tmf; | 492 | struct isci_tmf tmf; |
965 | int ret = TMF_RESP_FUNC_FAILED; | 493 | int ret = TMF_RESP_FUNC_FAILED; |
966 | unsigned long flags; | 494 | unsigned long flags; |
967 | int perform_termination = 0; | ||
968 | 495 | ||
969 | /* Get the isci_request reference from the task. Note that | 496 | /* Get the isci_request reference from the task. Note that |
970 | * this check does not depend on the pending request list | 497 | * this check does not depend on the pending request list |
971 | * in the device, because tasks driving resets may land here | 498 | * in the device, because tasks driving resets may land here |
972 | * after completion in the core. | 499 | * after completion in the core. |
973 | */ | 500 | */ |
974 | spin_lock_irqsave(&isci_host->scic_lock, flags); | 501 | spin_lock_irqsave(&ihost->scic_lock, flags); |
975 | spin_lock(&task->task_state_lock); | 502 | spin_lock(&task->task_state_lock); |
976 | 503 | ||
977 | old_request = task->lldd_task; | 504 | old_request = task->lldd_task; |
@@ -980,20 +507,20 @@ int isci_task_abort_task(struct sas_task *task) | |||
980 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && | 507 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && |
981 | (task->task_state_flags & SAS_TASK_AT_INITIATOR) && | 508 | (task->task_state_flags & SAS_TASK_AT_INITIATOR) && |
982 | old_request) | 509 | old_request) |
983 | isci_device = isci_lookup_device(task->dev); | 510 | idev = isci_lookup_device(task->dev); |
984 | 511 | ||
985 | spin_unlock(&task->task_state_lock); | 512 | spin_unlock(&task->task_state_lock); |
986 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 513 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
987 | 514 | ||
988 | dev_warn(&isci_host->pdev->dev, | 515 | dev_warn(&ihost->pdev->dev, |
989 | "%s: dev = %p, task = %p, old_request == %p\n", | 516 | "%s: dev = %p, task = %p, old_request == %p\n", |
990 | __func__, isci_device, task, old_request); | 517 | __func__, idev, task, old_request); |
991 | 518 | ||
992 | /* Device reset conditions signalled in task_state_flags are the | 519 | /* Device reset conditions signalled in task_state_flags are the |
993 | * responsibility of libsas to observe at the start of the error | 520 | * responsibility of libsas to observe at the start of the error
994 | * handler thread. | 521 | * handler thread. |
995 | */ | 522 | */ |
996 | if (!isci_device || !old_request) { | 523 | if (!idev || !old_request) { |
997 | /* The request has already completed and there | 524 | /* The request has already completed and there |
998 | * is nothing to do here other than to set the task | 525 | * is nothing to do here other than to set the task |
999 | * done bit, and indicate that the task abort function | 526 | * done bit, and indicate that the task abort function |
@@ -1007,126 +534,66 @@ int isci_task_abort_task(struct sas_task *task) | |||
1007 | 534 | ||
1008 | ret = TMF_RESP_FUNC_COMPLETE; | 535 | ret = TMF_RESP_FUNC_COMPLETE; |
1009 | 536 | ||
1010 | dev_warn(&isci_host->pdev->dev, | 537 | dev_warn(&ihost->pdev->dev, |
1011 | "%s: abort task not needed for %p\n", | 538 | "%s: abort task not needed for %p\n", |
1012 | __func__, task); | 539 | __func__, task); |
1013 | goto out; | 540 | goto out; |
1014 | } | 541 | } |
1015 | 542 | /* Suspend the RNC, kill the TC */ | |
1016 | spin_lock_irqsave(&isci_host->scic_lock, flags); | 543 | if (isci_remote_device_suspend_terminate(ihost, idev, old_request) |
1017 | 544 | != SCI_SUCCESS) { | |
1018 | /* Check the request status and change to "aborted" if currently | 545 | dev_warn(&ihost->pdev->dev, |
1019 | * "starting"; if true then set the I/O kernel completion | 546 | "%s: isci_remote_device_reset_terminate(dev=%p, " |
1020 | * struct that will be triggered when the request completes. | 547 | "req=%p, task=%p) failed\n", |
1021 | */ | 548 | __func__, idev, old_request, task); |
1022 | old_state = isci_task_validate_request_to_abort( | 549 | ret = TMF_RESP_FUNC_FAILED; |
1023 | old_request, isci_host, isci_device, | ||
1024 | &aborted_io_completion); | ||
1025 | if ((old_state != started) && | ||
1026 | (old_state != completed) && | ||
1027 | (old_state != aborting)) { | ||
1028 | |||
1029 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | ||
1030 | |||
1031 | /* The request was already being handled by someone else (because | ||
1032 | * they got to set the state away from started). | ||
1033 | */ | ||
1034 | dev_warn(&isci_host->pdev->dev, | ||
1035 | "%s: device = %p; old_request %p already being aborted\n", | ||
1036 | __func__, | ||
1037 | isci_device, old_request); | ||
1038 | ret = TMF_RESP_FUNC_COMPLETE; | ||
1039 | goto out; | 550 | goto out; |
1040 | } | 551 | } |
552 | spin_lock_irqsave(&ihost->scic_lock, flags); | ||
553 | |||
1041 | if (task->task_proto == SAS_PROTOCOL_SMP || | 554 | if (task->task_proto == SAS_PROTOCOL_SMP || |
1042 | sas_protocol_ata(task->task_proto) || | 555 | sas_protocol_ata(task->task_proto) || |
1043 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { | 556 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { |
1044 | 557 | ||
1045 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 558 | /* No task to send, so explicitly resume the device here */ |
559 | sci_remote_device_resume(idev, NULL, NULL); | ||
1046 | 560 | ||
1047 | dev_warn(&isci_host->pdev->dev, | 561 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1048 | "%s: %s request" | ||
1049 | " or complete_in_target (%d), thus no TMF\n", | ||
1050 | __func__, | ||
1051 | ((task->task_proto == SAS_PROTOCOL_SMP) | ||
1052 | ? "SMP" | ||
1053 | : (sas_protocol_ata(task->task_proto) | ||
1054 | ? "SATA/STP" | ||
1055 | : "<other>") | ||
1056 | ), | ||
1057 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)); | ||
1058 | |||
1059 | if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { | ||
1060 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1061 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
1062 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
1063 | SAS_TASK_STATE_PENDING); | ||
1064 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1065 | ret = TMF_RESP_FUNC_COMPLETE; | ||
1066 | } else { | ||
1067 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1068 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
1069 | SAS_TASK_STATE_PENDING); | ||
1070 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1071 | } | ||
1072 | 562 | ||
1073 | /* STP and SMP devices are not sent a TMF, but the | 563 | dev_warn(&ihost->pdev->dev, |
1074 | * outstanding I/O request is terminated below. This is | 564 | "%s: %s request" |
1075 | * because SATA/STP and SMP discovery path timeouts directly | 565 | " or complete_in_target (%d), thus no TMF\n", |
1076 | * call the abort task interface for cleanup. | 566 | __func__, |
1077 | */ | 567 | ((task->task_proto == SAS_PROTOCOL_SMP) |
1078 | perform_termination = 1; | 568 | ? "SMP" |
1079 | 569 | : (sas_protocol_ata(task->task_proto) | |
1080 | if (isci_device && !test_bit(IDEV_GONE, &isci_device->flags) && | 570 | ? "SATA/STP" |
1081 | (isci_remote_device_suspend(isci_host, isci_device) | 571 | : "<other>") |
1082 | != SCI_SUCCESS)) { | 572 | ), |
1083 | dev_warn(&isci_host->pdev->dev, | 573 | test_bit(IREQ_COMPLETE_IN_TARGET, |
1084 | "%s: device = %p; failed to suspend\n", | 574 | &old_request->flags)); |
1085 | __func__, isci_device); | ||
1086 | goto out; | ||
1087 | } | ||
1088 | 575 | ||
576 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
577 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
578 | SAS_TASK_STATE_PENDING); | ||
579 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
580 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
581 | |||
582 | ret = TMF_RESP_FUNC_COMPLETE; | ||
1089 | } else { | 583 | } else { |
1090 | /* Fill in the tmf structure */ | 584 | /* Fill in the tmf structure */
1091 | isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, | 585 | isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, |
1092 | isci_abort_task_process_cb, | ||
1093 | old_request); | 586 | old_request); |
1094 | 587 | ||
1095 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 588 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1096 | |||
1097 | if (isci_remote_device_suspend(isci_host, isci_device) | ||
1098 | != SCI_SUCCESS) { | ||
1099 | dev_warn(&isci_host->pdev->dev, | ||
1100 | "%s: device = %p; failed to suspend\n", | ||
1101 | __func__, isci_device); | ||
1102 | goto out; | ||
1103 | } | ||
1104 | 589 | ||
590 | /* Send the task management request. */ | ||
1105 | #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ | 591 | #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ |
1106 | ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, | 592 | ret = isci_task_execute_tmf(ihost, idev, &tmf, |
1107 | ISCI_ABORT_TASK_TIMEOUT_MS); | 593 | ISCI_ABORT_TASK_TIMEOUT_MS); |
1108 | |||
1109 | if (ret == TMF_RESP_FUNC_COMPLETE) | ||
1110 | perform_termination = 1; | ||
1111 | else | ||
1112 | dev_warn(&isci_host->pdev->dev, | ||
1113 | "%s: isci_task_send_tmf failed\n", __func__); | ||
1114 | } | ||
1115 | if (perform_termination) { | ||
1116 | set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); | ||
1117 | |||
1118 | /* Clean up the request on our side, and wait for the aborted | ||
1119 | * I/O to complete. | ||
1120 | */ | ||
1121 | isci_terminate_request_core(isci_host, isci_device, | ||
1122 | old_request); | ||
1123 | isci_remote_device_resume(isci_host, isci_device, NULL, NULL); | ||
1124 | } | 594 | } |
1125 | 595 | out: | |
1126 | /* Make sure we do not leave a reference to aborted_io_completion */ | 596 | isci_put_device(idev); |
1127 | old_request->io_request_completion = NULL; | ||
1128 | out: | ||
1129 | isci_put_device(isci_device); | ||
1130 | return ret; | 597 | return ret; |
1131 | } | 598 | } |
1132 | 599 | ||
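For reference, the reworked abort-task logic in the hunk above reduces to the sketch below. The enclosing condition is not visible in this hunk and is inferred from the dev_warn() text, so treat it as illustrative; variable names follow the right-hand column, and the suspend/terminate steps handled elsewhere in this patch are assumed to have already run.

        if (task->task_proto == SAS_PROTOCOL_SMP ||
            sas_protocol_ata(task->task_proto) ||
            test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
                /* No TMF for SMP, SATA/STP, or I/O already complete in the
                 * target: just mark the sas_task done so libsas stops
                 * waiting on it.
                 */
                spin_lock_irqsave(&task->task_state_lock, flags);
                task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
                                            SAS_TASK_STATE_PENDING);
                task->task_state_flags |= SAS_TASK_STATE_DONE;
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                ret = TMF_RESP_FUNC_COMPLETE;
        } else {
                /* SSP only: build and send an ABORT TASK TMF with a
                 * 1/2 second timeout; scic_lock is held on entry here.
                 */
                isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
                                               old_request);
                spin_unlock_irqrestore(&ihost->scic_lock, flags);
                ret = isci_task_execute_tmf(ihost, idev, &tmf,
                                            ISCI_ABORT_TASK_TIMEOUT_MS);
        }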
@@ -1222,14 +689,11 @@ isci_task_request_complete(struct isci_host *ihost, | |||
1222 | { | 689 | { |
1223 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | 690 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); |
1224 | struct completion *tmf_complete = NULL; | 691 | struct completion *tmf_complete = NULL; |
1225 | struct completion *request_complete = ireq->io_request_completion; | ||
1226 | 692 | ||
1227 | dev_dbg(&ihost->pdev->dev, | 693 | dev_dbg(&ihost->pdev->dev, |
1228 | "%s: request = %p, status=%d\n", | 694 | "%s: request = %p, status=%d\n", |
1229 | __func__, ireq, completion_status); | 695 | __func__, ireq, completion_status); |
1230 | 696 | ||
1231 | isci_request_change_state(ireq, completed); | ||
1232 | |||
1233 | set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); | 697 | set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); |
1234 | 698 | ||
1235 | if (tmf) { | 699 | if (tmf) { |
@@ -1253,20 +717,8 @@ isci_task_request_complete(struct isci_host *ihost, | |||
1253 | */ | 717 | */ |
1254 | set_bit(IREQ_TERMINATED, &ireq->flags); | 718 | set_bit(IREQ_TERMINATED, &ireq->flags); |
1255 | 719 | ||
1256 | /* As soon as something is in the terminate path, deallocation is | 720 | isci_free_tag(ihost, ireq->io_tag); |
1257 | * managed there. Note that the final non-managed state of a task | 721 | list_del_init(&ireq->dev_node); |
1258 | * request is "completed". | ||
1259 | */ | ||
1260 | if ((ireq->status == completed) || | ||
1261 | !isci_request_is_dealloc_managed(ireq->status)) { | ||
1262 | isci_request_change_state(ireq, unallocated); | ||
1263 | isci_free_tag(ihost, ireq->io_tag); | ||
1264 | list_del_init(&ireq->dev_node); | ||
1265 | } | ||
1266 | |||
1267 | /* "request_complete" is set if the task was being terminated. */ | ||
1268 | if (request_complete) | ||
1269 | complete(request_complete); | ||
1270 | 722 | ||
1271 | /* The task management part completes last. */ | 723 | /* The task management part completes last. */ |
1272 | if (tmf_complete) | 724 | if (tmf_complete) |
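Condensed, the completion handler in the two hunks above now does no state-machine bookkeeping at all. A sketch of the resulting body (the TMF status copy and the source of tmf_complete fall outside the shown context and are elided):

        struct isci_tmf *tmf = isci_request_access_tmf(ireq);
        struct completion *tmf_complete = NULL;

        set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);

        if (tmf) {
                /* Record completion_status in the tmf and pick up the
                 * waiter's completion; those lines are outside this hunk.
                 */
        }

        set_bit(IREQ_TERMINATED, &ireq->flags);
        isci_free_tag(ihost, ireq->io_tag);
        list_del_init(&ireq->dev_node);

        /* The task management part completes last. */
        if (tmf_complete)
                complete(tmf_complete);

The per-request io_request_completion pointer and the completed/unallocated state transitions are gone entirely; termination status is now carried only by the IREQ_* flag bits.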
@@ -1277,37 +729,37 @@ static int isci_reset_device(struct isci_host *ihost, | |||
1277 | struct domain_device *dev, | 729 | struct domain_device *dev, |
1278 | struct isci_remote_device *idev) | 730 | struct isci_remote_device *idev) |
1279 | { | 731 | { |
1280 | int rc; | 732 | int rc = TMF_RESP_FUNC_COMPLETE, reset_stat; |
1281 | enum sci_status status; | ||
1282 | struct sas_phy *phy = sas_get_local_phy(dev); | 733 | struct sas_phy *phy = sas_get_local_phy(dev); |
1283 | struct isci_port *iport = dev->port->lldd_port; | 734 | struct isci_port *iport = dev->port->lldd_port; |
1284 | 735 | ||
1285 | dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); | 736 | dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); |
1286 | 737 | ||
1287 | if (isci_remote_device_reset(ihost, idev) != SCI_SUCCESS) { | 738 | /* Suspend the RNC, terminate all outstanding TCs. */ |
739 | if (isci_remote_device_suspend_terminate(ihost, idev, NULL) | ||
740 | != SCI_SUCCESS) { | ||
1288 | rc = TMF_RESP_FUNC_FAILED; | 741 | rc = TMF_RESP_FUNC_FAILED; |
1289 | goto out; | 742 | goto out; |
1290 | } | 743 | } |
744 | /* Note that since the termination for outstanding requests succeeded, | ||
745 | * this function will return success. This is because the resets will | ||
746 | * only fail if the device has been removed (i.e., hotplug), and the | ||
747 | * primary duty of this function is to clean up tasks, so that is the | ||
748 | * relevant status. | ||
749 | */ | ||
1291 | 750 | ||
1292 | if (scsi_is_sas_phy_local(phy)) { | 751 | if (scsi_is_sas_phy_local(phy)) { |
1293 | struct isci_phy *iphy = &ihost->phys[phy->number]; | 752 | struct isci_phy *iphy = &ihost->phys[phy->number]; |
1294 | 753 | ||
1295 | rc = isci_port_perform_hard_reset(ihost, iport, iphy); | 754 | reset_stat = isci_port_perform_hard_reset(ihost, iport, iphy); |
1296 | } else | 755 | } else |
1297 | rc = sas_phy_reset(phy, !dev_is_sata(dev)); | 756 | reset_stat = sas_phy_reset(phy, !dev_is_sata(dev)); |
1298 | 757 | ||
1299 | /* Terminate in-progress I/O now. */ | 758 | /* Explicitly resume the RNC here, since there was no task sent. */ |
1300 | isci_remote_device_nuke_requests(ihost, idev); | 759 | isci_remote_device_resume(ihost, idev, NULL, NULL); |
1301 | |||
1302 | /* Since all pending TCs have been cleaned, resume the RNC. */ | ||
1303 | status = isci_remote_device_reset_complete(ihost, idev); | ||
1304 | |||
1305 | if (status != SCI_SUCCESS) | ||
1306 | dev_dbg(&ihost->pdev->dev, | ||
1307 | "%s: isci_remote_device_reset_complete(%p) " | ||
1308 | "returned %d!\n", __func__, idev, status); | ||
1309 | 760 | ||
1310 | dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev); | 761 | dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n", |
762 | __func__, idev, reset_stat); | ||
1311 | out: | 763 | out: |
1312 | sas_put_local_phy(phy); | 764 | sas_put_local_phy(phy); |
1313 | return rc; | 765 | return rc; |
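Read top to bottom, the right-hand column of the isci_reset_device() hunk is the flow sketched below: terminate outstanding work first, hard-reset the phy (directly for a direct-attached phy, through libsas otherwise), then resume the RNC explicitly because no task was sent.

        int rc = TMF_RESP_FUNC_COMPLETE, reset_stat;
        struct sas_phy *phy = sas_get_local_phy(dev);
        struct isci_port *iport = dev->port->lldd_port;

        /* Suspend the RNC and terminate all outstanding TCs up front. */
        if (isci_remote_device_suspend_terminate(ihost, idev, NULL) != SCI_SUCCESS) {
                rc = TMF_RESP_FUNC_FAILED;
                goto out;
        }

        /* Hard reset: local phy directly, expander-attached via libsas. */
        if (scsi_is_sas_phy_local(phy))
                reset_stat = isci_port_perform_hard_reset(ihost, iport,
                                                          &ihost->phys[phy->number]);
        else
                reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));

        /* No task was sent, so the RNC must be resumed explicitly. */
        isci_remote_device_resume(ihost, idev, NULL, NULL);
out:
        sas_put_local_phy(phy);
        return rc;      /* reset_stat is only logged, never returned */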
@@ -1321,7 +773,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev) | |||
1321 | int ret; | 773 | int ret; |
1322 | 774 | ||
1323 | spin_lock_irqsave(&ihost->scic_lock, flags); | 775 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1324 | idev = isci_get_device(dev); | 776 | idev = isci_get_device(dev->lldd_dev); |
1325 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 777 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1326 | 778 | ||
1327 | if (!idev) { | 779 | if (!idev) { |
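One detail worth noting in the nexus-reset hunk above: the device reference is now taken from dev->lldd_dev rather than from the domain_device itself, which suggests (from this hunk alone) that isci_get_device() now accepts the driver's own remote-device object. The lookup still happens under scic_lock:

        spin_lock_irqsave(&ihost->scic_lock, flags);
        idev = isci_get_device(dev->lldd_dev);  /* LLDD-private device object */
        spin_unlock_irqrestore(&ihost->scic_lock, flags);

        if (!idev) {
                /* Device already gone; the error path is outside this hunk. */
        }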
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index 7b6d0e32fd9b..9c06cbad1d26 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h | |||
@@ -63,19 +63,6 @@ | |||
63 | struct isci_request; | 63 | struct isci_request; |
64 | 64 | ||
65 | /** | 65 | /** |
66 | * enum isci_tmf_cb_state - This enum defines the possible states in which the | ||
67 | * TMF callback function is invoked during the TMF execution process. | ||
68 | * | ||
69 | * | ||
70 | */ | ||
71 | enum isci_tmf_cb_state { | ||
72 | |||
73 | isci_tmf_init_state = 0, | ||
74 | isci_tmf_started, | ||
75 | isci_tmf_timed_out | ||
76 | }; | ||
77 | |||
78 | /** | ||
79 | * enum isci_tmf_function_codes - This enum defines the possible preparations | 66 | * enum isci_tmf_function_codes - This enum defines the possible preparations |
80 | * of task management requests. | 67 | * of task management requests. |
81 | * | 68 | * |
@@ -87,6 +74,7 @@ enum isci_tmf_function_codes { | |||
87 | isci_tmf_ssp_task_abort = TMF_ABORT_TASK, | 74 | isci_tmf_ssp_task_abort = TMF_ABORT_TASK, |
88 | isci_tmf_ssp_lun_reset = TMF_LU_RESET, | 75 | isci_tmf_ssp_lun_reset = TMF_LU_RESET, |
89 | }; | 76 | }; |
77 | |||
90 | /** | 78 | /** |
91 | * struct isci_tmf - This class represents the task management object which | 79 | * struct isci_tmf - This class represents the task management object which |
92 | * acts as an interface to libsas for processing task management requests | 80 | * acts as an interface to libsas for processing task management requests |
@@ -106,15 +94,6 @@ struct isci_tmf { | |||
106 | u16 io_tag; | 94 | u16 io_tag; |
107 | enum isci_tmf_function_codes tmf_code; | 95 | enum isci_tmf_function_codes tmf_code; |
108 | int status; | 96 | int status; |
109 | |||
110 | /* The optional callback function allows the user process to | ||
111 | * track the TMF transmit / timeout conditions. | ||
112 | */ | ||
113 | void (*cb_state_func)( | ||
114 | enum isci_tmf_cb_state, | ||
115 | struct isci_tmf *, void *); | ||
116 | void *cb_data; | ||
117 | |||
118 | }; | 97 | }; |
119 | 98 | ||
120 | static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) | 99 | static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) |
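With cb_state_func/cb_data removed, struct isci_tmf is now a plain parameter block. A caller fills it with the build helper and hands it to the execute routine, with no state callback to wire up; the call signatures below are taken from the task.c hunk earlier in this diff:

        struct isci_tmf tmf;

        isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, old_request);
        ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_ABORT_TASK_TIMEOUT_MS);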
@@ -208,113 +187,4 @@ int isci_queuecommand( | |||
208 | struct scsi_cmnd *scsi_cmd, | 187 | struct scsi_cmnd *scsi_cmd, |
209 | void (*donefunc)(struct scsi_cmnd *)); | 188 | void (*donefunc)(struct scsi_cmnd *)); |
210 | 189 | ||
211 | /** | ||
212 | * enum isci_completion_selection - This enum defines the possible actions to | ||
213 | * take with respect to a given request's notification back to libsas. | ||
214 | * | ||
215 | * | ||
216 | */ | ||
217 | enum isci_completion_selection { | ||
218 | |||
219 | isci_perform_normal_io_completion, /* Normal notify (task_done) */ | ||
220 | isci_perform_aborted_io_completion, /* No notification. */ | ||
221 | isci_perform_error_io_completion /* Use sas_task_abort */ | ||
222 | }; | ||
223 | |||
224 | /** | ||
225 | * isci_task_set_completion_status() - This function sets the completion status | ||
226 | * for the request. | ||
227 | * @task: This parameter is the completed request. | ||
228 | * @response: This parameter is the response code for the completed task. | ||
229 | * @status: This parameter is the status code for the completed task. | ||
230 | * | ||
231 | * @return The new notification mode for the request. | ||
232 | */ | ||
233 | static inline enum isci_completion_selection | ||
234 | isci_task_set_completion_status( | ||
235 | struct sas_task *task, | ||
236 | enum service_response response, | ||
237 | enum exec_status status, | ||
238 | enum isci_completion_selection task_notification_selection) | ||
239 | { | ||
240 | unsigned long flags; | ||
241 | |||
242 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
243 | |||
244 | /* If a device reset is being indicated, make sure the I/O | ||
245 | * is in the error path. | ||
246 | */ | ||
247 | if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { | ||
248 | /* Fail the I/O to make sure it goes into the error path. */ | ||
249 | response = SAS_TASK_UNDELIVERED; | ||
250 | status = SAM_STAT_TASK_ABORTED; | ||
251 | |||
252 | task_notification_selection = isci_perform_error_io_completion; | ||
253 | } | ||
254 | task->task_status.resp = response; | ||
255 | task->task_status.stat = status; | ||
256 | |||
257 | switch (task->task_proto) { | ||
258 | |||
259 | case SAS_PROTOCOL_SATA: | ||
260 | case SAS_PROTOCOL_STP: | ||
261 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: | ||
262 | |||
263 | if (task_notification_selection | ||
264 | == isci_perform_error_io_completion) { | ||
265 | * SATA/STP I/O has its own means of scheduling device | ||
266 | * error handling on the normal path. | ||
267 | */ | ||
268 | task_notification_selection | ||
269 | = isci_perform_normal_io_completion; | ||
270 | } | ||
271 | break; | ||
272 | default: | ||
273 | break; | ||
274 | } | ||
275 | |||
276 | switch (task_notification_selection) { | ||
277 | |||
278 | case isci_perform_error_io_completion: | ||
279 | |||
280 | if (task->task_proto == SAS_PROTOCOL_SMP) { | ||
281 | /* There is no error escalation in the SMP case. | ||
282 | * Convert to a normal completion to avoid the | ||
283 | * timeout in the discovery path and to let the | ||
284 | * next action take place quickly. | ||
285 | */ | ||
286 | task_notification_selection | ||
287 | = isci_perform_normal_io_completion; | ||
288 | |||
289 | /* Fall through to the normal case... */ | ||
290 | } else { | ||
291 | /* Use sas_task_abort */ | ||
292 | /* Leave SAS_TASK_STATE_DONE clear | ||
293 | * Leave SAS_TASK_AT_INITIATOR set. | ||
294 | */ | ||
295 | break; | ||
296 | } | ||
297 | |||
298 | case isci_perform_aborted_io_completion: | ||
299 | /* This path can occur with task-managed requests as well as | ||
300 | * requests terminated because of LUN or device resets. | ||
301 | */ | ||
302 | /* Fall through to the normal case... */ | ||
303 | case isci_perform_normal_io_completion: | ||
304 | /* Normal notification (task_done) */ | ||
305 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
306 | task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | | ||
307 | SAS_TASK_STATE_PENDING); | ||
308 | break; | ||
309 | default: | ||
310 | WARN_ONCE(1, "unknown task_notification_selection: %d\n", | ||
311 | task_notification_selection); | ||
312 | break; | ||
313 | } | ||
314 | |||
315 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
316 | |||
317 | return task_notification_selection; | ||
318 | |||
319 | } | ||
320 | #endif /* !defined(_SCI_TASK_H_) */ | 190 | #endif /* !defined(_SCI_TASK_H_) */ |