path: root/drivers/scsi/isci/task.c
author     Dan Williams <dan.j.williams@intel.com>   2011-03-11 13:13:51 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2011-07-03 06:55:30 -0400
commit     1077a574103177bff22b7cdd155d960f46ac1e8f (patch)
tree       4ccbb76979a661c240464d5a951fbd96893b5586   /drivers/scsi/isci/task.c
parent     34cad85d18d6da1cc11e410046d7572e65b19fcf (diff)
isci: fix incorrect assumptions about task->dev and task->dev->port being NULL
A domain_device has the same lifetime as its related scsi_target. The scsi_target is reference counted based on outstanding commands, so it is safe to assume that if we have a valid sas_task, the ->dev pointer is also valid. The asd_sas_port of a domain_device has the same lifetime as the driver, so it likewise can never be NULL as long as the sas_task is valid and the driver is loaded.

Also clean up isci_task_complete_for_upper_layer(): rename it to isci_task_refuse() and drop the isci_completion_selection parameter, which every caller set to isci_perform_normal_io_completion.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/scsi/isci/task.c')
-rw-r--r--   drivers/scsi/isci/task.c | 168
1 file changed, 55 insertions(+), 113 deletions(-)
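One detail the commit message does not call out: the diff below also replaces the driver's open-coded do/while walk of the aggregate task list (and its next_task label) with a small for_each_sas_task() iterator macro. The following is a rough user-space sketch of that iteration pattern only; the names fake_task, list_node, fake_container_of() and for_each_fake_task() are illustrative stand-ins, not driver or libsas code (the real macro uses list_entry() on the sas_task's embedded list node).

/*
 * User-space sketch of the iteration pattern behind for_each_sas_task().
 * All names here are illustrative stand-ins, not kernel or libsas code.
 */
#include <stddef.h>
#include <stdio.h>

struct list_node {
        struct list_node *next;
};

struct fake_task {
        int id;
        struct list_node list;          /* embedded node, like sas_task.list */
};

/* the same pointer arithmetic the kernel's list_entry()/container_of() does */
#define fake_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* mirrors the new macro: visit `num` tasks linked through their ->list node */
#define for_each_fake_task(num, task) \
        for (; (num) > 0; (num)--, \
             (task) = fake_container_of((task)->list.next, struct fake_task, list))

int main(void)
{
        struct fake_task t[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        struct fake_task *task = &t[0];
        int num = 3;

        /* kernel list_heads are circular, so wire the stand-in nodes the same way */
        t[0].list.next = &t[1].list;
        t[1].list.next = &t[2].list;
        t[2].list.next = &t[0].list;

        for_each_fake_task(num, task)
                printf("task %d\n", task->id);  /* prints task 0, task 1, task 2 */

        return 0;
}

The real macro bounds the walk with the num count that libsas hands to the driver and simply follows task->list.next, which is why the rewritten loop in the patch needs neither a separate cursor nor an end-of-list check.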
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index d00b4c97b85b..d48368002504 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -67,39 +67,36 @@
 #include "task.h"
 
 /**
- * isci_task_complete_for_upper_layer() - This function completes the request
- *    to the upper layer driver in the case where an I/O needs to be completed
- *    back in the submit path.
- * @host: This parameter is a pointer to the host on which the the request
- *    should be queued (either as an error or success).
- * @task: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
+ * isci_task_refuse() - complete the request to the upper layer driver in
+ *    the case where an I/O needs to be completed back in the submit path.
+ * @ihost: host on which the the request was queued
+ * @task: request to complete
+ * @response: response code for the completed task.
+ * @status: status code for the completed task.
  *
- * none.
  */
-static void isci_task_complete_for_upper_layer(struct sas_task *task,
-                                               enum service_response response,
-                                               enum exec_status status,
-                                               enum isci_completion_selection task_notification_selection)
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+                             enum service_response response,
+                             enum exec_status status)
+
 {
-        unsigned long flags = 0;
-        struct Scsi_Host *host = NULL;
+        enum isci_completion_selection disposition;
 
-        task_notification_selection
-                = isci_task_set_completion_status(task, response, status,
-                                                  task_notification_selection);
+        disposition = isci_perform_normal_io_completion;
+        disposition = isci_task_set_completion_status(task, response, status,
+                                                      disposition);
 
         /* Tasks aborted specifically by a call to the lldd_abort_task
          * function should not be completed to the host in the regular path.
          */
-        switch (task_notification_selection) {
+        switch (disposition) {
         case isci_perform_normal_io_completion:
                 /* Normal notification (task_done) */
-                dev_dbg(task->dev->port->ha->dev,
+                dev_dbg(&ihost->pdev->dev,
                         "%s: Normal - task = %p, response=%d, status=%d\n",
                         __func__, task, response, status);
 
+                task->lldd_task = NULL;
                 if (dev_is_sata(task->dev)) {
                         /* Since we are still in the submit path, and since
                          * libsas takes the host lock on behalf of SATA
@@ -107,44 +104,36 @@ static void isci_task_complete_for_upper_layer(struct sas_task *task,
                          * before we can call back and report the I/O
                          * submission error.
                          */
-                        if (task->dev
-                            && task->dev->port
-                            && task->dev->port->ha) {
+                        unsigned long flags;
 
-                                host = task->dev->port->ha->core.shost;
-                                raw_local_irq_save(flags);
-                                spin_unlock(host->host_lock);
-                        }
+                        raw_local_irq_save(flags);
+                        spin_unlock(ihost->shost->host_lock);
                         task->task_done(task);
-                        if (host) {
-                                spin_lock(host->host_lock);
-                                raw_local_irq_restore(flags);
-                        }
+                        spin_lock(ihost->shost->host_lock);
+                        raw_local_irq_restore(flags);
                 } else
                         task->task_done(task);
-
-                task->lldd_task = NULL;
                 break;
 
         case isci_perform_aborted_io_completion:
                 /* No notification because this request is already in the
                  * abort path.
                  */
-                dev_warn(task->dev->port->ha->dev,
+                dev_warn(&ihost->pdev->dev,
                          "%s: Aborted - task = %p, response=%d, status=%d\n",
                          __func__, task, response, status);
                 break;
 
         case isci_perform_error_io_completion:
                 /* Use sas_task_abort */
-                dev_warn(task->dev->port->ha->dev,
+                dev_warn(&ihost->pdev->dev,
                          "%s: Error - task = %p, response=%d, status=%d\n",
                          __func__, task, response, status);
                 sas_task_abort(task);
                 break;
 
         default:
-                dev_warn(task->dev->port->ha->dev,
+                dev_warn(&ihost->pdev->dev,
                          "%s: isci task notification default case!",
                          __func__);
                 sas_task_abort(task);
@@ -152,6 +141,10 @@ static void isci_task_complete_for_upper_layer(struct sas_task *task,
         }
 }
 
+#define for_each_sas_task(num, task) \
+        for (; num > 0; num--,\
+             task = list_entry(task->list.next, struct sas_task, list))
+
 /**
  * isci_task_execute_task() - This function is one of the SAS Domain Template
  *    functions. This function is called by libsas to send a task down to
@@ -164,7 +157,7 @@ static void isci_task_complete_for_upper_layer(struct sas_task *task,
  */
 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 {
-        struct isci_host *isci_host;
+        struct isci_host *ihost = task->dev->port->ha->lldd_ha;
         struct isci_request *request = NULL;
         struct isci_remote_device *device;
         unsigned long flags;
@@ -172,60 +165,23 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
         enum sci_status status;
         enum isci_status device_status;
 
-        dev_dbg(task->dev->port->ha->dev, "%s: num=%d\n", __func__, num);
-
-        if ((task->dev == NULL) || (task->dev->port == NULL)) {
-
-                /* Indicate SAS_TASK_UNDELIVERED, so that the scsi midlayer
-                 * removes the target.
-                 */
-                isci_task_complete_for_upper_layer(
-                        task,
-                        SAS_TASK_UNDELIVERED,
-                        SAS_DEVICE_UNKNOWN,
-                        isci_perform_normal_io_completion
-                        );
-                return 0; /* The I/O was accepted (and failed). */
-        }
-        isci_host = isci_host_from_sas_ha(task->dev->port->ha);
+        dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
 
         /* Check if we have room for more tasks */
-        ret = isci_host_can_queue(isci_host, num);
+        ret = isci_host_can_queue(ihost, num);
 
         if (ret) {
-                dev_warn(task->dev->port->ha->dev, "%s: queue full\n", __func__);
+                dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
                 return ret;
         }
 
-        do {
-                dev_dbg(task->dev->port->ha->dev,
+        for_each_sas_task(num, task) {
+                dev_dbg(&ihost->pdev->dev,
                         "task = %p, num = %d; dev = %p; cmd = %p\n",
                         task, num, task->dev, task->uldd_task);
 
-                if ((task->dev == NULL) || (task->dev->port == NULL)) {
-                        dev_warn(task->dev->port->ha->dev,
-                                 "%s: task %p's port or dev == NULL!\n",
-                                 __func__, task);
-
-                        /* Indicate SAS_TASK_UNDELIVERED, so that the scsi
-                         * midlayer removes the target.
-                         */
-                        isci_task_complete_for_upper_layer(
-                                task,
-                                SAS_TASK_UNDELIVERED,
-                                SAS_DEVICE_UNKNOWN,
-                                isci_perform_normal_io_completion
-                                );
-                        /* We don't have a valid host reference, so we
-                         * can't control the host queueing condition.
-                         */
-                        goto next_task;
-                }
-
                 device = isci_dev_from_domain_dev(task->dev);
 
-                isci_host = isci_host_from_sas_ha(task->dev->port->ha);
-
                 if (device)
                         device_status = device->status;
                 else
@@ -239,34 +195,28 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                 if (device_status != isci_ready_for_io) {
 
                         /* Forces a retry from scsi mid layer. */
-                        dev_warn(task->dev->port->ha->dev,
+                        dev_warn(&ihost->pdev->dev,
                                  "%s: task %p: isci_host->status = %d, "
                                  "device = %p; device_status = 0x%x\n\n",
                                  __func__,
                                  task,
-                                 isci_host_get_state(isci_host),
+                                 isci_host_get_state(ihost),
                                  device, device_status);
 
                         if (device_status == isci_ready) {
                                 /* Indicate QUEUE_FULL so that the scsi midlayer
                                  * retries.
                                  */
-                                isci_task_complete_for_upper_layer(
-                                        task,
-                                        SAS_TASK_COMPLETE,
-                                        SAS_QUEUE_FULL,
-                                        isci_perform_normal_io_completion
-                                        );
+                                isci_task_refuse(ihost, task,
+                                                 SAS_TASK_COMPLETE,
+                                                 SAS_QUEUE_FULL);
                         } else {
                                 /* Else, the device is going down. */
-                                isci_task_complete_for_upper_layer(
-                                        task,
-                                        SAS_TASK_UNDELIVERED,
-                                        SAS_DEVICE_UNKNOWN,
-                                        isci_perform_normal_io_completion
-                                        );
+                                isci_task_refuse(ihost, task,
+                                                 SAS_TASK_UNDELIVERED,
+                                                 SAS_DEVICE_UNKNOWN);
                         }
-                        isci_host_can_dequeue(isci_host, 1);
+                        isci_host_can_dequeue(ihost, 1);
                 } else {
                         /* There is a device and it's ready for I/O. */
                         spin_lock_irqsave(&task->task_state_lock, flags);
@@ -276,12 +226,9 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                                 spin_unlock_irqrestore(&task->task_state_lock,
                                                        flags);
 
-                                isci_task_complete_for_upper_layer(
-                                        task,
-                                        SAS_TASK_UNDELIVERED,
-                                        SAM_STAT_TASK_ABORTED,
-                                        isci_perform_normal_io_completion
-                                        );
+                                isci_task_refuse(ihost, task,
+                                                 SAS_TASK_UNDELIVERED,
+                                                 SAM_STAT_TASK_ABORTED);
 
                                 /* The I/O was aborted. */
 
@@ -290,7 +237,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                                 spin_unlock_irqrestore(&task->task_state_lock, flags);
 
                                 /* build and send the request. */
-                                status = isci_request_execute(isci_host, task, &request,
+                                status = isci_request_execute(ihost, task, &request,
                                                               gfp_flags);
 
                                 if (status != SCI_SUCCESS) {
@@ -307,19 +254,14 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                                          * SAS_TASK_UNDELIVERED next time
                                          * through.
                                          */
-                                        isci_task_complete_for_upper_layer(
-                                                task,
-                                                SAS_TASK_COMPLETE,
-                                                SAS_QUEUE_FULL,
-                                                isci_perform_normal_io_completion
-                                                );
-                                        isci_host_can_dequeue(isci_host, 1);
+                                        isci_task_refuse(ihost, task,
+                                                         SAS_TASK_COMPLETE,
+                                                         SAS_QUEUE_FULL);
+                                        isci_host_can_dequeue(ihost, 1);
                                 }
                         }
                 }
-next_task:
-                task = list_entry(task->list.next, struct sas_task, list);
-        } while (--num > 0);
+        }
         return 0;
 }
 