author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/scsi/scsi_lib.c
tag       v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/scsi/scsi_lib.c')

 -rw-r--r--  drivers/scsi/scsi_lib.c  |  2023

 1 files changed, 2023 insertions, 0 deletions

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
new file mode 100644
index 000000000000..7cbc4127fb5a
--- /dev/null
+++ b/drivers/scsi/scsi_lib.c
@@ -0,0 +1,2023 @@
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR	(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE	32

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        kmem_cache_t    *slab;
        mempool_t       *pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
        SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP
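/*
 * Pool selection rounds a command's scatterlist demand up to the next
 * size above: e.g. a request that maps to 20 physical segments draws
 * its table from "sgpool-32" (see scsi_alloc_sgtable() below).
 */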


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq    - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
        /*
         * Because users of this function are apt to reuse requests with no
         * modification, we have to sanitise the request flags here
         */
        sreq->sr_request->flags &= ~REQ_DONTPREP;
        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
                           at_head, sreq, 0);
        return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * We are inserting the command into the ml queue.  First, we
         * cancel the timer, so it doesn't time out.
         */
        scsi_delete_timer(cmd);

        /*
         * Next, set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Register the fact that we own the thing for now.
         */
        cmd->state = SCSI_STATE_MLQUEUE;
        cmd->owner = SCSI_OWNER_MIDLEVEL;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Insert this command at the head of the queue for its device.
         * It will go before all other commands that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although this *doesn't* plug the queue, it does call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
        return 0;
}
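/*
 * The blocked counters set above act as a countdown: later queue runs
 * decrement them in scsi_dev_queue_ready() and scsi_host_queue_ready()
 * below, so a device that reported QUEUE_FULL is left alone for
 * max_device_blocked further attempts (SCSI_DEFAULT_DEVICE_BLOCKED,
 * i.e. 3, unless a driver overrides it) before commands flow again.
 */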

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq    - command descriptor.
 *              cmnd    - actual SCSI command to be performed.
 *              buffer  - data buffer.
 *              bufflen - size of data buffer.
 *              done    - completion function to be run.
 *              timeout - how long to let it run before timeout.
 *              retries - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
                 void *buffer, unsigned bufflen,
                 void (*done)(struct scsi_cmnd *),
                 int timeout, int retries)
{
        /*
         * If the upper level driver is reusing these things, then
         * we should release the low-level block now.  Another one will
         * be allocated later when this request is getting queued.
         */
        __scsi_release_request(sreq);

        /*
         * Our own function scsi_done (which marks the host as not busy,
         * disables the timeout counter, etc) will be called by us or by the
         * scsi_hosts[host].queuecommand() function; it in turn needs to call
         * the completion function for the high level driver.
         */
        memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
        sreq->sr_bufflen = bufflen;
        sreq->sr_buffer = buffer;
        sreq->sr_allowed = retries;
        sreq->sr_done = done;
        sreq->sr_timeout_per_command = timeout;

        if (sreq->sr_cmd_len == 0)
                sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);

static void scsi_wait_done(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        struct request_queue *q = cmd->device->request_queue;
        unsigned long flags;

        req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        if (req->waiting)
                complete(req->waiting);
}

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
        BUG_ON(!req->waiting);

        complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
                   unsigned bufflen, int timeout, int retries)
{
        DECLARE_COMPLETION(wait);

        sreq->sr_request->waiting = &wait;
        sreq->sr_request->rq_status = RQ_SCSI_BUSY;
        sreq->sr_request->end_io = scsi_wait_req_end_io;
        scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
                        timeout, retries);
        wait_for_completion(&wait);
        sreq->sr_request->waiting = NULL;
        if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
                sreq->sr_result |= (DRIVER_ERROR << 24);

        __scsi_release_request(sreq);
}
EXPORT_SYMBOL(scsi_wait_req);
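/*
 * Typical caller pattern (an illustrative sketch modelled on sd/sr-style
 * users of this interface, not code from this file): issue a blocking
 * TEST UNIT READY and inspect the result.
 *
 *      unsigned char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *      struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *
 *      if (sreq) {
 *              sreq->sr_data_direction = DMA_NONE;
 *              scsi_wait_req(sreq, cmd, NULL, 0, 30 * HZ, 3);
 *              if (sreq->sr_result)
 *                      ... not ready, look at the sense data ...
 *              scsi_release_request(sreq);
 *      }
 */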

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->owner = SCSI_OWNER_MIDLEVEL;
        cmd->serial_number = 0;
        cmd->serial_number_at_timeout = 0;
        cmd->abort_reason = 0;

        memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        cmd->old_use_sg = cmd->use_sg;
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;
        memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
        cmd->buffer = cmd->request_buffer;
        cmd->bufflen = cmd->request_bufflen;
        cmd->internal_timeout = NORMAL_TIMEOUT;
        cmd->abort_reason = 0;

        return 1;
}

/*
 * Function:    scsi_setup_cmd_retry()
 *
 * Purpose:     Restore the command state for a retry
 *
 * Arguments:   cmd     - command to be restored
 *
 * Returns:     Nothing
 *
 * Notes:       Immediately prior to retrying a command, we need
 *              to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
        memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
        cmd->request_buffer = cmd->buffer;
        cmd->request_bufflen = cmd->bufflen;
        cmd->use_sg = cmd->old_use_sg;
        cmd->cmd_len = cmd->old_cmd_len;
        cmd->sc_data_direction = cmd->sc_old_data_direction;
        cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(&sdev->sdev_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(&sdev->sdev_lock, flags);
}
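/*
 * Note the hand-over-hand locking above: interrupts stay disabled for
 * the whole sequence (flags are saved with the first lock and restored
 * with the last unlock), while ownership passes from host_lock to
 * sdev_lock so the two busy counts are never updated under one lock.
 */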

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        if (sdev->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        while (!list_empty(&shost->starved_list) &&
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(sdev->request_queue);

                spin_lock_irqsave(shost->host_lock, flags);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
                         * starved list. This is unlikely but without this
                         * in theory we could loop forever.
                         */
                        break;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}
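/*
 * The starved_list is the fairness mechanism here: a device that could
 * not be issued to because the *host* was at its limits parks itself on
 * the list (see scsi_host_queue_ready() below), and every completion
 * funnels through this function to give each parked queue a chance
 * before the completing device's own queue is run again.
 */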

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can be for a number of reasons - the main one is
 *              I/O errors in the middle of the request, in which case
 *              we need to request the blocks that come after the bad
 *              sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        cmd->request->flags &= ~REQ_DONTPREP;
        blk_insert_request(q, cmd->request, 1, cmd, 1);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct request_queue *q = cmd->device->request_queue;

        scsi_put_command(cmd);
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                          int bytes, int requeue)
{
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (end_that_request_chunk(req, uptodate, bytes)) {
                int leftover = (req->hard_nr_sectors << 9);

                if (blk_pc_request(req))
                        leftover = req->data_len;
                /* kill remainder if no retries */
                if (!uptodate && blk_noretry_request(req))
                        end_that_request_chunk(req, 0, leftover);
                else {
                        if (requeue)
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_requeue_command(q, cmd);

                        return cmd;
                }
        }

        add_disk_randomness(req->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        end_that_request_last(req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        scsi_next_command(cmd);
        return NULL;
}
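/*
 * Worked example (illustrative): a 64KiB read completes its first 48KiB
 * and then hits a bad sector.  The caller ends the good 48KiB here, and
 * because bytes did not cover the whole request, end_that_request_chunk()
 * reports leftovers and (with requeue set) the remaining 16KiB is pushed
 * back to the head of the queue for a fresh attempt.
 */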

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;

        BUG_ON(!cmd->use_sg);

        switch (cmd->use_sg) {
        case 1 ... 8:
                cmd->sglist_len = 0;
                break;
        case 9 ... 16:
                cmd->sglist_len = 1;
                break;
        case 17 ... 32:
                cmd->sglist_len = 2;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        case 33 ... 64:
                cmd->sglist_len = 3;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        case 65 ... 128:
                cmd->sglist_len = 4;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        case 129 ... 256:
                cmd->sglist_len = 5;
                break;
#endif
#endif
#endif
        default:
                return NULL;
        }

        sgp = scsi_sg_pools + cmd->sglist_len;
        sgl = mempool_alloc(sgp->pool, gfp_mask);
        if (sgl)
                memset(sgl, 0, sgp->size);
        return sgl;
}
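/*
 * sglist_len doubles as the index back into scsi_sg_pools[], which is
 * what scsi_free_sgtable() below relies on: index 2, for instance, means
 * the table came from (and must be returned to) the "sgpool-32" mempool.
 */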

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
        struct scsi_host_sg_pool *sgp;

        BUG_ON(index > SG_MEMPOOL_NR);

        sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
        else if (cmd->request_buffer != req->buffer)
                kfree(cmd->request_buffer);

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must do one of several things here:
 *
 *              a) Call scsi_end_request.  This will finish off the
 *                 specified number of sectors.  If we are done, the
 *                 command block will be released, and the queue
 *                 function will be goosed.  If we are not done, then
 *                 scsi_end_request will directly goose the queue.
 *
 *              b) We can just use scsi_requeue_command() here.  This would
 *                 be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                        unsigned int block_bytes)
{
        int result = cmd->result;
        int this_count = cmd->bufflen;
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int clear_errors = 1;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;

        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
                return;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
        else if (cmd->buffer != req->buffer) {
                if (rq_data_dir(req) == READ) {
                        unsigned long flags;
                        char *to = bio_kmap_irq(req->bio, &flags);
                        memcpy(to, cmd->buffer, cmd->bufflen);
                        bio_kunmap_irq(to, &flags);
                }
                kfree(cmd->buffer);
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                } else
                        req->data_len = cmd->resid;
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_bytes >= 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
                                              req->nr_sectors, good_bytes));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

                if (clear_errors)
                        req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_bytes != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */
                cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

                /*
                 * If the command completed without error, then either finish off the
                 * rest of the command, or start a new one.
                 */
                if (result == 0 || cmd == NULL) {
                        return;
                }
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* detected disc change.  set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                cmd = scsi_end_request(cmd, 0,
                                                this_count, 1);
                                return;
                        } else {
                                /*
                                 * Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * request and see what happens.
                                 */
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /*
                         * If we had an ILLEGAL REQUEST returned, then we may
                         * have performed an unsupported command.  The only
                         * thing this should be would be a ten byte read where
                         * only a six byte read was supported.  Also, on a
                         * system where READ CAPACITY failed, we may have read
                         * past the end of the disk.
                         */
                        if (cmd->device->use_10_for_rw &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                cmd->device->use_10_for_rw = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_requeue_command(q, cmd);
                                result = 0;
                        } else {
                                cmd = scsi_end_request(cmd, 0, this_count, 1);
                                return;
                        }
                        break;
                case NOT_READY:
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        printk(KERN_INFO "Device %s not ready.\n",
                               req->rq_disk ? req->rq_disk->disk_name : "");
                        cmd = scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
                        printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
                               cmd->device->host->host_no,
                               (int)cmd->device->channel,
                               (int)cmd->device->id, (int)cmd->device->lun);
                        __scsi_print_command(cmd->data_cmnd);
                        scsi_print_sense("", cmd);
                        cmd = scsi_end_request(cmd, 0, block_bytes, 1);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_requeue_command(q, cmd);
                return;
        }
        if (result) {
                printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
                       "= 0x%x\n", cmd->device->host->host_no,
                       cmd->device->channel,
                       cmd->device->id,
                       cmd->device->lun, result);

                if (driver_byte(result) & DRIVER_SENSE)
                        scsi_print_sense("", cmd);
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                block_bytes = req->hard_cur_sectors << 9;
                if (!block_bytes)
                        block_bytes = req->data_len;
                cmd = scsi_end_request(cmd, 0, block_bytes, 1);
        }
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct request     *req = cmd->request;
        struct scatterlist *sgpnt;
        int                count;

        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
        if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
                cmd->use_sg = 0;
                return 0;
        }

        /*
         * we used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;

        /*
         * if sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt)) {
                req->flags |= REQ_SPECIAL;
                return BLKPREP_DEFER;
        }

        cmd->request_buffer = (char *) sgpnt;
        cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

        /*
         * mapped well, send it off
         */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
                return 0;
        }

        printk(KERN_ERR "Incorrect number of segments after building list\n");
        printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
        printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
                        req->current_nr_sectors);

        /* release the command and kill it */
        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
        return BLKPREP_KILL;
}
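/*
 * Flow summary: REQ_BLOCK_PC requests that carry their payload in
 * req->data bypass scatter-gather entirely; everything else gets a
 * table sized from req->nr_phys_segments, and blk_rq_map_sg() must
 * never produce more entries than that estimate, hence the final
 * sanity check.
 */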

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;

                if (drv->prepare_flush)
                        return drv->prepare_flush(q, rq);
        }

        return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct request *flush_rq = rq->end_io_data;
        struct scsi_driver *drv;

        if (flush_rq->errors) {
                printk("scsi: barrier error, disabling flush support\n");
                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
        }

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
                drv->end_flush(q, rq);
        }
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                               sector_t *error_sector)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state != SDEV_RUNNING)
                return -ENXIO;

        drv = *(struct scsi_driver **) disk->private_data;
        if (drv->issue_flush)
                return drv->issue_flush(&sdev->sdev_gendev, error_sector);

        return -EOPNOTSUPP;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
        int specials_only = 0;

        /*
         * Just check to see if the device is online.  If it isn't, we
         * refuse to process any commands.  The device must be brought
         * online before trying any recovery commands
         */
        if (unlikely(!scsi_device_online(sdev))) {
                printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                       sdev->host->host_no, sdev->id, sdev->lun);
                return BLKPREP_KILL;
        }
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                /* OK, we're not in a running state don't prep
                 * user commands */
                if (sdev->sdev_state == SDEV_DEL) {
                        /* Device is fully deleted, no commands
                         * at all allowed down */
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }
                /* OK, we only allow special commands (i.e. not
                 * user initiated ones */
                specials_only = sdev->sdev_state;
        }

        /*
         * Find the actual device driver associated with this command.
         * The SPECIAL requests are things like character device or
         * ioctls, which did not originate from ll_rw_blk.  Note that
         * the special field is also used to indicate the cmd for
         * the remainder of a partially fulfilled request that can
         * come up when there is a medium error.  We have to treat
         * these two cases differently.  We differentiate by looking
         * at request->cmd, as this tells us the real story.
         */
        if (req->flags & REQ_SPECIAL) {
                struct scsi_request *sreq = req->special;

                if (sreq->sr_magic == SCSI_REQ_MAGIC) {
                        cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                        scsi_init_cmd_from_req(cmd, sreq);
                } else
                        cmd = req->special;
        } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

                if(unlikely(specials_only)) {
                        if(specials_only == SDEV_QUIESCE ||
                                        specials_only == SDEV_BLOCK)
                                return BLKPREP_DEFER;

                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }


                /*
                 * Now try and find a command block that we can use.
                 */
                if (!req->special) {
                        cmd = scsi_get_command(sdev, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                } else
                        cmd = req->special;

                /* pull a tag out of the request if we have one */
                cmd->tag = req->tag;
        } else {
                blk_dump_rq_flags(req, "SCSI bad req");
                return BLKPREP_KILL;
        }

        /* note the overloading of req->special.  When the tag
         * is active it always means cmd.  If the tag goes
         * back for re-queueing, it may be reset */
        req->special = cmd;
        cmd->request = req;

        /*
         * FIXME: drop the lock here because the functions below
         * expect to be called without the queue lock held.  Also,
         * previously, we dequeued the request before dropping the
         * lock.  We hope REQ_STARTED prevents anything untoward from
         * happening now.
         */
        if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
                struct scsi_driver *drv;
                int ret;

                /*
                 * This will do a couple of things:
                 *  1) Fill in the actual SCSI command.
                 *  2) Fill in any other upper-level specific fields
                 *     (timeout).
                 *
                 * If this returns 0, it means that the request failed
                 * (reading past end of disk, reading offline device,
                 * etc).  This won't actually talk to the device, but
                 * some kinds of consistency checking may cause the
                 * request to be rejected immediately.
                 */

                /*
                 * This sets up the scatter-gather table (allocating if
                 * required).
                 */
                ret = scsi_init_io(cmd);
                if (ret)        /* BLKPREP_KILL return also releases the command */
                        return ret;

                /*
                 * Initialize the actual SCSI command for this request.
                 */
                drv = *(struct scsi_driver **)req->rq_disk->private_data;
                if (unlikely(!drv->init_command(cmd))) {
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
                        return BLKPREP_KILL;
                }
        }

        /*
         * The request is now prepped, no need to come back here
         */
        req->flags |= REQ_DONTPREP;
        return BLKPREP_OK;

 defer:
        /* If we defer, the elv_next_request() returns NULL, but the
         * queue must be restarted, so we plug here if no returning
         * command will automatically do that. */
        if (sdev->device_busy == 0)
                blk_plug_device(q);
        return BLKPREP_DEFER;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth)
                return 0;
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d (%d:%d) unblocking device at"
                                       " zero depth\n", sdev->host->host_no,
                                       sdev->id, sdev->lun));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if (sdev->device_blocked)
                return 0;

        return 1;
}
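/*
 * Countdown illustration: after QUEUE_FULL sets device_blocked = 3, the
 * first two queue runs that find the device idle decrement the counter,
 * replug the queue and refuse to issue; the third decrement reaches
 * zero and the device is unblocked, so commands flow again.
 */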

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                   struct Scsi_Host *shost,
                                   struct scsi_device *sdev)
{
        if (test_bit(SHOST_RECOVERY, &shost->shost_state))
                return 0;
        if (shost->host_busy == 0 && shost->host_blocked) {
                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (--shost->host_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d unblocking host at zero depth\n",
                                        shost->host_no));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry,
                                      &shost->starved_list);
                return 0;
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);

        return 1;
}

/*
 * Kill requests for a dead device
 */
static void scsi_kill_requests(request_queue_t *q)
{
        struct request *req;

        while ((req = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(req);
                req->flags |= REQ_QUIET;
                while (end_that_request_first(req, 0, req->nr_sectors))
                        ;
                end_that_request_last(req);
        }
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_cmnd *cmd;
        struct request *req;

        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
                scsi_kill_requests(q);
                return;
        }

        if(!get_device(&sdev->sdev_gendev))
                /* We must be tearing the block queue down already */
                return;

        /*
         * To start with, we keep looping until the queue is empty, or until
         * the host is no longer able to accept any more requests.
         */
        shost = sdev->host;
        while (!blk_queue_plugged(q)) {
                int rtn;
                /*
                 * get next queueable request.  We do this early to make sure
                 * that the request is fully prepared even if we cannot
                 * accept it.
                 */
                req = elv_next_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;

                if (unlikely(!scsi_device_online(sdev))) {
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        blkdev_dequeue_request(req);
                        req->flags |= REQ_QUIET;
                        while (end_that_request_first(req, 0, req->nr_sectors))
                                ;
                        end_that_request_last(req);
                        continue;
                }


                /*
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
                        blkdev_dequeue_request(req);
                sdev->device_busy++;

                spin_unlock(q->queue_lock);
                spin_lock(shost->host_lock);

                if (!scsi_host_queue_ready(q, shost, sdev))
                        goto not_ready;
                if (sdev->single_lun) {
                        if (scsi_target(sdev)->starget_sdev_user &&
                            scsi_target(sdev)->starget_sdev_user != sdev)
                                goto not_ready;
                        scsi_target(sdev)->starget_sdev_user = sdev;
                }
                shost->host_busy++;

                /*
                 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
                 *              take the lock again.
                 */
                spin_unlock_irq(shost->host_lock);

                cmd = req->special;
                if (unlikely(cmd == NULL)) {
                        printk(KERN_CRIT "impossible request in %s.\n"
                                         "please mail a stack trace to "
                                         "linux-scsi@vger.kernel.org",
                                         __FUNCTION__);
                        BUG();
                }

                /*
                 * Finally, initialize any error handling parameters, and set up
                 * the timers for timeouts.
                 */
                scsi_init_cmd_errh(cmd);

                /*
                 * Dispatch the command to the low-level driver.
                 */
                rtn = scsi_dispatch_cmd(cmd);
                spin_lock_irq(q->queue_lock);
                if(rtn) {
                        /* we're refusing the command; because of
                         * the way locks get dropped, we need to
                         * check here if plugging is required */
                        if(sdev->device_busy == 0)
                                blk_plug_device(q);

                        break;
                }
        }

        goto out;

 not_ready:
        spin_unlock_irq(shost->host_lock);

        /*
         * lock q, handle tag, requeue req, and decrement device_busy. We
         * must return with queue_lock held.
         *
         * Decrementing device_busy without checking it is OK, as all such
         * cases (host limits or settings) should run the queue at some
         * later time.
         */
        spin_lock_irq(q->queue_lock);
        blk_requeue_request(q, req);
        sdev->device_busy--;
        if(sdev->device_busy == 0)
                blk_plug_device(q);
 out:
        /* must be careful here...if we trigger the ->remove() function
         * we cannot be holding the q lock */
        spin_unlock_irq(q->queue_lock);
        put_device(&sdev->sdev_gendev);
        spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
        struct device *host_dev;
        u64 bounce_limit = 0xffffffff;

        if (shost->unchecked_isa_dma)
                return BLK_BOUNCE_ISA;
        /*
         * Platforms with virtual-DMA translation
         * hardware have no practical limit.
         */
        if (!PCI_DMA_BUS_IS_PHYS)
                return BLK_BOUNCE_ANY;

        host_dev = scsi_get_device(shost);
        if (host_dev && host_dev->dma_mask)
                bounce_limit = *host_dev->dma_mask;

        return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
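/*
 * The fallback of 0xffffffff means "bounce anything above 4GB": a host
 * whose device exposes no dma_mask is assumed to be capable of 32-bit
 * DMA only, while a 64-bit capable HBA advertises a wider mask and
 * avoids bouncing altogether.
 */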

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct request_queue *q;

        q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
        if (!q)
                return NULL;

        blk_queue_prep_rq(q, scsi_prep_fn);

        blk_queue_max_hw_segments(q, shost->sg_tablesize);
        blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
        blk_queue_max_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
        blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

        /*
         * ordered tags are superior to flush ordering
         */
        if (shost->ordered_tag)
                blk_queue_ordered(q, QUEUE_ORDERED_TAG);
        else if (shost->ordered_flush) {
                blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
                q->prepare_flush_fn = scsi_prepare_flush_fn;
                q->end_flush_fn = scsi_end_flush_fn;
        }

        if (!shost->use_clustering)
                clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        return q;
}
| 1456 | |||
| 1457 | void scsi_free_queue(struct request_queue *q) | ||
| 1458 | { | ||
| 1459 | blk_cleanup_queue(q); | ||
| 1460 | } | ||
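Everything scsi_alloc_queue() feeds into the blk_queue_*() calls above comes from fields the LLD filled in on its host. A sketch of a hypothetical host template (the "exdrv" name and values are invented; a real template also needs .queuecommand and friends) showing which field drives which queue limit:

```c
#include <scsi/scsi_host.h>

static struct scsi_host_template exdrv_template = {
	.name		= "exdrv",
	.sg_tablesize	= 64,		/* -> blk_queue_max_hw_segments()  */
	.max_sectors	= 256,		/* -> blk_queue_max_sectors()      */
	.dma_boundary	= 0xffffffff,	/* -> blk_queue_segment_boundary() */
	.use_clustering	= ENABLE_CLUSTERING,	/* keep QUEUE_FLAG_CLUSTER */
	/* .queuecommand, .this_id, ... omitted in this sketch */
};
```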
| 1461 | |||
| 1462 | /* | ||
| 1463 | * Function: scsi_block_requests() | ||
| 1464 | * | ||
| 1465 | * Purpose: Utility function used by low-level drivers to prevent further | ||
| 1466 | * commands from being queued to the device. | ||
| 1467 | * | ||
| 1468 | * Arguments: shost - Host in question | ||
| 1469 | * | ||
| 1470 | * Returns: Nothing | ||
| 1471 | * | ||
| 1472 | * Lock status: No locks are assumed held. | ||
| 1473 | * | ||
| 1474 | * Notes: There is no timer nor any other means by which the requests | ||
| 1475 | * get unblocked other than the low-level driver calling | ||
| 1476 | * scsi_unblock_requests(). | ||
| 1477 | */ | ||
| 1478 | void scsi_block_requests(struct Scsi_Host *shost) | ||
| 1479 | { | ||
| 1480 | shost->host_self_blocked = 1; | ||
| 1481 | } | ||
| 1482 | EXPORT_SYMBOL(scsi_block_requests); | ||
| 1483 | |||
| 1484 | /* | ||
| 1485 | * Function: scsi_unblock_requests() | ||
| 1486 | * | ||
| 1487 | * Purpose: Utility function used by low-level drivers to allow further | ||
| 1488 | * commands to be queued to the device. | ||
| 1489 | * | ||
| 1490 | * Arguments: shost - Host in question | ||
| 1491 | * | ||
| 1492 | * Returns: Nothing | ||
| 1493 | * | ||
| 1494 | * Lock status: No locks are assumed held. | ||
| 1495 | * | ||
| 1496 | * Notes: There is no timer nor any other means by which the requests | ||
| 1497 | * get unblocked other than the low-level driver calling | ||
| 1498 | * scsi_unblock_requests(). | ||
| 1499 | * | ||
| 1500 | * This is done as an API function so that changes to the | ||
| 1501 | * internals of the scsi mid-layer won't require wholesale | ||
| 1502 | * changes to drivers that use this feature. | ||
| 1503 | */ | ||
| 1504 | void scsi_unblock_requests(struct Scsi_Host *shost) | ||
| 1505 | { | ||
| 1506 | shost->host_self_blocked = 0; | ||
| 1507 | scsi_run_host_queues(shost); | ||
| 1508 | } | ||
| 1509 | EXPORT_SYMBOL(scsi_unblock_requests); | ||
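The two calls are meant to bracket temporary host-side outages. A hedged sketch of the usual pattern in an LLD recovery path (exdrv_reset_firmware() is a hypothetical stand-in for real hardware work):

```c
#include <scsi/scsi_host.h>

static int exdrv_reset_firmware(struct Scsi_Host *shost)
{
	return 0;	/* stand-in: real hardware work goes here */
}

/*
 * Fence off new commands while the adapter firmware restarts. While
 * host_self_blocked is set, the midlayer stops calling queuecommand();
 * scsi_unblock_requests() is the only thing that lifts the fence, and
 * it also reruns the device queues.
 */
static int exdrv_restart_adapter(struct Scsi_Host *shost)
{
	int err;

	scsi_block_requests(shost);
	err = exdrv_reset_firmware(shost);
	scsi_unblock_requests(shost);

	return err;
}
```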
| 1510 | |||
| 1511 | int __init scsi_init_queue(void) | ||
| 1512 | { | ||
| 1513 | int i; | ||
| 1514 | |||
| 1515 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | ||
| 1516 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | ||
| 1517 | int size = sgp->size * sizeof(struct scatterlist); | ||
| 1518 | |||
| 1519 | sgp->slab = kmem_cache_create(sgp->name, size, 0, | ||
| 1520 | SLAB_HWCACHE_ALIGN, NULL, NULL); | ||
| 1521 | if (!sgp->slab) { | ||
| 1522 | printk(KERN_ERR "SCSI: can't init sg slab %s\n", sgp->name); | ||
| 1523 | return -ENOMEM; | ||
| 1524 | } | ||
| 1525 | |||
| 1526 | sgp->pool = mempool_create(SG_MEMPOOL_SIZE, | ||
| 1527 | mempool_alloc_slab, mempool_free_slab, | ||
| 1528 | sgp->slab); | ||
| 1529 | if (!sgp->pool) { | ||
| 1530 | printk(KERN_ERR "SCSI: can't init sg mempool %s\n", sgp->name); | ||
| 1531 | return -ENOMEM; | ||
| 1532 | } | ||
| 1533 | } | ||
| 1534 | |||
| 1535 | return 0; | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | void scsi_exit_queue(void) | ||
| 1539 | { | ||
| 1540 | int i; | ||
| 1541 | |||
| 1542 | for (i = 0; i < SG_MEMPOOL_NR; i++) { | ||
| 1543 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | ||
| 1544 | mempool_destroy(sgp->pool); | ||
| 1545 | kmem_cache_destroy(sgp->slab); | ||
| 1546 | } | ||
| 1547 | } | ||
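The pools created above follow a simple sizing rule: a command needing nents scatterlist entries is served from the smallest pool that fits it, with sizes doubling from 8 up to SCSI_MAX_PHYS_SEGMENTS. A sketch of that rule (scsi_alloc_sgtable() earlier in this file encodes the same mapping as a switch statement; exdrv_sgpool_index() is an invented name):

```c
/* Map a scatterlist entry count to an index into scsi_sg_pools[]. */
static unsigned int exdrv_sgpool_index(unsigned int nents)
{
	unsigned int pool = 0, size = 8;

	while (size < nents) {	/* pool sizes: 8, 16, 32, ... */
		size <<= 1;
		pool++;
	}
	return pool;
}
```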
| 1548 | /** | ||
| 1549 | * __scsi_mode_sense - issue a mode sense, falling back from ten | ||
| 1550 | * to six bytes if necessary. | ||
| 1551 | * @sreq: SCSI request to fill in with the MODE_SENSE | ||
| 1552 | * @dbd: set the DBD bit (0x08) to disable block descriptors in the reply | ||
| 1553 | * @modepage: mode page being requested | ||
| 1554 | * @buffer: request buffer (may not be smaller than eight bytes) | ||
| 1555 | * @len: length of request buffer. | ||
| 1556 | * @timeout: command timeout | ||
| 1557 | * @retries: number of retries before failing | ||
| 1558 | * @data: returns a structure abstracting the mode header data | ||
| 1559 | * | ||
| 1560 | * Returns the command's result code (zero indicates success). On | ||
| 1561 | * success, @data->header_length holds the header offset (4 or 8, | ||
| 1562 | * depending on whether a six- or ten-byte command was issued). | ||
| 1563 | **/ | ||
| 1564 | int | ||
| 1565 | __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage, | ||
| 1566 | unsigned char *buffer, int len, int timeout, int retries, | ||
| 1567 | struct scsi_mode_data *data) { | ||
| 1568 | unsigned char cmd[12]; | ||
| 1569 | int use_10_for_ms; | ||
| 1570 | int header_length; | ||
| 1571 | |||
| 1572 | memset(data, 0, sizeof(*data)); | ||
| 1573 | memset(&cmd[0], 0, 12); | ||
| 1574 | cmd[1] = dbd & 0x18; /* allows DBD and LLBAA bits */ | ||
| 1575 | cmd[2] = modepage; | ||
| 1576 | |||
| 1577 | retry: | ||
| 1578 | use_10_for_ms = sreq->sr_device->use_10_for_ms; | ||
| 1579 | |||
| 1580 | if (use_10_for_ms) { | ||
| 1581 | if (len < 8) | ||
| 1582 | len = 8; | ||
| 1583 | |||
| 1584 | cmd[0] = MODE_SENSE_10; | ||
| 1585 | cmd[8] = len; | ||
| 1586 | header_length = 8; | ||
| 1587 | } else { | ||
| 1588 | if (len < 4) | ||
| 1589 | len = 4; | ||
| 1590 | |||
| 1591 | cmd[0] = MODE_SENSE; | ||
| 1592 | cmd[4] = len; | ||
| 1593 | header_length = 4; | ||
| 1594 | } | ||
| 1595 | |||
| 1596 | sreq->sr_cmd_len = 0; | ||
| 1597 | memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer)); | ||
| 1598 | sreq->sr_data_direction = DMA_FROM_DEVICE; | ||
| 1599 | |||
| 1600 | memset(buffer, 0, len); | ||
| 1601 | |||
| 1602 | scsi_wait_req(sreq, cmd, buffer, len, timeout, retries); | ||
| 1603 | |||
| 1604 | /* This code looks awful: what it's doing is making sure an | ||
| 1605 | * ILLEGAL REQUEST sense return identifies the actual command | ||
| 1606 | * byte as the problem. MODE_SENSE commands can return | ||
| 1607 | * ILLEGAL REQUEST if the code page isn't supported */ | ||
| 1608 | |||
| 1609 | if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) && | ||
| 1610 | (driver_byte(sreq->sr_result) & DRIVER_SENSE)) { | ||
| 1611 | struct scsi_sense_hdr sshdr; | ||
| 1612 | |||
| 1613 | if (scsi_request_normalize_sense(sreq, &sshdr)) { | ||
| 1614 | if ((sshdr.sense_key == ILLEGAL_REQUEST) && | ||
| 1615 | (sshdr.asc == 0x20) && (sshdr.ascq == 0)) { | ||
| 1616 | /* | ||
| 1617 | * Invalid command operation code | ||
| 1618 | */ | ||
| 1619 | sreq->sr_device->use_10_for_ms = 0; | ||
| 1620 | goto retry; | ||
| 1621 | } | ||
| 1622 | } | ||
| 1623 | } | ||
| 1624 | |||
| 1625 | if (scsi_status_is_good(sreq->sr_result)) { | ||
| 1626 | data->header_length = header_length; | ||
| 1627 | if (use_10_for_ms) { | ||
| 1628 | data->length = buffer[0]*256 + buffer[1] + 2; | ||
| 1629 | data->medium_type = buffer[2]; | ||
| 1630 | data->device_specific = buffer[3]; | ||
| 1631 | data->longlba = buffer[4] & 0x01; | ||
| 1632 | data->block_descriptor_length = buffer[6]*256 | ||
| 1633 | + buffer[7]; | ||
| 1634 | } else { | ||
| 1635 | data->length = buffer[0] + 1; | ||
| 1636 | data->medium_type = buffer[1]; | ||
| 1637 | data->device_specific = buffer[2]; | ||
| 1638 | data->block_descriptor_length = buffer[3]; | ||
| 1639 | } | ||
| 1640 | } | ||
| 1641 | |||
| 1642 | return sreq->sr_result; | ||
| 1643 | } | ||
| 1644 | EXPORT_SYMBOL(__scsi_mode_sense); | ||
| 1645 | |||
| 1646 | /** | ||
| 1647 | * scsi_mode_sense - issue a mode sense, falling back from ten | ||
| 1648 | * to six bytes if necessary. | ||
| 1649 | * @sdev: scsi device to send command to. | ||
| 1650 | * @dbd: set the DBD bit (0x08) to disable block descriptors in the reply | ||
| 1651 | * @modepage: mode page being requested | ||
| 1652 | * @buffer: request buffer (may not be smaller than eight bytes) | ||
| 1653 | * @len: length of request buffer. | ||
| 1654 | * @timeout: command timeout | ||
| 1655 | * @retries: number of retries before failing | ||
| 1656 | * @data: returns a structure abstracting the mode header data | ||
| 1657 | * | ||
| 1658 | * Returns the result of __scsi_mode_sense() (zero indicates success), | ||
| 1659 | * or -1 if no scsi_request could be allocated. | ||
| 1660 | **/ | ||
| 1661 | int | ||
| 1662 | scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, | ||
| 1663 | unsigned char *buffer, int len, int timeout, int retries, | ||
| 1664 | struct scsi_mode_data *data) | ||
| 1665 | { | ||
| 1666 | struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL); | ||
| 1667 | int ret; | ||
| 1668 | |||
| 1669 | if (!sreq) | ||
| 1670 | return -1; | ||
| 1671 | |||
| 1672 | ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len, | ||
| 1673 | timeout, retries, data); | ||
| 1674 | |||
| 1675 | scsi_release_request(sreq); | ||
| 1676 | |||
| 1677 | return ret; | ||
| 1678 | } | ||
| 1679 | EXPORT_SYMBOL(scsi_mode_sense); | ||
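A hedged caller sketch (the function name, page choice, buffer size, timeout and retry count are all illustrative, not from this file): fetch the caching mode page and index past the header and any block descriptors, using the offsets that scsi_mode_sense() returned in the scsi_mode_data structure:

```c
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

static int exdrv_read_cache_page(struct scsi_device *sdev)
{
	unsigned char buf[128];
	struct scsi_mode_data data;
	int res;

	res = scsi_mode_sense(sdev, 0 /* keep block descriptors */, 0x08,
			      buf, sizeof(buf), 10 * HZ, 3, &data);
	if (res < 0 || !scsi_status_is_good(res))
		return -EIO;

	/* the requested page begins after the header and descriptors */
	return buf[data.header_length + data.block_descriptor_length] & 0x3f;
}
```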
| 1680 | |||
| 1681 | int | ||
| 1682 | scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) | ||
| 1683 | { | ||
| 1684 | struct scsi_request *sreq; | ||
| 1685 | char cmd[] = { | ||
| 1686 | TEST_UNIT_READY, 0, 0, 0, 0, 0, | ||
| 1687 | }; | ||
| 1688 | int result; | ||
| 1689 | |||
| 1690 | sreq = scsi_allocate_request(sdev, GFP_KERNEL); | ||
| 1691 | if (!sreq) | ||
| 1692 | return -ENOMEM; | ||
| 1693 | |||
| 1694 | sreq->sr_data_direction = DMA_NONE; | ||
| 1695 | scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries); | ||
| 1696 | |||
| 1697 | if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) { | ||
| 1698 | struct scsi_sense_hdr sshdr; | ||
| 1699 | |||
| 1700 | if ((scsi_request_normalize_sense(sreq, &sshdr)) && | ||
| 1701 | ((sshdr.sense_key == UNIT_ATTENTION) || | ||
| 1702 | (sshdr.sense_key == NOT_READY))) { | ||
| 1703 | sdev->changed = 1; | ||
| 1704 | sreq->sr_result = 0; | ||
| 1705 | } | ||
| 1706 | } | ||
| 1707 | result = sreq->sr_result; | ||
| 1708 | scsi_release_request(sreq); | ||
| 1709 | return result; | ||
| 1710 | } | ||
| 1711 | EXPORT_SYMBOL(scsi_test_unit_ready); | ||
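Since scsi_test_unit_ready() already folds UNIT ATTENTION / NOT READY sense into sdev->changed for removable devices, a media poll reduces to checking the flag and the result code. A hypothetical sketch (exdrv_check_media() and the timeout values are invented):

```c
#include <linux/kernel.h>
#include <scsi/scsi_device.h>

static void exdrv_check_media(struct scsi_device *sdev)
{
	int ret = scsi_test_unit_ready(sdev, 10 * HZ, 3);

	if (sdev->changed)
		printk(KERN_NOTICE "exdrv: media change detected\n");
	else if (ret)
		printk(KERN_NOTICE "exdrv: unit not ready, result 0x%x\n", ret);
}
```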
| 1712 | |||
| 1713 | /** | ||
| 1714 | * scsi_device_set_state - Take the given device through the device | ||
| 1715 | * state model. | ||
| 1716 | * @sdev: scsi device to change the state of. | ||
| 1717 | * @state: state to change to. | ||
| 1718 | * | ||
| 1719 | * Returns zero if successful, or -EINVAL if the requested | ||
| 1720 | * transition is illegal. | ||
| 1721 | **/ | ||
| 1722 | int | ||
| 1723 | scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) | ||
| 1724 | { | ||
| 1725 | enum scsi_device_state oldstate = sdev->sdev_state; | ||
| 1726 | |||
| 1727 | if (state == oldstate) | ||
| 1728 | return 0; | ||
| 1729 | |||
| 1730 | switch (state) { | ||
| 1731 | case SDEV_CREATED: | ||
| 1732 | /* There are no legal states that come back to | ||
| 1733 | * created. This is the manually initialised start | ||
| 1734 | * state */ | ||
| 1735 | goto illegal; | ||
| 1736 | |||
| 1737 | case SDEV_RUNNING: | ||
| 1738 | switch (oldstate) { | ||
| 1739 | case SDEV_CREATED: | ||
| 1740 | case SDEV_OFFLINE: | ||
| 1741 | case SDEV_QUIESCE: | ||
| 1742 | case SDEV_BLOCK: | ||
| 1743 | break; | ||
| 1744 | default: | ||
| 1745 | goto illegal; | ||
| 1746 | } | ||
| 1747 | break; | ||
| 1748 | |||
| 1749 | case SDEV_QUIESCE: | ||
| 1750 | switch (oldstate) { | ||
| 1751 | case SDEV_RUNNING: | ||
| 1752 | case SDEV_OFFLINE: | ||
| 1753 | break; | ||
| 1754 | default: | ||
| 1755 | goto illegal; | ||
| 1756 | } | ||
| 1757 | break; | ||
| 1758 | |||
| 1759 | case SDEV_OFFLINE: | ||
| 1760 | switch (oldstate) { | ||
| 1761 | case SDEV_CREATED: | ||
| 1762 | case SDEV_RUNNING: | ||
| 1763 | case SDEV_QUIESCE: | ||
| 1764 | case SDEV_BLOCK: | ||
| 1765 | break; | ||
| 1766 | default: | ||
| 1767 | goto illegal; | ||
| 1768 | } | ||
| 1769 | break; | ||
| 1770 | |||
| 1771 | case SDEV_BLOCK: | ||
| 1772 | switch (oldstate) { | ||
| 1773 | case SDEV_CREATED: | ||
| 1774 | case SDEV_RUNNING: | ||
| 1775 | break; | ||
| 1776 | default: | ||
| 1777 | goto illegal; | ||
| 1778 | } | ||
| 1779 | break; | ||
| 1780 | |||
| 1781 | case SDEV_CANCEL: | ||
| 1782 | switch (oldstate) { | ||
| 1783 | case SDEV_CREATED: | ||
| 1784 | case SDEV_RUNNING: | ||
| 1785 | case SDEV_OFFLINE: | ||
| 1786 | case SDEV_BLOCK: | ||
| 1787 | break; | ||
| 1788 | default: | ||
| 1789 | goto illegal; | ||
| 1790 | } | ||
| 1791 | break; | ||
| 1792 | |||
| 1793 | case SDEV_DEL: | ||
| 1794 | switch (oldstate) { | ||
| 1795 | case SDEV_CANCEL: | ||
| 1796 | break; | ||
| 1797 | default: | ||
| 1798 | goto illegal; | ||
| 1799 | } | ||
| 1800 | break; | ||
| 1801 | |||
| 1802 | } | ||
| 1803 | sdev->sdev_state = state; | ||
| 1804 | return 0; | ||
| 1805 | |||
| 1806 | illegal: | ||
| 1807 | SCSI_LOG_ERROR_RECOVERY(1, | ||
| 1808 | dev_printk(KERN_ERR, &sdev->sdev_gendev, | ||
| 1809 | "Illegal state transition %s->%s\n", | ||
| 1810 | scsi_device_state_name(oldstate), | ||
| 1811 | scsi_device_state_name(state)) | ||
| 1812 | ); | ||
| 1813 | return -EINVAL; | ||
| 1814 | } | ||
| 1815 | EXPORT_SYMBOL(scsi_device_set_state); | ||
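The transition table above is the only gatekeeper, so callers must be prepared for -EINVAL. A sketch of driving it by hand (exdrv_begin_removal() is invented; real hot removal goes through scsi_remove_device(), which performs these transitions with the proper locking):

```c
#include <scsi/scsi_device.h>

static int exdrv_begin_removal(struct scsi_device *sdev)
{
	/* SDEV_DEL is only reachable from SDEV_CANCEL, so go there first */
	int err = scsi_device_set_state(sdev, SDEV_CANCEL);

	if (err)	/* -EINVAL: not legal from the current state */
		return err;

	return scsi_device_set_state(sdev, SDEV_DEL);
}
```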
| 1816 | |||
| 1817 | /** | ||
| 1818 | * scsi_device_quiesce - Block user issued commands. | ||
| 1819 | * @sdev: scsi device to quiesce. | ||
| 1820 | * | ||
| 1821 | * This works by trying to transition to the SDEV_QUIESCE state | ||
| 1822 | * (which must be a legal transition). When the device is in this | ||
| 1823 | * state, only special requests will be accepted, all others will | ||
| 1824 | * be deferred. Since special requests may also be requeued requests, | ||
| 1825 | * a successful return doesn't guarantee the device will be | ||
| 1826 | * totally quiescent. | ||
| 1827 | * | ||
| 1828 | * Must be called with user context, may sleep. | ||
| 1829 | * | ||
| 1830 | * Returns zero if successful, or an error if not. | ||
| 1831 | **/ | ||
| 1832 | int | ||
| 1833 | scsi_device_quiesce(struct scsi_device *sdev) | ||
| 1834 | { | ||
| 1835 | int err = scsi_device_set_state(sdev, SDEV_QUIESCE); | ||
| 1836 | if (err) | ||
| 1837 | return err; | ||
| 1838 | |||
| 1839 | scsi_run_queue(sdev->request_queue); | ||
| 1840 | while (sdev->device_busy) { | ||
| 1841 | msleep_interruptible(200); | ||
| 1842 | scsi_run_queue(sdev->request_queue); | ||
| 1843 | } | ||
| 1844 | return 0; | ||
| 1845 | } | ||
| 1846 | EXPORT_SYMBOL(scsi_device_quiesce); | ||
| 1847 | |||
| 1848 | /** | ||
| 1849 | * scsi_device_resume - Restart user issued commands to a quiesced device. | ||
| 1850 | * @sdev: scsi device to resume. | ||
| 1851 | * | ||
| 1852 | * Moves the device from quiesced back to running and restarts the | ||
| 1853 | * queues. | ||
| 1854 | * | ||
| 1855 | * Must be called with user context, may sleep. | ||
| 1856 | **/ | ||
| 1857 | void | ||
| 1858 | scsi_device_resume(struct scsi_device *sdev) | ||
| 1859 | { | ||
| 1860 | if (scsi_device_set_state(sdev, SDEV_RUNNING)) | ||
| 1861 | return; | ||
| 1862 | scsi_run_queue(sdev->request_queue); | ||
| 1863 | } | ||
| 1864 | EXPORT_SYMBOL(scsi_device_resume); | ||
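Quiesce and resume are meant to bracket internal operations that must not compete with user I/O. A hedged sketch of the pairing (exdrv_retune() is a hypothetical stand-in for work such as domain validation; both scsi_device_quiesce() and scsi_device_resume() may sleep, so this must run in user context):

```c
#include <scsi/scsi_device.h>

static int exdrv_retune(struct scsi_device *sdev)
{
	return 0;	/* stand-in: real internal commands go here */
}

static int exdrv_retune_quiesced(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;

	/* only special (internally generated) requests are processed now */
	err = exdrv_retune(sdev);

	scsi_device_resume(sdev);	/* restart deferred user commands */
	return err;
}
```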
| 1865 | |||
| 1866 | static void | ||
| 1867 | device_quiesce_fn(struct scsi_device *sdev, void *data) | ||
| 1868 | { | ||
| 1869 | scsi_device_quiesce(sdev); | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | void | ||
| 1873 | scsi_target_quiesce(struct scsi_target *starget) | ||
| 1874 | { | ||
| 1875 | starget_for_each_device(starget, NULL, device_quiesce_fn); | ||
| 1876 | } | ||
| 1877 | EXPORT_SYMBOL(scsi_target_quiesce); | ||
| 1878 | |||
| 1879 | static void | ||
| 1880 | device_resume_fn(struct scsi_device *sdev, void *data) | ||
| 1881 | { | ||
| 1882 | scsi_device_resume(sdev); | ||
| 1883 | } | ||
| 1884 | |||
| 1885 | void | ||
| 1886 | scsi_target_resume(struct scsi_target *starget) | ||
| 1887 | { | ||
| 1888 | starget_for_each_device(starget, NULL, device_resume_fn); | ||
| 1889 | } | ||
| 1890 | EXPORT_SYMBOL(scsi_target_resume); | ||
| 1891 | |||
| 1892 | /** | ||
| 1893 | * scsi_internal_device_block - internal function to put a device | ||
| 1894 | * temporarily into the SDEV_BLOCK state | ||
| 1895 | * @sdev: device to block | ||
| 1896 | * | ||
| 1897 | * Block request made by scsi lld's to temporarily stop all | ||
| 1898 | * scsi commands on the specified device. Called from interrupt | ||
| 1899 | * or normal process context. | ||
| 1900 | * | ||
| 1901 | * Returns zero if successful, or an error if not. | ||
| 1902 | * | ||
| 1903 | * Notes: | ||
| 1904 | * This routine transitions the device to the SDEV_BLOCK state | ||
| 1905 | * (which must be a legal transition). When the device is in this | ||
| 1906 | * state, all commands are deferred until the scsi lld reenables | ||
| 1907 | * the device with scsi_internal_device_unblock() or device_block_tmo fires. | ||
| 1908 | * This routine assumes the host_lock is held on entry. | ||
| 1909 | **/ | ||
| 1910 | int | ||
| 1911 | scsi_internal_device_block(struct scsi_device *sdev) | ||
| 1912 | { | ||
| 1913 | request_queue_t *q = sdev->request_queue; | ||
| 1914 | unsigned long flags; | ||
| 1915 | int err = 0; | ||
| 1916 | |||
| 1917 | err = scsi_device_set_state(sdev, SDEV_BLOCK); | ||
| 1918 | if (err) | ||
| 1919 | return err; | ||
| 1920 | |||
| 1921 | /* | ||
| 1922 | * The device has transitioned to SDEV_BLOCK. Stop the | ||
| 1923 | * block layer from calling the midlayer with this device's | ||
| 1924 | * request queue. | ||
| 1925 | */ | ||
| 1926 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 1927 | blk_stop_queue(q); | ||
| 1928 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 1929 | |||
| 1930 | return 0; | ||
| 1931 | } | ||
| 1932 | EXPORT_SYMBOL_GPL(scsi_internal_device_block); | ||
| 1933 | |||
| 1934 | /** | ||
| 1935 | * scsi_internal_device_unblock - resume a device after a block request | ||
| 1936 | * @sdev: device to resume | ||
| 1937 | * | ||
| 1938 | * Called by scsi lld's or the midlayer to restart the device queue | ||
| 1939 | * for the previously suspended scsi device. Called from interrupt or | ||
| 1940 | * normal process context. | ||
| 1941 | * | ||
| 1942 | * Returns zero if successful, or an error if not. | ||
| 1943 | * | ||
| 1944 | * Notes: | ||
| 1945 | * This routine transitions the device to the SDEV_RUNNING state | ||
| 1946 | * (which must be a legal transition) allowing the midlayer to | ||
| 1947 | * goose the queue for this device. This routine assumes the | ||
| 1948 | * host_lock is held upon entry. | ||
| 1949 | **/ | ||
| 1950 | int | ||
| 1951 | scsi_internal_device_unblock(struct scsi_device *sdev) | ||
| 1952 | { | ||
| 1953 | request_queue_t *q = sdev->request_queue; | ||
| 1954 | int err; | ||
| 1955 | unsigned long flags; | ||
| 1956 | |||
| 1957 | /* | ||
| 1958 | * Try to transition the scsi device to SDEV_RUNNING | ||
| 1959 | * and goose the device queue if successful. | ||
| 1960 | */ | ||
| 1961 | err = scsi_device_set_state(sdev, SDEV_RUNNING); | ||
| 1962 | if (err) | ||
| 1963 | return err; | ||
| 1964 | |||
| 1965 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 1966 | blk_start_queue(q); | ||
| 1967 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 1968 | |||
| 1969 | return 0; | ||
| 1970 | } | ||
| 1971 | EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); | ||
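Block and unblock pair up the same way for per-LUN outages. A hypothetical sketch (the exdrv_ helpers are invented; per the notes above, both routines expect the host_lock to be held, e.g. in the interrupt paths that detect and clear the fault):

```c
#include <linux/kernel.h>
#include <scsi/scsi_device.h>

/* Fault path: stop feeding the LUN while its firmware recovers. */
static void exdrv_lun_fault(struct scsi_device *sdev)
{
	if (scsi_internal_device_block(sdev))
		printk(KERN_ERR "exdrv: device not in a blockable state\n");
}

/* Recovery-done path: let the midlayer goose the queue again. */
static void exdrv_lun_recovered(struct scsi_device *sdev)
{
	if (scsi_internal_device_unblock(sdev))
		printk(KERN_ERR "exdrv: device could not be restarted\n");
}
```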
| 1972 | |||
| 1973 | static void | ||
| 1974 | device_block(struct scsi_device *sdev, void *data) | ||
| 1975 | { | ||
| 1976 | scsi_internal_device_block(sdev); | ||
| 1977 | } | ||
| 1978 | |||
| 1979 | static int | ||
| 1980 | target_block(struct device *dev, void *data) | ||
| 1981 | { | ||
| 1982 | if (scsi_is_target_device(dev)) | ||
| 1983 | starget_for_each_device(to_scsi_target(dev), NULL, | ||
| 1984 | device_block); | ||
| 1985 | return 0; | ||
| 1986 | } | ||
| 1987 | |||
| 1988 | void | ||
| 1989 | scsi_target_block(struct device *dev) | ||
| 1990 | { | ||
| 1991 | if (scsi_is_target_device(dev)) | ||
| 1992 | starget_for_each_device(to_scsi_target(dev), NULL, | ||
| 1993 | device_block); | ||
| 1994 | else | ||
| 1995 | device_for_each_child(dev, NULL, target_block); | ||
| 1996 | } | ||
| 1997 | EXPORT_SYMBOL_GPL(scsi_target_block); | ||
| 1998 | |||
| 1999 | static void | ||
| 2000 | device_unblock(struct scsi_device *sdev, void *data) | ||
| 2001 | { | ||
| 2002 | scsi_internal_device_unblock(sdev); | ||
| 2003 | } | ||
| 2004 | |||
| 2005 | static int | ||
| 2006 | target_unblock(struct device *dev, void *data) | ||
| 2007 | { | ||
| 2008 | if (scsi_is_target_device(dev)) | ||
| 2009 | starget_for_each_device(to_scsi_target(dev), NULL, | ||
| 2010 | device_unblock); | ||
| 2011 | return 0; | ||
| 2012 | } | ||
| 2013 | |||
| 2014 | void | ||
| 2015 | scsi_target_unblock(struct device *dev) | ||
| 2016 | { | ||
| 2017 | if (scsi_is_target_device(dev)) | ||
| 2018 | starget_for_each_device(to_scsi_target(dev), NULL, | ||
| 2019 | device_unblock); | ||
| 2020 | else | ||
| 2021 | device_for_each_child(dev, NULL, target_unblock); | ||
| 2022 | } | ||
| 2023 | EXPORT_SYMBOL_GPL(scsi_target_unblock); | ||
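The target-level wrappers exist mainly for transport classes, which see link events for a whole target rather than a single LUN. A hedged sketch of that usage (exdrv_link_event() is invented; a fabric transport could use the pair in a comparable way when a remote port drops and returns):

```c
#include <linux/device.h>
#include <scsi/scsi_device.h>

static void exdrv_link_event(struct device *dev, int link_up)
{
	if (link_up)
		scsi_target_unblock(dev);	/* restart every child LUN */
	else
		scsi_target_block(dev);		/* fence every child LUN */
}
```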
