 drivers/scsi/scsi.c        | 21
 drivers/scsi/scsi_lib.c    | 66
 drivers/scsi/scsi_sysfs.c  | 10
 include/scsi/scsi_device.h |  7
 include/scsi/scsi_host.h   |  7
 5 files changed, 58 insertions(+), 53 deletions(-)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 21fb97b01dd6..3dde8a35493f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -726,17 +726,16 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 
 	scsi_device_unbusy(sdev);
 
 	/*
-	 * Clear the flags which say that the device/host is no longer
-	 * capable of accepting new commands.  These are set in scsi_queue.c
-	 * for both the queue full condition on a device, and for a
-	 * host full condition on the host.
-	 *
-	 * XXX(hch): What about locking?
+	 * Clear the flags that say that the device/target/host is no longer
+	 * capable of accepting new commands.
 	 */
-	shost->host_blocked = 0;
-	starget->target_blocked = 0;
-	sdev->device_blocked = 0;
+	if (atomic_read(&shost->host_blocked))
+		atomic_set(&shost->host_blocked, 0);
+	if (atomic_read(&starget->target_blocked))
+		atomic_set(&starget->target_blocked, 0);
+	if (atomic_read(&sdev->device_blocked))
+		atomic_set(&sdev->device_blocked, 0);
 
 	/*
 	 * If we have valid sense information, then some kind of recovery
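Note on the hunk above: the completion path now clears each blocked counter only when it is already non-zero, presumably so the common case does not keep writing to a counter shared across CPUs on every command completion. Below is a minimal userspace sketch of that check-before-clear pattern; it uses C11 atomics rather than the kernel's atomic_t API, and the names mirror the kernel fields but are ordinary globals here, not the kernel symbols.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for a per-device "blocked" countdown. */
static atomic_int device_blocked;

/* Completion path: reset the countdown, but only write to the shared
 * counter when it is actually set, keeping the common case read-only. */
static void finish_command(void)
{
	if (atomic_load(&device_blocked))
		atomic_store(&device_blocked, 0);
}

int main(void)
{
	atomic_store(&device_blocked, 3);	/* simulate a QUEUE_FULL response */
	finish_command();
	printf("device_blocked = %d\n", atomic_load(&device_blocked));
	return 0;
}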
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1ddf0fb43b59..69da4cb5cb13 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -99,14 +99,16 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
 	 */
 	switch (reason) {
 	case SCSI_MLQUEUE_HOST_BUSY:
-		host->host_blocked = host->max_host_blocked;
+		atomic_set(&host->host_blocked, host->max_host_blocked);
 		break;
 	case SCSI_MLQUEUE_DEVICE_BUSY:
 	case SCSI_MLQUEUE_EH_RETRY:
-		device->device_blocked = device->max_device_blocked;
+		atomic_set(&device->device_blocked,
+			   device->max_device_blocked);
 		break;
 	case SCSI_MLQUEUE_TARGET_BUSY:
-		starget->target_blocked = starget->max_target_blocked;
+		atomic_set(&starget->target_blocked,
+			   starget->max_target_blocked);
 		break;
 	}
 }
@@ -351,29 +353,35 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
-static inline int scsi_device_is_busy(struct scsi_device *sdev)
+static inline bool scsi_device_is_busy(struct scsi_device *sdev)
 {
-	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth ||
-	    sdev->device_blocked)
-		return 1;
-	return 0;
+	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
+		return true;
+	if (atomic_read(&sdev->device_blocked) > 0)
+		return true;
+	return false;
 }
 
-static inline int scsi_target_is_busy(struct scsi_target *starget)
+static inline bool scsi_target_is_busy(struct scsi_target *starget)
 {
-	return ((starget->can_queue > 0 &&
-		 atomic_read(&starget->target_busy) >= starget->can_queue) ||
-		 starget->target_blocked);
+	if (starget->can_queue > 0 &&
+	    atomic_read(&starget->target_busy) >= starget->can_queue)
+		return true;
+	if (atomic_read(&starget->target_blocked) > 0)
+		return true;
+	return false;
 }
 
-static inline int scsi_host_is_busy(struct Scsi_Host *shost)
+static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 {
-	if ((shost->can_queue > 0 &&
-	     atomic_read(&shost->host_busy) >= shost->can_queue) ||
-	    shost->host_blocked || shost->host_self_blocked)
-		return 1;
-
-	return 0;
+	if (shost->can_queue > 0 &&
+	    atomic_read(&shost->host_busy) >= shost->can_queue)
+		return true;
+	if (atomic_read(&shost->host_blocked) > 0)
+		return true;
+	if (shost->host_self_blocked)
+		return true;
+	return false;
 }
 
 static void scsi_starved_list_run(struct Scsi_Host *shost)
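The three predicates above now return bool and test each counter separately with atomic_read(), without taking host_lock. The reading here (my interpretation, not stated in the diff) is that a momentarily stale value is acceptable because these helpers are only dispatch hints; the authoritative accounting happens in the *_queue_ready() paths further down. A rough userspace analogue of such a lock-free predicate, using C11 atomics and a made-up struct fake_sdev rather than the kernel types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-in for the counters scsi_device_is_busy() looks at. */
struct fake_sdev {
	atomic_int device_busy;
	atomic_int device_blocked;
	int queue_depth;
};

/* Lock-free, read-only check: a slightly stale answer only means one
 * delayed or one extra dispatch attempt; it never corrupts the counters. */
static bool device_is_busy(struct fake_sdev *sdev)
{
	if (atomic_load(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_load(&sdev->device_blocked) > 0)
		return true;
	return false;
}

int main(void)
{
	struct fake_sdev sdev = { .queue_depth = 2 };

	atomic_store(&sdev.device_busy, 2);
	printf("busy: %d\n", device_is_busy(&sdev));	/* 1: queue depth reached */
	atomic_store(&sdev.device_busy, 0);
	printf("busy: %d\n", device_is_busy(&sdev));	/* 0: free to dispatch */
	return 0;
}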
@@ -1256,14 +1264,14 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 	unsigned int busy;
 
 	busy = atomic_inc_return(&sdev->device_busy) - 1;
-	if (sdev->device_blocked) {
+	if (atomic_read(&sdev->device_blocked)) {
 		if (busy)
 			goto out_dec;
 
 		/*
 		 * unblock after device_blocked iterates to zero
 		 */
-		if (--sdev->device_blocked != 0) {
+		if (atomic_dec_return(&sdev->device_blocked) > 0) {
 			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 			goto out_dec;
 		}
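This hunk is the heart of the conversion: device_busy is sampled with atomic_inc_return(), and the blocked countdown is decremented with atomic_dec_return() instead of --sdev->device_blocked, so the decrement and the zero test are one indivisible step and exactly one contender observes the transition to zero. A small userspace model of that count-down-to-zero dispatch gate follows; it uses C11 atomics and invented names (queue_ready, plus device_busy and device_blocked as plain globals), and it leaves out the kernel's queue-depth check and blk_delay_queue() handling.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int device_busy;
static atomic_int device_blocked = 3;	/* pretend the device reported QUEUE_FULL */

/* Returns true when a command may be dispatched. Only the caller whose
 * atomic decrement reaches zero clears the block; everyone else backs off. */
static bool queue_ready(void)
{
	int busy = atomic_fetch_add(&device_busy, 1);	/* old value, i.e. inc_return() - 1 */

	if (atomic_load(&device_blocked)) {
		if (busy)
			goto out_dec;
		if (atomic_fetch_sub(&device_blocked, 1) - 1 > 0)
			goto out_dec;
		/* the countdown hit zero: this caller unblocks the queue */
	}
	return true;

out_dec:
	atomic_fetch_sub(&device_busy, 1);
	return false;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("attempt %d: %s\n", i, queue_ready() ? "dispatch" : "defer");
	return 0;
}

With device_blocked starting at 3, the first two attempts are deferred and the third one unblocks the queue, mirroring how the kernel counts down from max_device_blocked.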
@@ -1302,19 +1310,15 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
 	}
 
 	busy = atomic_inc_return(&starget->target_busy) - 1;
-	if (starget->target_blocked) {
+	if (atomic_read(&starget->target_blocked) > 0) {
 		if (busy)
 			goto starved;
 
 		/*
 		 * unblock after target_blocked iterates to zero
 		 */
-		spin_lock_irq(shost->host_lock);
-		if (--starget->target_blocked != 0) {
-			spin_unlock_irq(shost->host_lock);
+		if (atomic_dec_return(&starget->target_blocked) > 0)
 			goto out_dec;
-		}
-		spin_unlock_irq(shost->host_lock);
 
 		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
 				 "unblocking target at zero depth\n"));
@@ -1349,19 +1353,15 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 		return 0;
 
 	busy = atomic_inc_return(&shost->host_busy) - 1;
-	if (shost->host_blocked) {
+	if (atomic_read(&shost->host_blocked) > 0) {
 		if (busy)
 			goto starved;
 
 		/*
 		 * unblock after host_blocked iterates to zero
 		 */
-		spin_lock_irq(shost->host_lock);
-		if (--shost->host_blocked != 0) {
-			spin_unlock_irq(shost->host_lock);
+		if (atomic_dec_return(&shost->host_blocked) > 0)
 			goto out_dec;
-		}
-		spin_unlock_irq(shost->host_lock);
 
 		SCSI_LOG_MLQUEUE(3,
 			shost_printk(KERN_INFO, shost,
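For the target and host variants the same substitution also removes the spin_lock_irq(shost->host_lock)/spin_unlock_irq() pair that used to make the bare --blocked decrement and its zero test atomic; with an atomic_t, atomic_dec_return() provides that guarantee by itself. The contrast below is an illustrative userspace sketch only (pthread mutex versus a C11 atomic, invented names), not the kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int blocked_locked = 3;		/* old shape: plain int guarded by a lock */
static atomic_int blocked_atomic = 3;	/* new shape: atomic countdown */

/* Old shape: the lock makes the decrement and the zero test one unit. */
static bool try_unblock_locked(void)
{
	bool unblocked;

	pthread_mutex_lock(&host_lock);
	unblocked = (--blocked_locked == 0);
	pthread_mutex_unlock(&host_lock);
	return unblocked;
}

/* New shape: the atomic read-modify-write is already indivisible,
 * so no lock is needed around the decrement. */
static bool try_unblock_atomic(void)
{
	return atomic_fetch_sub(&blocked_atomic, 1) - 1 == 0;
}

int main(void)
{
	while (!try_unblock_locked())
		;	/* three calls until the locked countdown reaches zero */
	while (!try_unblock_atomic())
		;	/* likewise for the atomic countdown */
	return 0;
}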
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 79df9847edef..209cae3097ea 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -584,7 +584,6 @@ static int scsi_sdev_check_buf_bit(const char *buf)
 /*
  * Create the actual show/store functions and data structures.
  */
-sdev_rd_attr (device_blocked, "%d\n");
 sdev_rd_attr (type, "%d\n");
 sdev_rd_attr (scsi_level, "%d\n");
 sdev_rd_attr (vendor, "%.8s\n");
@@ -600,6 +599,15 @@ sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
 
+static ssize_t
+sdev_show_device_blocked(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked));
+}
+static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL);
+
 /*
  * TODO: can we make these symlinks to the block layer ones?
  */
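The device_blocked sysfs attribute keeps the same read-only "%d" format, but it is now open-coded instead of being generated by sdev_rd_attr(), presumably because the generated show function formats the struct field directly and the value now has to go through atomic_read() first. A tiny userspace analogue of what the new show function's body does (C11 atomics, illustrative only; buf and the 20-byte limit mirror the kernel snippet above):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int blocked = 3;
	char buf[20];

	/* The counter has to be loaded explicitly before formatting; the
	 * atomic object itself cannot be handed straight to "%d". */
	snprintf(buf, sizeof(buf), "%d\n", atomic_load(&blocked));
	fputs(buf, stdout);
	return 0;
}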
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 3329901c7243..0f853f2c9dc7 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -82,6 +82,8 @@ struct scsi_device {
 	struct list_head same_target_siblings; /* just the devices sharing same target id */
 
 	atomic_t device_busy;		/* commands actually active on LLDD */
+	atomic_t device_blocked;	/* Device returned QUEUE_FULL. */
+
 	spinlock_t list_lock;
 	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
 	struct list_head starved_entry;
@@ -180,8 +182,6 @@ struct scsi_device {
 	struct list_head event_list;	/* asserted events */
 	struct work_struct event_work;
 
-	unsigned int device_blocked;	/* Device returned QUEUE_FULL. */
-
 	unsigned int max_device_blocked; /* what device_blocked counts down from */
 #define SCSI_DEFAULT_DEVICE_BLOCKED	3
 
@@ -291,12 +291,13 @@ struct scsi_target {
 	 * the same target will also. */
 	/* commands actually active on LLD. */
 	atomic_t		target_busy;
+	atomic_t		target_blocked;
+
 	/*
 	 * LLDs should set this in the slave_alloc host template callout.
 	 * If set to zero then there is not limit.
 	 */
 	unsigned int		can_queue;
-	unsigned int		target_blocked;
 	unsigned int		max_target_blocked;
 #define SCSI_DEFAULT_TARGET_BLOCKED	3
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 51f7911b1cbb..5e8ebc1ac12b 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -583,6 +583,8 @@ struct Scsi_Host {
 	struct blk_queue_tag *bqt;
 
 	atomic_t host_busy;		   /* commands actually active on low-level */
+	atomic_t host_blocked;
+
 	unsigned int host_failed;	   /* commands that failed.
 					      protected by host_lock */
 	unsigned int host_eh_scheduled;    /* EH scheduled without command */
@@ -682,11 +684,6 @@ struct Scsi_Host {
 	struct workqueue_struct *tmf_work_q;
 
 	/*
-	 * Host has rejected a command because it was busy.
-	 */
-	unsigned int host_blocked;
-
-	/*
 	 * Value host_blocked counts down from
 	 */
 	unsigned int max_host_blocked;
