 drivers/scsi/hosts.c       |  9
 drivers/scsi/scsi_lib.c    | 46
 drivers/scsi/sd.c          | 58
 include/scsi/scsi_driver.h |  1
 include/scsi/scsi_host.h   |  1
 5 files changed, 20 insertions, 95 deletions
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 5b9c2c5a7f0e..66783c860a19 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -347,17 +347,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->cmd_per_lun = sht->cmd_per_lun;
 	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
 	shost->use_clustering = sht->use_clustering;
-	shost->ordered_flush = sht->ordered_flush;
 	shost->ordered_tag = sht->ordered_tag;
 
-	/*
-	 * hosts/devices that do queueing must support ordered tags
-	 */
-	if (shost->can_queue > 1 && shost->ordered_flush) {
-		printk(KERN_ERR "scsi: ordered flushes don't support queueing\n");
-		shost->ordered_flush = 0;
-	}
-
 	if (sht->max_host_blocked)
 		shost->max_host_blocked = sht->max_host_blocked;
 	else
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 53551f1dfe21..7a38b101976c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -932,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
 	int sense_valid = 0;
 	int sense_deferred = 0;
 
-	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
-		return;
-
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 * For the case of a READ, we need to copy the data out of the
@@ -1199,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	return BLKPREP_KILL;
 }
 
-static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_driver *drv;
-
-	if (sdev->sdev_state == SDEV_RUNNING) {
-		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-
-		if (drv->prepare_flush)
-			return drv->prepare_flush(q, rq);
-	}
-
-	return 0;
-}
-
-static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct request *flush_rq = rq->end_io_data;
-	struct scsi_driver *drv;
-
-	if (flush_rq->errors) {
-		printk("scsi: barrier error, disabling flush support\n");
-		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
-	}
-
-	if (sdev->sdev_state == SDEV_RUNNING) {
-		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-		drv->end_flush(q, rq);
-	}
-}
-
 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 			       sector_t *error_sector)
 {
@@ -1703,17 +1668,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
 
-	/*
-	 * ordered tags are superior to flush ordering
-	 */
-	if (shost->ordered_tag)
-		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
-	else if (shost->ordered_flush) {
-		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
-		q->prepare_flush_fn = scsi_prepare_flush_fn;
-		q->end_flush_fn = scsi_end_flush_fn;
-	}
-
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	return q;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d651150ee76d..2eefc9eb5da6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -121,8 +121,7 @@ static void sd_shutdown(struct device *dev);
 static void sd_rescan(struct device *);
 static int sd_init_command(struct scsi_cmnd *);
 static int sd_issue_flush(struct device *, sector_t *);
-static void sd_end_flush(request_queue_t *, struct request *);
-static int sd_prepare_flush(request_queue_t *, struct request *);
+static void sd_prepare_flush(request_queue_t *, struct request *);
 static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
 			     unsigned char *buffer);
 
@@ -137,8 +136,6 @@ static struct scsi_driver sd_template = {
 	.rescan = sd_rescan,
 	.init_command = sd_init_command,
 	.issue_flush = sd_issue_flush,
-	.prepare_flush = sd_prepare_flush,
-	.end_flush = sd_end_flush,
 };
 
 /*
@@ -729,42 +726,13 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
 	return ret;
 }
 
-static void sd_end_flush(request_queue_t *q, struct request *flush_rq)
+static void sd_prepare_flush(request_queue_t *q, struct request *rq)
 {
-	struct request *rq = flush_rq->end_io_data;
-	struct scsi_cmnd *cmd = rq->special;
-	unsigned int bytes = rq->hard_nr_sectors << 9;
-
-	if (!flush_rq->errors) {
-		spin_unlock(q->queue_lock);
-		scsi_io_completion(cmd, bytes, 0);
-		spin_lock(q->queue_lock);
-	} else if (blk_barrier_postflush(rq)) {
-		spin_unlock(q->queue_lock);
-		scsi_io_completion(cmd, 0, bytes);
-		spin_lock(q->queue_lock);
-	} else {
-		/*
-		 * force journal abort of barriers
-		 */
-		end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors);
-		end_that_request_last(rq, -EOPNOTSUPP);
-	}
-}
-
-static int sd_prepare_flush(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_disk *sdkp = dev_get_drvdata(&sdev->sdev_gendev);
-
-	if (!sdkp || !sdkp->WCE)
-		return 0;
-
 	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+	rq->flags |= REQ_BLOCK_PC;
 	rq->timeout = SD_TIMEOUT;
 	rq->cmd[0] = SYNCHRONIZE_CACHE;
-	return 1;
+	rq->cmd_len = 10;
 }
 
 static void sd_rescan(struct device *dev)
@@ -1462,6 +1430,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
 	unsigned char *buffer;
+	unsigned ordered;
 
 	SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
 
@@ -1498,7 +1467,20 @@
 		sd_read_write_protect_flag(sdkp, disk->disk_name, buffer);
 		sd_read_cache_type(sdkp, disk->disk_name, buffer);
 	}
-	
+
+	/*
+	 * We now have all cache related info, determine how we deal
+	 * with ordered requests. Note that as the current SCSI
+	 * dispatch function can alter request order, we cannot use
+	 * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
+	 */
+	if (sdkp->WCE)
+		ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+	else
+		ordered = QUEUE_ORDERED_DRAIN;
+
+	blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);
+
 	set_capacity(disk, sdkp->capacity);
 	kfree(buffer);
 
@@ -1598,6 +1580,7 @@ static int sd_probe(struct device *dev)
 	strcpy(gd->devfs_name, sdp->devfs_name);
 
 	gd->private_data = &sdkp->driver;
+	gd->queue = sdkp->device->request_queue;
 
 	sd_revalidate_disk(gd);
 
@@ -1605,7 +1588,6 @@ static int sd_probe(struct device *dev)
 	gd->flags = GENHD_FL_DRIVERFS;
 	if (sdp->removable)
 		gd->flags |= GENHD_FL_REMOVABLE;
-	gd->queue = sdkp->device->request_queue;
 
 	dev_set_drvdata(dev, sdkp);
 	add_disk(gd);
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 850dfa877fda..02e26c1672bf 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -15,7 +15,6 @@ struct scsi_driver {
 	void (*rescan)(struct device *);
 	int (*issue_flush)(struct device *, sector_t *);
 	int (*prepare_flush)(struct request_queue *, struct request *);
-	void (*end_flush)(struct request_queue *, struct request *);
 };
 #define to_scsi_driver(drv) \
 	container_of((drv), struct scsi_driver, gendrv)
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 6cbb1982ed03..25f637bd38b9 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -392,7 +392,6 @@ struct scsi_host_template {
 	/*
 	 * ordered write support
 	 */
-	unsigned ordered_flush:1;
 	unsigned ordered_tag:1;
 
 	/*
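
Taken out of diff context, the flush handling that survives in sd.c after this patch reduces to the sketch below. sd_setup_ordered is a hypothetical helper name used only for illustration (the patch does this work inline in sd_revalidate_disk), and the types and constants (request_queue_t, REQ_BLOCK_PC, QUEUE_ORDERED_DRAIN_FLUSH, blk_queue_ordered) are the block-layer interfaces of this kernel tree, so this is not a standalone, compilable driver.

/* Condensed sketch of the post-patch flush setup in sd.c (illustrative only). */

static void sd_prepare_flush(request_queue_t *q, struct request *rq)
{
	/* Rewrite the barrier's flush request as a SYNCHRONIZE CACHE(10) packet command. */
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->flags |= REQ_BLOCK_PC;
	rq->timeout = SD_TIMEOUT;
	rq->cmd[0] = SYNCHRONIZE_CACHE;
	rq->cmd_len = 10;
}

/* Hypothetical helper: in the patch this logic runs inline in sd_revalidate_disk(). */
static void sd_setup_ordered(struct scsi_disk *sdkp)
{
	unsigned ordered;

	/*
	 * A write-back cache (WCE set) needs a real cache flush around
	 * barriers; a write-through cache only needs the queue drained.
	 * Ordered tags are not used because the SCSI dispatch path can
	 * still reorder requests.
	 */
	if (sdkp->WCE)
		ordered = QUEUE_ORDERED_DRAIN_FLUSH;
	else
		ordered = QUEUE_ORDERED_DRAIN;

	blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);
}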
