diff options
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/brd.c | 1 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_int.h | 3 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_receiver.c | 2 | ||||
-rw-r--r-- | drivers/block/loop.c | 20 | ||||
-rw-r--r-- | drivers/block/osdblk.c | 5 | ||||
-rw-r--r-- | drivers/block/pktcdvd.c | 1 | ||||
-rw-r--r-- | drivers/block/ps3disk.c | 2 | ||||
-rw-r--r-- | drivers/block/virtio_blk.c | 37 | ||||
-rw-r--r-- | drivers/block/xen-blkfront.c | 54 |
9 files changed, 41 insertions, 84 deletions
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 82bfd5bb4a97..b7f51e4594f8 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -483,7 +483,6 @@ static struct brd_device *brd_alloc(int i) | |||
483 | if (!brd->brd_queue) | 483 | if (!brd->brd_queue) |
484 | goto out_free_dev; | 484 | goto out_free_dev; |
485 | blk_queue_make_request(brd->brd_queue, brd_make_request); | 485 | blk_queue_make_request(brd->brd_queue, brd_make_request); |
486 | blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG); | ||
487 | blk_queue_max_hw_sectors(brd->brd_queue, 1024); | 486 | blk_queue_max_hw_sectors(brd->brd_queue, 1024); |
488 | blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); | 487 | blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); |
489 | 488 | ||
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index c07c370c4c82..9bdcf4393c0a 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -2409,8 +2409,7 @@ static inline void drbd_md_flush(struct drbd_conf *mdev) | |||
2409 | if (test_bit(MD_NO_BARRIER, &mdev->flags)) | 2409 | if (test_bit(MD_NO_BARRIER, &mdev->flags)) |
2410 | return; | 2410 | return; |
2411 | 2411 | ||
2412 | r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL, | 2412 | r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL); |
2413 | BLKDEV_IFL_WAIT); | ||
2414 | if (r) { | 2413 | if (r) { |
2415 | set_bit(MD_NO_BARRIER, &mdev->flags); | 2414 | set_bit(MD_NO_BARRIER, &mdev->flags); |
2416 | dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); | 2415 | dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 760ae0df9251..efd6169acf2f 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -987,7 +987,7 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d | |||
987 | 987 | ||
988 | if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) { | 988 | if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) { |
989 | rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL, | 989 | rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL, |
990 | NULL, BLKDEV_IFL_WAIT); | 990 | NULL); |
991 | if (rv) { | 991 | if (rv) { |
992 | dev_err(DEV, "local disk flush failed with status %d\n", rv); | 992 | dev_err(DEV, "local disk flush failed with status %d\n", rv); |
993 | /* would rather check on EOPNOTSUPP, but that is not reliable. | 993 | /* would rather check on EOPNOTSUPP, but that is not reliable. |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index de3083b0a4f5..6c48b3545f84 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -479,17 +479,17 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
479 | pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; | 479 | pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; |
480 | 480 | ||
481 | if (bio_rw(bio) == WRITE) { | 481 | if (bio_rw(bio) == WRITE) { |
482 | bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER); | ||
483 | struct file *file = lo->lo_backing_file; | 482 | struct file *file = lo->lo_backing_file; |
484 | 483 | ||
485 | if (barrier) { | 484 | /* REQ_HARDBARRIER is deprecated */ |
486 | if (unlikely(!file->f_op->fsync)) { | 485 | if (bio->bi_rw & REQ_HARDBARRIER) { |
487 | ret = -EOPNOTSUPP; | 486 | ret = -EOPNOTSUPP; |
488 | goto out; | 487 | goto out; |
489 | } | 488 | } |
490 | 489 | ||
490 | if (bio->bi_rw & REQ_FLUSH) { | ||
491 | ret = vfs_fsync(file, 0); | 491 | ret = vfs_fsync(file, 0); |
492 | if (unlikely(ret)) { | 492 | if (unlikely(ret && ret != -EINVAL)) { |
493 | ret = -EIO; | 493 | ret = -EIO; |
494 | goto out; | 494 | goto out; |
495 | } | 495 | } |
@@ -497,9 +497,9 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) | |||
497 | 497 | ||
498 | ret = lo_send(lo, bio, pos); | 498 | ret = lo_send(lo, bio, pos); |
499 | 499 | ||
500 | if (barrier && !ret) { | 500 | if ((bio->bi_rw & REQ_FUA) && !ret) { |
501 | ret = vfs_fsync(file, 0); | 501 | ret = vfs_fsync(file, 0); |
502 | if (unlikely(ret)) | 502 | if (unlikely(ret && ret != -EINVAL)) |
503 | ret = -EIO; | 503 | ret = -EIO; |
504 | } | 504 | } |
505 | } else | 505 | } else |
@@ -931,7 +931,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
931 | lo->lo_queue->unplug_fn = loop_unplug; | 931 | lo->lo_queue->unplug_fn = loop_unplug; |
932 | 932 | ||
933 | if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) | 933 | if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) |
934 | blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN); | 934 | blk_queue_flush(lo->lo_queue, REQ_FLUSH); |
935 | 935 | ||
936 | set_capacity(lo->lo_disk, size); | 936 | set_capacity(lo->lo_disk, size); |
937 | bd_set_size(bdev, size << 9); | 937 | bd_set_size(bdev, size << 9); |
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index 2284b4f05c62..87311ebac0db 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c | |||
@@ -310,8 +310,7 @@ static void osdblk_rq_fn(struct request_queue *q) | |||
310 | break; | 310 | break; |
311 | 311 | ||
312 | /* filter out block requests we don't understand */ | 312 | /* filter out block requests we don't understand */ |
313 | if (rq->cmd_type != REQ_TYPE_FS && | 313 | if (rq->cmd_type != REQ_TYPE_FS) { |
314 | !(rq->cmd_flags & REQ_HARDBARRIER)) { | ||
315 | blk_end_request_all(rq, 0); | 314 | blk_end_request_all(rq, 0); |
316 | continue; | 315 | continue; |
317 | } | 316 | } |
@@ -439,7 +438,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev) | |||
439 | blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); | 438 | blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); |
440 | 439 | ||
441 | blk_queue_prep_rq(q, blk_queue_start_tag); | 440 | blk_queue_prep_rq(q, blk_queue_start_tag); |
442 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); | 441 | blk_queue_flush(q, REQ_FLUSH); |
443 | 442 | ||
444 | disk->queue = q; | 443 | disk->queue = q; |
445 | 444 | ||
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index ef58fccadad3..19b3568e9326 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -753,7 +753,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * | |||
753 | 753 | ||
754 | rq->timeout = 60*HZ; | 754 | rq->timeout = 60*HZ; |
755 | rq->cmd_type = REQ_TYPE_BLOCK_PC; | 755 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
756 | rq->cmd_flags |= REQ_HARDBARRIER; | ||
757 | if (cgc->quiet) | 756 | if (cgc->quiet) |
758 | rq->cmd_flags |= REQ_QUIET; | 757 | rq->cmd_flags |= REQ_QUIET; |
759 | 758 | ||
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 03688c2da319..8e1ce2e2916a 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c | |||
@@ -468,7 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) | |||
468 | blk_queue_dma_alignment(queue, dev->blk_size-1); | 468 | blk_queue_dma_alignment(queue, dev->blk_size-1); |
469 | blk_queue_logical_block_size(queue, dev->blk_size); | 469 | blk_queue_logical_block_size(queue, dev->blk_size); |
470 | 470 | ||
471 | blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH); | 471 | blk_queue_flush(queue, REQ_FLUSH); |
472 | 472 | ||
473 | blk_queue_max_segments(queue, -1); | 473 | blk_queue_max_segments(queue, -1); |
474 | blk_queue_max_segment_size(queue, dev->bounce_size); | 474 | blk_queue_max_segment_size(queue, dev->bounce_size); |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 8320490226b7..6ecf89cdf006 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -127,9 +127,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, | |||
127 | } | 127 | } |
128 | } | 128 | } |
129 | 129 | ||
130 | if (vbr->req->cmd_flags & REQ_HARDBARRIER) | ||
131 | vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; | ||
132 | |||
133 | sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); | 130 | sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); |
134 | 131 | ||
135 | /* | 132 | /* |
@@ -379,31 +376,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
379 | vblk->disk->driverfs_dev = &vdev->dev; | 376 | vblk->disk->driverfs_dev = &vdev->dev; |
380 | index++; | 377 | index++; |
381 | 378 | ||
382 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) { | 379 | /* configure queue flush support */ |
383 | /* | 380 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) |
384 | * If the FLUSH feature is supported we do have support for | 381 | blk_queue_flush(q, REQ_FLUSH); |
385 | * flushing a volatile write cache on the host. Use that | ||
386 | * to implement write barrier support. | ||
387 | */ | ||
388 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); | ||
389 | } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) { | ||
390 | /* | ||
391 | * If the BARRIER feature is supported the host expects us | ||
392 | * to order request by tags. This implies there is not | ||
393 | * volatile write cache on the host, and that the host | ||
394 | * never re-orders outstanding I/O. This feature is not | ||
395 | * useful for real life scenarious and deprecated. | ||
396 | */ | ||
397 | blk_queue_ordered(q, QUEUE_ORDERED_TAG); | ||
398 | } else { | ||
399 | /* | ||
400 | * If the FLUSH feature is not supported we must assume that | ||
401 | * the host does not perform any kind of volatile write | ||
402 | * caching. We still need to drain the queue to provider | ||
403 | * proper barrier semantics. | ||
404 | */ | ||
405 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN); | ||
406 | } | ||
407 | 382 | ||
408 | /* If disk is read-only in the host, the guest should obey */ | 383 | /* If disk is read-only in the host, the guest should obey */ |
409 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) | 384 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) |
@@ -522,9 +497,9 @@ static const struct virtio_device_id id_table[] = { | |||
522 | }; | 497 | }; |
523 | 498 | ||
524 | static unsigned int features[] = { | 499 | static unsigned int features[] = { |
525 | VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, | 500 | VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, |
526 | VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, | 501 | VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, |
527 | VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY | 502 | VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY |
528 | }; | 503 | }; |
529 | 504 | ||
530 | /* | 505 | /* |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 3ff06f475eef..4b33a18c32e0 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -96,7 +96,7 @@ struct blkfront_info | |||
96 | struct gnttab_free_callback callback; | 96 | struct gnttab_free_callback callback; |
97 | struct blk_shadow shadow[BLK_RING_SIZE]; | 97 | struct blk_shadow shadow[BLK_RING_SIZE]; |
98 | unsigned long shadow_free; | 98 | unsigned long shadow_free; |
99 | int feature_barrier; | 99 | unsigned int feature_flush; |
100 | int is_ready; | 100 | int is_ready; |
101 | }; | 101 | }; |
102 | 102 | ||
@@ -419,26 +419,12 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | |||
419 | } | 419 | } |
420 | 420 | ||
421 | 421 | ||
422 | static int xlvbd_barrier(struct blkfront_info *info) | 422 | static void xlvbd_flush(struct blkfront_info *info) |
423 | { | 423 | { |
424 | int err; | 424 | blk_queue_flush(info->rq, info->feature_flush); |
425 | const char *barrier; | ||
426 | |||
427 | switch (info->feature_barrier) { | ||
428 | case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; | ||
429 | case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; | ||
430 | case QUEUE_ORDERED_NONE: barrier = "disabled"; break; | ||
431 | default: return -EINVAL; | ||
432 | } | ||
433 | |||
434 | err = blk_queue_ordered(info->rq, info->feature_barrier); | ||
435 | |||
436 | if (err) | ||
437 | return err; | ||
438 | |||
439 | printk(KERN_INFO "blkfront: %s: barriers %s\n", | 425 | printk(KERN_INFO "blkfront: %s: barriers %s\n", |
440 | info->gd->disk_name, barrier); | 426 | info->gd->disk_name, |
441 | return 0; | 427 | info->feature_flush ? "enabled" : "disabled"); |
442 | } | 428 | } |
443 | 429 | ||
444 | 430 | ||
@@ -517,7 +503,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | |||
517 | info->rq = gd->queue; | 503 | info->rq = gd->queue; |
518 | info->gd = gd; | 504 | info->gd = gd; |
519 | 505 | ||
520 | xlvbd_barrier(info); | 506 | xlvbd_flush(info); |
521 | 507 | ||
522 | if (vdisk_info & VDISK_READONLY) | 508 | if (vdisk_info & VDISK_READONLY) |
523 | set_disk_ro(gd, 1); | 509 | set_disk_ro(gd, 1); |
@@ -663,8 +649,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
663 | printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", | 649 | printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", |
664 | info->gd->disk_name); | 650 | info->gd->disk_name); |
665 | error = -EOPNOTSUPP; | 651 | error = -EOPNOTSUPP; |
666 | info->feature_barrier = QUEUE_ORDERED_NONE; | 652 | info->feature_flush = 0; |
667 | xlvbd_barrier(info); | 653 | xlvbd_flush(info); |
668 | } | 654 | } |
669 | /* fall through */ | 655 | /* fall through */ |
670 | case BLKIF_OP_READ: | 656 | case BLKIF_OP_READ: |
@@ -1077,20 +1063,20 @@ static void blkfront_connect(struct blkfront_info *info) | |||
1077 | /* | 1063 | /* |
1078 | * If there's no "feature-barrier" defined, then it means | 1064 | * If there's no "feature-barrier" defined, then it means |
1079 | * we're dealing with a very old backend which writes | 1065 | * we're dealing with a very old backend which writes |
1080 | * synchronously; draining will do what needs to get done. | 1066 | * synchronously; nothing to do. |
1081 | * | 1067 | * |
1082 | * If there are barriers, then we can do full queued writes | 1068 | * If there are barriers, then we use flush. |
1083 | * with tagged barriers. | ||
1084 | * | ||
1085 | * If barriers are not supported, then there's no much we can | ||
1086 | * do, so just set ordering to NONE. | ||
1087 | */ | 1069 | */ |
1088 | if (err) | 1070 | info->feature_flush = 0; |
1089 | info->feature_barrier = QUEUE_ORDERED_DRAIN; | 1071 | |
1090 | else if (barrier) | 1072 | /* |
1091 | info->feature_barrier = QUEUE_ORDERED_TAG; | 1073 | * The driver doesn't properly handle empty flushes, so |
1092 | else | 1074 | * let's disable barrier support for now. |
1093 | info->feature_barrier = QUEUE_ORDERED_NONE; | 1075 | */ |
1076 | #if 0 | ||
1077 | if (!err && barrier) | ||
1078 | info->feature_flush = REQ_FLUSH; | ||
1079 | #endif | ||
1094 | 1080 | ||
1095 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); | 1081 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); |
1096 | if (err) { | 1082 | if (err) { |