author		Tejun Heo <tj@kernel.org>	2010-09-03 05:56:16 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-09-10 06:35:36 -0400
commit		4913efe456c987057e5d36a3f0a55422a9072cae (patch)
tree		295f04a7214e1933df3301dd42c12ff3f282a22c /drivers/block
parent		6958f145459ca7ad9715024de97445addacb8510 (diff)
block: deprecate barrier and replace blk_queue_ordered() with blk_queue_flush()
Barrier is deemed too heavy and will soon be replaced by FLUSH/FUA
requests.  Deprecate barrier.  All REQ_HARDBARRIERs are failed with
-EOPNOTSUPP and blk_queue_ordered() is replaced with the simpler
blk_queue_flush().

blk_queue_flush() takes combinations of REQ_FLUSH and REQ_FUA.  If a
device has a write cache and can flush it, it should set REQ_FLUSH.
If the device can handle FUA writes, it should also set REQ_FUA.

All blk_queue_ordered() users are converted.

* ORDERED_DRAIN is mapped to 0, which is the default value.
* ORDERED_DRAIN_FLUSH is mapped to REQ_FLUSH.
* ORDERED_DRAIN_FLUSH_FUA is mapped to REQ_FLUSH | REQ_FUA.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Boaz Harrosh <bharrosh@panasas.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Alasdair G Kergon <agk@redhat.com>
Cc: Pierre Ossman <drzeus@drzeus.cx>
Cc: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
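[Editor's note: a minimal sketch of the conversion pattern described above, for a hypothetical driver that is not part of this patch; mydrv_setup_queue(), wcache and fua are illustrative names only.]

	#include <linux/blkdev.h>

	/* Hypothetical helper, not in this patch: advertise cache-flush and
	 * FUA capabilities under the new blk_queue_flush() interface. */
	static void mydrv_setup_queue(struct request_queue *q, bool wcache, bool fua)
	{
		unsigned int flush = 0;

		if (wcache)			/* device has a volatile write cache	*/
			flush |= REQ_FLUSH;	/* ... and can flush it on request	*/
		if (wcache && fua)		/* device also honours FUA writes	*/
			flush |= REQ_FUA;

		/* replaces the old blk_queue_ordered(q, QUEUE_ORDERED_*) call */
		blk_queue_flush(q, flush);
	}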
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/brd.c		 1
-rw-r--r--	drivers/block/loop.c		 2
-rw-r--r--	drivers/block/osdblk.c		 2
-rw-r--r--	drivers/block/ps3disk.c		 2
-rw-r--r--	drivers/block/virtio_blk.c	25
-rw-r--r--	drivers/block/xen-blkfront.c	43
6 files changed, 24 insertions(+), 51 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 47a41272d26..fa33f97722b 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -482,7 +482,6 @@ static struct brd_device *brd_alloc(int i)
 	if (!brd->brd_queue)
 		goto out_free_dev;
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
-	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_DRAIN);
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c3a4a2e176d..953d1e12f4d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -832,7 +832,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	lo->lo_queue->unplug_fn = loop_unplug;
 
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
-		blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN_FLUSH);
+		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 
 	set_capacity(lo->lo_disk, size);
 	bd_set_size(bdev, size << 9);
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 2284b4f05c6..72d62462433 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -439,7 +439,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
 	blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
 
 	blk_queue_prep_rq(q, blk_queue_start_tag);
-	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
+	blk_queue_flush(q, REQ_FLUSH);
 
 	disk->queue = q;
 
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index e9da874d041..4911f9e57bc 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -468,7 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
 	blk_queue_logical_block_size(queue, dev->blk_size);
 
-	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH);
+	blk_queue_flush(queue, REQ_FLUSH);
 
 	blk_queue_max_segments(queue, -1);
 	blk_queue_max_segment_size(queue, dev->bounce_size);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 79652809eee..d10b635b394 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -388,22 +388,15 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	vblk->disk->driverfs_dev = &vdev->dev;
 	index++;
 
-	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
-		/*
-		 * If the FLUSH feature is supported we do have support for
-		 * flushing a volatile write cache on the host.  Use that
-		 * to implement write barrier support.
-		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
-	} else {
-		/*
-		 * If the FLUSH feature is not supported we must assume that
-		 * the host does not perform any kind of volatile write
-		 * caching. We still need to drain the queue to provider
-		 * proper barrier semantics.
-		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
-	}
+	/*
+	 * If the FLUSH feature is supported we do have support for
+	 * flushing a volatile write cache on the host.  Use that to
+	 * implement write barrier support; otherwise, we must assume
+	 * that the host does not perform any kind of volatile write
+	 * caching.
+	 */
+	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
+		blk_queue_flush(q, REQ_FLUSH);
 
 	/* If disk is read-only in the host, the guest should obey */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 50ec6f83499..0b1eea64326 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -95,7 +95,7 @@ struct blkfront_info
 	struct gnttab_free_callback callback;
 	struct blk_shadow shadow[BLK_RING_SIZE];
 	unsigned long shadow_free;
-	int feature_barrier;
+	unsigned int feature_flush;
 	int is_ready;
 };
 
101 101
@@ -418,25 +418,12 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 }
 
 
-static int xlvbd_barrier(struct blkfront_info *info)
+static void xlvbd_flush(struct blkfront_info *info)
 {
-	int err;
-	const char *barrier;
-
-	switch (info->feature_barrier) {
-	case QUEUE_ORDERED_DRAIN:	barrier = "enabled"; break;
-	case QUEUE_ORDERED_NONE:	barrier = "disabled"; break;
-	default:			return -EINVAL;
-	}
-
-	err = blk_queue_ordered(info->rq, info->feature_barrier);
-
-	if (err)
-		return err;
-
+	blk_queue_flush(info->rq, info->feature_flush);
 	printk(KERN_INFO "blkfront: %s: barriers %s\n",
-	       info->gd->disk_name, barrier);
-	return 0;
+	       info->gd->disk_name,
+	       info->feature_flush ? "enabled" : "disabled");
 }
 
 
@@ -515,7 +502,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	info->rq = gd->queue;
 	info->gd = gd;
 
-	xlvbd_barrier(info);
+	xlvbd_flush(info);
 
 	if (vdisk_info & VDISK_READONLY)
 		set_disk_ro(gd, 1);
@@ -661,8 +648,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
 			       info->gd->disk_name);
 			error = -EOPNOTSUPP;
-			info->feature_barrier = QUEUE_ORDERED_NONE;
-			xlvbd_barrier(info);
+			info->feature_flush = 0;
+			xlvbd_flush(info);
 		}
 		/* fall through */
 	case BLKIF_OP_READ:
@@ -1075,19 +1062,13 @@ static void blkfront_connect(struct blkfront_info *info)
 	/*
 	 * If there's no "feature-barrier" defined, then it means
 	 * we're dealing with a very old backend which writes
-	 * synchronously; draining will do what needs to get done.
+	 * synchronously; nothing to do.
 	 *
 	 * If there are barriers, then we use flush.
-	 *
-	 * If barriers are not supported, then there's no much we can
-	 * do, so just set ordering to NONE.
 	 */
-	if (err)
-		info->feature_barrier = QUEUE_ORDERED_DRAIN;
-	else if (barrier)
-		info->feature_barrier = QUEUE_ORDERED_DRAIN_FLUSH;
-	else
-		info->feature_barrier = QUEUE_ORDERED_NONE;
+	info->feature_flush = 0;
+	if (!err && barrier)
+		info->feature_flush = REQ_FLUSH;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {