Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/cciss.c               |  2 +-
-rw-r--r--  drivers/block/drbd/drbd_receiver.c  |  2 +-
-rw-r--r--  drivers/block/nbd.c                 |  2 +-
-rw-r--r--  drivers/block/virtio_blk.c          | 64 +++++++++++++++++++++-----
-rw-r--r--  drivers/block/xen-blkfront.c        | 30 ++++++++++++++
5 files changed, 84 insertions(+), 16 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index fdf1b79eb347..31064df1370a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -379,7 +379,7 @@ static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 	"UNKNOWN"
 };
-#define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1)
+#define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1)
 
 #ifdef CONFIG_PROC_FS
 
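Aside (not part of the patch): ARRAY_SIZE() lives in include/linux/kernel.h and expands to the same sizeof quotient the old cciss line open-coded, plus a __must_be_array() check that turns passing a pointer into a compile error. A minimal userspace sketch of the idiom, reusing the raid_label table from the hunk above:

    #include <stdio.h>

    /* Same quotient the old code open-coded; the kernel's macro also
     * adds __must_be_array(arr) so a pointer argument fails to build. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
            "UNKNOWN"
    };
    #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

    int main(void)
    {
            /* Prints 6, the index of the "UNKNOWN" sentinel entry. */
            printf("%zu\n", RAID_UNKNOWN);
            return 0;
    }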
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 20abef531c99..081522d3c742 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1087,7 +1087,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 	} else {
 		epoch->flags = 0;
 		atomic_set(&epoch->epoch_size, 0);
-		/* atomic_set(&epoch->active, 0); is alrady zero */
+		/* atomic_set(&epoch->active, 0); is already zero */
 		if (rv == FE_STILL_LIVE)
 			rv = FE_RECYCLED;
 	}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6751789fb379..0daa422aa281 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -4,7 +4,7 @@
  * Note that you can not swap over this thing, yet. Seems to work but
  * deadlocks sometimes - you can not swap over TCP in general.
  *
- * Copyright 1997-2000, 2008 Pavel Machek <pavel@suse.cz>
+ * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
  * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
  *
  * This file is released under GPLv2 or later.
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7b0f7b624adf..2aafafca2b13 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -224,16 +224,6 @@ static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
 	struct gendisk *disk = bdev->bd_disk;
 	struct virtio_blk *vblk = disk->private_data;
 
-	if (cmd == 0x56424944) { /* 'VBID' */
-		void __user *usr_data = (void __user *)data;
-		char id_str[VIRTIO_BLK_ID_BYTES];
-		int err;
-
-		err = virtblk_get_id(disk, id_str);
-		if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
-			err = -EFAULT;
-		return err;
-	}
 	/*
 	 * Only allow the generic SCSI ioctls if the host can support it.
 	 */
@@ -292,6 +282,27 @@ static int index_to_minor(int index)
 	return index << PART_BITS;
 }
 
+static ssize_t virtblk_serial_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	int err;
+
+	/* sysfs gives us a PAGE_SIZE buffer */
+	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
+
+	buf[VIRTIO_BLK_ID_BYTES] = '\0';
+	err = virtblk_get_id(disk, buf);
+	if (!err)
+		return strlen(buf);
+
+	if (err == -EIO) /* Unsupported? Make it empty. */
+		return 0;
+
+	return err;
+}
+DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+
 static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
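Taken together with the ioctl removal above, these two hunks replace the driver-private 'VBID' ioctl with a standard sysfs attribute, so the disk identifier is now read from /sys/block/<disk>/serial. A minimal sketch of a userspace reader (the vda name is an assumption for illustration; VIRTIO_BLK_ID_BYTES is 20 in the virtio_blk headers of this era):

    #include <stdio.h>

    int main(void)
    {
            /* Assumes the first virtio disk is vda; adjust as needed. */
            FILE *f = fopen("/sys/block/vda/serial", "r");
            char serial[64] = "";

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* At most VIRTIO_BLK_ID_BYTES characters; empty when the
             * host does not implement the ID request (-EIO case above). */
            if (!fgets(serial, sizeof(serial), f))
                    serial[0] = '\0';
            fclose(f);
            printf("serial: %s\n", serial);
            return 0;
    }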
@@ -377,11 +388,31 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	vblk->disk->driverfs_dev = &vdev->dev;
 	index++;
 
-	/* If barriers are supported, tell block layer that queue is ordered */
-	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
+	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
+		/*
+		 * If the FLUSH feature is supported we do have support for
+		 * flushing a volatile write cache on the host.  Use that
+		 * to implement write barrier support.
+		 */
 		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
-	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
+	} else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
+		/*
+		 * If the BARRIER feature is supported the host expects us
+		 * to order requests by tags.  This implies there is no
+		 * volatile write cache on the host, and that the host
+		 * never re-orders outstanding I/O.  This feature is not
+		 * useful for real life scenarios and is deprecated.
+		 */
 		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
+	} else {
+		/*
+		 * If the FLUSH feature is not supported we must assume that
+		 * the host does not perform any kind of volatile write
+		 * caching.  We still need to drain the queue to provide
+		 * proper barrier semantics.
+		 */
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
+	}
 
 	/* If disk is read-only in the host, the guest should obey */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
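Because the new branch structure is spread across interleaved - and + lines, here is a purely illustrative consolidation of the logic this hunk adds to virtblk_probe(); the patch keeps it inline and the helper name below is invented, so this compiles only inside a kernel tree of this vintage:

    #include <linux/blkdev.h>
    #include <linux/virtio.h>
    #include <linux/virtio_blk.h>

    /* Invented helper: pick the block layer's ordered mode from the two
     * virtio feature bits, exactly as virtblk_probe() now does inline. */
    static void virtblk_choose_ordered_mode(struct virtio_device *vdev,
                                            struct request_queue *q)
    {
            if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
                    /* Host has a volatile write cache we can flush. */
                    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
            else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
                    /* Deprecated: host orders by tag, no volatile cache. */
                    blk_queue_ordered(q, QUEUE_ORDERED_TAG);
            else
                    /* No volatile cache; draining alone is sufficient. */
                    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
    }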
@@ -455,8 +486,15 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 
 
 	add_disk(vblk->disk);
+	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
+	if (err)
+		goto out_del_disk;
+
 	return 0;
 
+out_del_disk:
+	del_gendisk(vblk->disk);
+	blk_cleanup_queue(vblk->disk->queue);
 out_put_disk:
 	put_disk(vblk->disk);
 out_mempool:
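The new out_del_disk label slots into the function's existing unwind ladder, the standard kernel idiom where each failure point jumps to the label that releases everything acquired so far, in reverse order. A minimal self-contained sketch of the pattern (the resources are invented stand-ins for the disk, queue and mempool):

    #include <stdlib.h>

    static void *res_a, *res_b, *res_c;

    static int setup(void)
    {
            res_a = malloc(16);
            if (!res_a)
                    goto out;
            res_b = malloc(16);
            if (!res_b)
                    goto out_free_a;
            res_c = malloc(16);
            if (!res_c)
                    goto out_free_b;
            return 0;       /* success: all three resources live */

            /* Unwind in reverse order of acquisition, mirroring the
             * out_del_disk -> out_put_disk -> out_mempool ladder above. */
    out_free_b:
            free(res_b);
    out_free_a:
            free(res_a);
    out:
            return -1;
    }

    int main(void)
    {
            return setup() ? 1 : 0;
    }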
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 67d9bfab59fa..ac1b682edecb 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -49,6 +49,7 @@
 #include <xen/grant_table.h>
 #include <xen/events.h>
 #include <xen/page.h>
+#include <xen/platform_pci.h>
 
 #include <xen/interface/grant_table.h>
 #include <xen/interface/io/blkif.h>
@@ -829,6 +830,35 @@ static int blkfront_probe(struct xenbus_device *dev,
 		}
 	}
 
+	if (xen_hvm_domain()) {
+		char *type;
+		int len;
+		/* no unplug has been done: do not hook devices != xen vbds */
+		if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) {
+			int major;
+
+			if (!VDEV_IS_EXTENDED(vdevice))
+				major = BLKIF_MAJOR(vdevice);
+			else
+				major = XENVBD_MAJOR;
+
+			if (major != XENVBD_MAJOR) {
+				printk(KERN_INFO
+					"%s: HVM does not support vbd %d as xen block device\n",
+					__FUNCTION__, vdevice);
+				return -ENODEV;
+			}
+		}
+		/* do not create a PV cdrom device if we are an HVM guest */
+		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
+		if (IS_ERR(type))
+			return -ENODEV;
+		if (strncmp(type, "cdrom", 5) == 0) {
+			kfree(type);
+			return -ENODEV;
+		}
+		kfree(type);
+	}
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
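For reference, the vbd-number macros used above decode the xenstore virtual device id: classic ids carry the Linux major in bits 15:8 and the minor in bits 7:0, while extended ids set a high flag bit. A small userspace sketch of the decoding (the macro bodies mirror drivers/block/xen-blkfront.c of this era, but verify against the tree you are reading):

    #include <stdio.h>

    #define EXT_SHIFT               28
    #define EXTENDED                (1 << EXT_SHIFT)
    #define VDEV_IS_EXTENDED(dev)   ((dev) & EXTENDED)
    #define BLKIF_MAJOR(dev)        ((dev) >> 8)
    #define BLKIF_MINOR(dev)        ((dev) & 0xff)
    #define XENVBD_MAJOR            202     /* the xvd* major */

    int main(void)
    {
            /* 0xca00: major 202, minor 0 -- xvda in the classic scheme. */
            unsigned int vdevice = 0xca00;

            if (!VDEV_IS_EXTENDED(vdevice))
                    printf("classic vbd: major %u, minor %u\n",
                           BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice));
            else
                    printf("extended vbd: minor %u\n",
                           vdevice & ~EXTENDED);
            return 0;
    }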