Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig         65
-rw-r--r--  block/blk-map.c       46
-rw-r--r--  block/blk-merge.c      9
-rw-r--r--  block/blk-sysfs.c     10
-rw-r--r--  block/bsg.c           96
-rw-r--r--  block/compat_ioctl.c   1
6 files changed, 139 insertions(+), 88 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 7db9a411649d..3e97f2bc446f 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -5,14 +5,18 @@ menuconfig BLOCK
         bool "Enable the block layer" if EMBEDDED
         default y
         help
-         This permits the block layer to be removed from the kernel if it's not
-         needed (on some embedded devices for example). If this option is
-         disabled, then blockdev files will become unusable and some
-         filesystems (such as ext3) will become unavailable.
+         Provide block layer support for the kernel.
 
-         This option will also disable SCSI character devices and USB storage
-         since they make use of various block layer definitions and
-         facilities.
+         Disable this option to remove the block layer support from the
+         kernel. This may be useful for embedded devices.
+
+         If this option is disabled:
+
+           - block device files will become unusable
+           - some filesystems (such as ext3) will become unavailable.
+
+         Also, SCSI character devices and USB storage will be disabled since
+         they make use of various block layer definitions and facilities.
 
         Say Y here unless you know you really don't want to mount disks and
         suchlike.
@@ -23,9 +27,20 @@ config LBD
         bool "Support for Large Block Devices"
         depends on !64BIT
         help
-         Say Y here if you want to attach large (bigger than 2TB) discs to
-         your machine, or if you want to have a raid or loopback device
-         bigger than 2TB. Otherwise say N.
+         Enable block devices of size 2TB and larger.
+
+         This option is required to support the full capacity of large
+         (2TB+) block devices, including RAID, disk, Network Block Device,
+         Logical Volume Manager (LVM) and loopback.
+
+         For example, RAID devices are frequently bigger than the capacity
+         of the largest individual hard drive.
+
+         This option is not required if you have individual disk drives
+         which total 2TB+ and you are not aggregating the capacity into
+         a large block device (e.g. using RAID or LVM).
+
+         If unsure, say N.
 
 config BLK_DEV_IO_TRACE
         bool "Support for tracing block io actions"
@@ -33,19 +48,21 @@ config BLK_DEV_IO_TRACE
         select RELAY
         select DEBUG_FS
         help
-         Say Y here, if you want to be able to trace the block layer actions
+         Say Y here if you want to be able to trace the block layer actions
          on a given queue. Tracing allows you to see any traffic happening
-         on a block device queue. For more information (and the user space
-         support tools needed), fetch the blktrace app from:
+         on a block device queue. For more information (and the userspace
+         support tools needed), fetch the blktrace tools from:
 
          git://git.kernel.dk/blktrace.git
 
+         If unsure, say N.
+
 config LSF
         bool "Support for Large Single Files"
         depends on !64BIT
         help
-         Say Y here if you want to be able to handle very large files (bigger
-         than 2TB), otherwise say N.
+         Say Y here if you want to be able to handle very large files (2TB
+         and larger), otherwise say N.
 
          If unsure, say Y.
 
@@ -53,14 +70,16 @@ config BLK_DEV_BSG
         bool "Block layer SG support v4 (EXPERIMENTAL)"
         depends on EXPERIMENTAL
         ---help---
         Saying Y here will enable generic SG (SCSI generic) v4 support
         for any block device.
 
         Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
         can handle complicated SCSI commands: tagged variable length cdbs
         with bidirectional data transfers and generic request/response
         protocols (e.g. Task Management Functions and SMP in Serial
         Attached SCSI).
+
+        If unsure, say N.
 
 endif # BLOCK
 
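The BLK_DEV_BSG help text describes SG v4 only in the abstract. As orientation, here is a minimal userspace sketch that issues a simple data-in command (INQUIRY) through a bsg node using struct sg_io_v4 from <linux/bsg.h>. The device path is a placeholder, error handling is abbreviated, and the pointer-to-__u64 casts assume a 64-bit build; treat it as illustrative, not canonical.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>            /* SG_IO */
#include <linux/bsg.h>          /* struct sg_io_v4 */

int main(void)
{
        unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  /* INQUIRY */
        unsigned char buf[96], sense[32];
        struct sg_io_v4 hdr;
        int fd = open("/dev/bsg/0:0:0:0", O_RDWR);  /* placeholder node */

        if (fd < 0)
                return 1;

        memset(&hdr, 0, sizeof(hdr));
        hdr.guard = 'Q';                        /* marks a v4 request */
        hdr.protocol = BSG_PROTOCOL_SCSI;
        hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
        hdr.request = (unsigned long)cdb;       /* the CDB itself */
        hdr.request_len = sizeof(cdb);
        hdr.din_xferp = (unsigned long)buf;     /* data in from device */
        hdr.din_xfer_len = sizeof(buf);
        hdr.response = (unsigned long)sense;    /* sense data on error */
        hdr.max_response_len = sizeof(sense);
        hdr.timeout = 20000;                    /* milliseconds */

        if (ioctl(fd, SG_IO, &hdr) < 0 || hdr.device_status != 0) {
                close(fd);
                return 1;
        }
        printf("vendor: %.8s\n", buf + 8);      /* INQUIRY vendor field */
        close(fd);
        return 0;
}

Unlike SG v3's struct sg_io_hdr, the same header also carries dout_* fields, which is what makes the bidirectional transfers mentioned in the help text expressible.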
diff --git a/block/blk-map.c b/block/blk-map.c
index c07d9c8317f4..3c942bd6422a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <scsi/sg.h>            /* for struct sg_iovec */
 
 #include "blk.h"
 
@@ -140,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
                 ubuf += ret;
         }
 
-        /*
-         * __blk_rq_map_user() copies the buffers if starting address
-         * or length isn't aligned to dma_pad_mask. As the copied
-         * buffer is always page aligned, we know that there's enough
-         * room for padding. Extend the last bio and update
-         * rq->data_len accordingly.
-         *
-         * On unmap, bio_uncopy_user() will use unmodified
-         * bio_map_data pointed to by bio->bi_private.
-         */
-        if (len & q->dma_pad_mask) {
-                unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
-                struct bio *tail = rq->biotail;
-
-                tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
-                tail->bi_size += pad_len;
-
-                rq->extra_len += pad_len;
-        }
+        if (!bio_flagged(bio, BIO_USER_MAPPED))
+                rq->cmd_flags |= REQ_COPY_USER;
 
         rq->buffer = rq->data = NULL;
         return 0;
@@ -194,15 +178,26 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                         struct sg_iovec *iov, int iov_count, unsigned int len)
 {
         struct bio *bio;
+        int i, read = rq_data_dir(rq) == READ;
+        int unaligned = 0;
 
         if (!iov || iov_count <= 0)
                 return -EINVAL;
 
-        /* we don't allow misaligned data like bio_map_user() does. If the
-         * user is using sg, they're expected to know the alignment constraints
-         * and respect them accordingly */
-        bio = bio_map_user_iov(q, NULL, iov, iov_count,
-                               rq_data_dir(rq) == READ);
+        for (i = 0; i < iov_count; i++) {
+                unsigned long uaddr = (unsigned long)iov[i].iov_base;
+
+                if (uaddr & queue_dma_alignment(q)) {
+                        unaligned = 1;
+                        break;
+                }
+        }
+
+        if (unaligned || (q->dma_pad_mask & len))
+                bio = bio_copy_user_iov(q, iov, iov_count, read);
+        else
+                bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
@@ -212,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                 return -EINVAL;
         }
 
+        if (!bio_flagged(bio, BIO_USER_MAPPED))
+                rq->cmd_flags |= REQ_COPY_USER;
+
         bio_get(bio);
         blk_rq_bio_prep(q, rq, bio);
         rq->buffer = rq->data = NULL;
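The blk_rq_map_user_iov() hunk replaces the old "reject misaligned iovecs" policy with a fallback: if any iovec base address trips queue_dma_alignment(q), or the total length needs dma_pad_mask padding, the data is bounced through bio_copy_user_iov() instead of being mapped in place, and REQ_COPY_USER records that a copy happened. A standalone sketch of that decision with illustrative mask values; the struct and helper here are local stand-ins, not kernel API:

#include <stdio.h>

struct iovec_ex { unsigned long base; unsigned long len; };

/* returns 1 when the copying path (bio_copy_user_iov) would be chosen */
static int needs_copy(unsigned long dma_align_mask, unsigned long dma_pad_mask,
                      const struct iovec_ex *iov, int n, unsigned long total_len)
{
        int i;

        for (i = 0; i < n; i++)
                if (iov[i].base & dma_align_mask)
                        return 1;               /* misaligned start address */
        return (total_len & dma_pad_mask) != 0; /* length needs padding */
}

int main(void)
{
        /* second segment starts at an odd offset, so it must be copied */
        struct iovec_ex iov[2] = { { 0x1000, 512 }, { 0x2003, 512 } };

        printf("copy path: %d\n", needs_copy(0x3, 0x3, iov, 2, 1024));
        return 0;
}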
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0f58616bcd7f..b5c5c4a9e3f0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,6 +220,15 @@ new_segment:
                 bvprv = bvec;
         } /* segments in rq */
 
+
+        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+            (rq->data_len & q->dma_pad_mask)) {
+                unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+                sg->length += pad_len;
+                rq->extra_len += pad_len;
+        }
+
         if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                 if (rq->cmd_flags & REQ_RW)
                         memset(q->dma_drain_buffer, 0, q->dma_drain_size);
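The padding arithmetic that moved here from blk-map.c is easy to check by hand: with a mask of 3 (pad lengths to 4-byte multiples) and a data length of 13, (3 & ~13) + 1 = 3, so the segment grows to 16 bytes. A small self-contained demonstration; the mask value is illustrative, and in the kernel drivers configure it with blk_queue_dma_pad():

#include <stdio.h>

int main(void)
{
        unsigned int dma_pad_mask = 3;  /* pad lengths to 4-byte multiples */
        unsigned int data_len = 13;

        if (data_len & dma_pad_mask) {
                unsigned int pad_len = (dma_pad_mask & ~data_len) + 1;

                /* 13 -> pad_len 3 -> padded total 16 */
                printf("pad_len=%u, padded=%u\n", pad_len, data_len + pad_len);
        }
        return 0;
}

Doing this at sg-mapping time only touches the last scatterlist entry, and it pairs with the REQ_COPY_USER flag set in blk-map.c: padding is applied only to requests that went through the copying path and therefore have page-aligned buffers with room to grow.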
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 54d0db116153..fc41d83be22b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -276,9 +276,12 @@ int blk_register_queue(struct gendisk *disk)
 
         struct request_queue *q = disk->queue;
 
-        if (!q || !q->request_fn)
+        if (WARN_ON(!q))
                 return -ENXIO;
 
+        if (!q->request_fn)
+                return 0;
+
         ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
                           "%s", "queue");
         if (ret < 0)
@@ -300,7 +303,10 @@ void blk_unregister_queue(struct gendisk *disk)
 {
         struct request_queue *q = disk->queue;
 
-        if (q && q->request_fn) {
+        if (WARN_ON(!q))
+                return;
+
+        if (q->request_fn) {
                 elv_unregister_queue(q);
 
                 kobject_uevent(&q->kobj, KOBJ_REMOVE);
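Net effect of the blk-sysfs.c hunks: a NULL queue now triggers a WARN_ON() stack trace instead of a silent -ENXIO, and queues without a request_fn (stacked devices such as md/dm) register as a successful no-op rather than an error. A standalone model of the new control flow; the struct and function names here are local stand-ins:

#include <stdio.h>
#include <errno.h>

struct queue { void (*request_fn)(void); };

static int register_queue(struct queue *q)
{
        if (!q) {                       /* WARN_ON(!q) in the kernel */
                fprintf(stderr, "warning: no queue\n");
                return -ENXIO;
        }
        if (!q->request_fn)             /* stacked device: nothing to do */
                return 0;
        /* ... add the kobject, register the elevator ... */
        return 0;
}

int main(void)
{
        struct queue stacked = { 0 };   /* no request_fn */

        printf("%d %d\n", register_queue(NULL), register_queue(&stacked));
        return 0;
}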
diff --git a/block/bsg.c b/block/bsg.c
index 8917c5174dc2..23ea4fd1a66d 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,6 @@ struct bsg_device {
         struct list_head done_list;
         struct hlist_node dev_list;
         atomic_t ref_count;
-        int minor;
         int queued_cmds;
         int done_cmds;
         wait_queue_head_t wq_done;
@@ -368,7 +367,7 @@ static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
 
         spin_lock_irq(&bd->lock);
         if (bd->done_cmds) {
-                bc = list_entry(bd->done_list.next, struct bsg_command, list);
+                bc = list_first_entry(&bd->done_list, struct bsg_command, list);
                 list_del(&bc->list);
                 bd->done_cmds--;
         }
@@ -468,8 +467,6 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 
         dprintk("%s: entered\n", bd->name);
 
-        set_bit(BSG_F_BLOCK, &bd->flags);
-
         /*
          * wait for all commands to complete
          */
@@ -702,13 +699,26 @@ static struct bsg_device *bsg_alloc_device(void)
         return bd;
 }
 
+static void bsg_kref_release_function(struct kref *kref)
+{
+        struct bsg_class_device *bcd =
+                container_of(kref, struct bsg_class_device, ref);
+
+        if (bcd->release)
+                bcd->release(bcd->parent);
+
+        put_device(bcd->parent);
+}
+
 static int bsg_put_device(struct bsg_device *bd)
 {
-        int ret = 0;
+        int ret = 0, do_free;
+        struct request_queue *q = bd->queue;
 
         mutex_lock(&bsg_mutex);
 
-        if (!atomic_dec_and_test(&bd->ref_count))
+        do_free = atomic_dec_and_test(&bd->ref_count);
+        if (!do_free)
                 goto out;
 
         dprintk("%s: tearing down\n", bd->name);
@@ -725,11 +735,13 @@ static int bsg_put_device(struct bsg_device *bd)
          */
         ret = bsg_complete_all_commands(bd);
 
-        blk_put_queue(bd->queue);
         hlist_del(&bd->dev_list);
         kfree(bd);
 out:
         mutex_unlock(&bsg_mutex);
+        kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
+        if (do_free)
+                blk_put_queue(q);
         return ret;
 }
 
@@ -738,24 +750,28 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
                                          struct file *file)
 {
         struct bsg_device *bd;
+        int ret;
 #ifdef BSG_DEBUG
         unsigned char buf[32];
 #endif
+        ret = blk_get_queue(rq);
+        if (ret)
+                return ERR_PTR(-ENXIO);
 
         bd = bsg_alloc_device();
-        if (!bd)
+        if (!bd) {
+                blk_put_queue(rq);
                 return ERR_PTR(-ENOMEM);
+        }
 
         bd->queue = rq;
-        kobject_get(&rq->kobj);
         bsg_set_block(bd, file);
 
         atomic_set(&bd->ref_count, 1);
-        bd->minor = iminor(inode);
         mutex_lock(&bsg_mutex);
-        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));
+        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
-        strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
+        strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
         dprintk("bound to <%s>, max queue %d\n",
                 format_dev_t(buf, inode->i_rdev), bd->max_queue);
 
@@ -763,23 +779,21 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
         return bd;
 }
 
-static struct bsg_device *__bsg_get_device(int minor)
+static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
-        struct bsg_device *bd = NULL;
+        struct bsg_device *bd;
         struct hlist_node *entry;
 
         mutex_lock(&bsg_mutex);
 
-        hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
-                bd = hlist_entry(entry, struct bsg_device, dev_list);
-                if (bd->minor == minor) {
+        hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+                if (bd->queue == q) {
                         atomic_inc(&bd->ref_count);
-                        break;
+                        goto found;
                 }
-
-                bd = NULL;
         }
-
+        bd = NULL;
+found:
         mutex_unlock(&bsg_mutex);
         return bd;
 }
@@ -789,21 +803,27 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
         struct bsg_device *bd;
         struct bsg_class_device *bcd;
 
-        bd = __bsg_get_device(iminor(inode));
-        if (bd)
-                return bd;
-
         /*
          * find the class device
          */
         mutex_lock(&bsg_mutex);
         bcd = idr_find(&bsg_minor_idr, iminor(inode));
+        if (bcd)
+                kref_get(&bcd->ref);
         mutex_unlock(&bsg_mutex);
 
         if (!bcd)
                 return ERR_PTR(-ENODEV);
 
-        return bsg_add_device(inode, bcd->queue, file);
+        bd = __bsg_get_device(iminor(inode), bcd->queue);
+        if (bd)
+                return bd;
+
+        bd = bsg_add_device(inode, bcd->queue, file);
+        if (IS_ERR(bd))
+                kref_put(&bcd->ref, bsg_kref_release_function);
+
+        return bd;
 }
 
 static int bsg_open(struct inode *inode, struct file *file)
@@ -939,27 +959,26 @@ void bsg_unregister_queue(struct request_queue *q)
         mutex_lock(&bsg_mutex);
         idr_remove(&bsg_minor_idr, bcd->minor);
         sysfs_remove_link(&q->kobj, "bsg");
-        class_device_unregister(bcd->class_dev);
-        put_device(bcd->dev);
+        device_unregister(bcd->class_dev);
         bcd->class_dev = NULL;
-        bcd->dev = NULL;
+        kref_put(&bcd->ref, bsg_kref_release_function);
         mutex_unlock(&bsg_mutex);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
-int bsg_register_queue(struct request_queue *q, struct device *gdev,
-                       const char *name)
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+                       const char *name, void (*release)(struct device *))
 {
         struct bsg_class_device *bcd;
         dev_t dev;
         int ret, minor;
-        struct class_device *class_dev = NULL;
+        struct device *class_dev = NULL;
         const char *devname;
 
         if (name)
                 devname = name;
         else
-                devname = gdev->bus_id;
+                devname = parent->bus_id;
 
         /*
          * we need a proper transport to send commands, not a stacked device
@@ -990,10 +1009,11 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
 
         bcd->minor = minor;
         bcd->queue = q;
-        bcd->dev = get_device(gdev);
+        bcd->parent = get_device(parent);
+        bcd->release = release;
+        kref_init(&bcd->ref);
         dev = MKDEV(bsg_major, bcd->minor);
-        class_dev = class_device_create(bsg_class, NULL, dev, gdev, "%s",
-                                        devname);
+        class_dev = device_create(bsg_class, parent, dev, "%s", devname);
         if (IS_ERR(class_dev)) {
                 ret = PTR_ERR(class_dev);
                 goto put_dev;
@@ -1010,9 +1030,9 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
         return 0;
 
 unregister_class_dev:
-        class_device_unregister(class_dev);
+        device_unregister(class_dev);
 put_dev:
-        put_device(gdev);
+        put_device(parent);
 remove_idr:
         idr_remove(&bsg_minor_idr, minor);
 unlock:
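The thread running through the bsg.c hunks is lifetime management: struct bsg_class_device gains a kref that bsg_register_queue() initialises, every open path takes, and bsg_unregister_queue() and bsg_put_device() drop; the final put runs bsg_kref_release_function(), which fires the driver's release callback and drops the parent device reference. A minimal userspace model of that pattern, simplified and not thread-safe (the kernel's kref uses atomic counters):

#include <stdio.h>

struct kref { int refcount; };

static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k) { k->refcount++; }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
        if (--k->refcount == 0)
                release(k);
}

struct bsg_class_device_model {
        struct kref ref;                /* must stay the first member */
        void (*driver_release)(void);
};

static void driver_release_fn(void) { printf("driver release callback\n"); }

static void bcd_release(struct kref *kref)
{
        /* the kernel uses container_of(); a first-member cast works here */
        struct bsg_class_device_model *bcd =
                (struct bsg_class_device_model *)kref;

        if (bcd->driver_release)
                bcd->driver_release();
        printf("put_device(parent)\n");
}

int main(void)
{
        struct bsg_class_device_model bcd = { .driver_release = driver_release_fn };

        kref_init(&bcd.ref);                    /* bsg_register_queue() */
        kref_get(&bcd.ref);                     /* open: bsg_get_device() */
        kref_put(&bcd.ref, bcd_release);        /* close: bsg_put_device() */
        kref_put(&bcd.ref, bcd_release);        /* bsg_unregister_queue() */
        return 0;
}

This is why bsg_unregister_queue() can run while a process still holds the device open: the class device and its parent stay alive until the last opener's kref_put().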
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index b73373216b0e..c70d0b6f666f 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -624,7 +624,6 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
         case HDIO_GET_IDENTITY:
         case HDIO_DRIVE_TASK:
         case HDIO_DRIVE_CMD:
-        case HDIO_SCAN_HWIF:
         /* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
         case 0x330:
         /* 0x02 -- Floppy ioctls */