author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-21 19:03:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-21 19:03:40 -0400
commit     548453fd107f789f5f1bc2dc13cc432ceb3b5efd (patch)
tree       dc5a62d49260d66b7390ef110113134e3bef9152 /block
parent     9fd91217b15751997cab35ad309b37b44eaa6774 (diff)
parent     fb199746303a6bfd6121834ec9e810471185c530 (diff)
Merge branch 'for-2.6.26' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.26' of git://git.kernel.dk/linux-2.6-block:
  block: fix blk_register_queue() return value
  block: fix memory hotplug and bouncing in block layer
  block: replace remaining __FUNCTION__ occurrences
  Kconfig: clean up block/Kconfig help descriptions
  cciss: fix warning oops on rmmod of driver
  cciss: Fix race between disk-adding code and interrupt handler
  block: move the padding adjustment to blk_rq_map_sg
  block: add bio_copy_user_iov support to blk_rq_map_user_iov
  block: convert bio_copy_user to bio_copy_user_iov
  loop: manage partitions in disk image
  cdrom: use kmalloced buffers instead of buffers on stack
  cdrom: make unregister_cdrom() return void
  cdrom: use list_head for cdrom_device_info list
  cdrom: protect cdrom_device_info list by mutex
  cdrom: cleanup hardcoded error-code
  cdrom: remove ifdef CONFIG_SYSCTL
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig      65
-rw-r--r--  block/blk-map.c    46
-rw-r--r--  block/blk-merge.c   9
-rw-r--r--  block/blk-sysfs.c  10
4 files changed, 81 insertions, 49 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 7db9a411649d..3e97f2bc446f 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -5,14 +5,18 @@ menuconfig BLOCK
5 bool "Enable the block layer" if EMBEDDED 5 bool "Enable the block layer" if EMBEDDED
6 default y 6 default y
7 help 7 help
8 This permits the block layer to be removed from the kernel if it's not 8 Provide block layer support for the kernel.
9 needed (on some embedded devices for example). If this option is
10 disabled, then blockdev files will become unusable and some
11 filesystems (such as ext3) will become unavailable.
12 9
13 This option will also disable SCSI character devices and USB storage 10 Disable this option to remove the block layer support from the
14 since they make use of various block layer definitions and 11 kernel. This may be useful for embedded devices.
15 facilities. 12
13 If this option is disabled:
14
15 - block device files will become unusable
16 - some filesystems (such as ext3) will become unavailable.
17
18 Also, SCSI character devices and USB storage will be disabled since
19 they make use of various block layer definitions and facilities.
16 20
17 Say Y here unless you know you really don't want to mount disks and 21 Say Y here unless you know you really don't want to mount disks and
18 suchlike. 22 suchlike.
@@ -23,9 +27,20 @@ config LBD
23 bool "Support for Large Block Devices" 27 bool "Support for Large Block Devices"
24 depends on !64BIT 28 depends on !64BIT
25 help 29 help
26 Say Y here if you want to attach large (bigger than 2TB) discs to 30 Enable block devices of size 2TB and larger.
27 your machine, or if you want to have a raid or loopback device 31
28 bigger than 2TB. Otherwise say N. 32 This option is required to support the full capacity of large
33 (2TB+) block devices, including RAID, disk, Network Block Device,
34 Logical Volume Manager (LVM) and loopback.
35
36 For example, RAID devices are frequently bigger than the capacity
37 of the largest individual hard drive.
38
39 This option is not required if you have individual disk drives
40 which total 2TB+ and you are not aggregating the capacity into
41 a large block device (e.g. using RAID or LVM).
42
43 If unsure, say N.
29 44
30config BLK_DEV_IO_TRACE 45config BLK_DEV_IO_TRACE
31 bool "Support for tracing block io actions" 46 bool "Support for tracing block io actions"
@@ -33,19 +48,21 @@ config BLK_DEV_IO_TRACE
 	select RELAY
 	select DEBUG_FS
 	help
-	  Say Y here, if you want to be able to trace the block layer actions
+	  Say Y here if you want to be able to trace the block layer actions
 	  on a given queue. Tracing allows you to see any traffic happening
-	  on a block device queue. For more information (and the user space
-	  support tools needed), fetch the blktrace app from:
+	  on a block device queue. For more information (and the userspace
+	  support tools needed), fetch the blktrace tools from:
 
 	  git://git.kernel.dk/blktrace.git
 
+	  If unsure, say N.
+
 config LSF
 	bool "Support for Large Single Files"
 	depends on !64BIT
 	help
-	  Say Y here if you want to be able to handle very large files (bigger
-	  than 2TB), otherwise say N.
+	  Say Y here if you want to be able to handle very large files (2TB
+	  and larger), otherwise say N.
 
 	  If unsure, say Y.
 
@@ -53,14 +70,16 @@ config BLK_DEV_BSG
53 bool "Block layer SG support v4 (EXPERIMENTAL)" 70 bool "Block layer SG support v4 (EXPERIMENTAL)"
54 depends on EXPERIMENTAL 71 depends on EXPERIMENTAL
55 ---help--- 72 ---help---
56 Saying Y here will enable generic SG (SCSI generic) v4 support 73 Saying Y here will enable generic SG (SCSI generic) v4 support
57 for any block device. 74 for any block device.
58 75
59 Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4 76 Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
60 can handle complicated SCSI commands: tagged variable length cdbs 77 can handle complicated SCSI commands: tagged variable length cdbs
61 with bidirectional data transfers and generic request/response 78 with bidirectional data transfers and generic request/response
62 protocols (e.g. Task Management Functions and SMP in Serial 79 protocols (e.g. Task Management Functions and SMP in Serial
63 Attached SCSI). 80 Attached SCSI).
81
82 If unsure, say N.
64 83
65endif # BLOCK 84endif # BLOCK
66 85
diff --git a/block/blk-map.c b/block/blk-map.c
index c07d9c8317f4..3c942bd6422a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
 
@@ -140,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
-	/*
-	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned to dma_pad_mask. As the copied
-	 * buffer is always page aligned, we know that there's enough
-	 * room for padding. Extend the last bio and update
-	 * rq->data_len accordingly.
-	 *
-	 * On unmap, bio_uncopy_user() will use unmodified
-	 * bio_map_data pointed to by bio->bi_private.
-	 */
-	if (len & q->dma_pad_mask) {
-		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
-		struct bio *tail = rq->biotail;
-
-		tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
-		tail->bi_size += pad_len;
-
-		rq->extra_len += pad_len;
-	}
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
 
 	rq->buffer = rq->data = NULL;
 	return 0;
@@ -194,15 +178,26 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
+	int i, read = rq_data_dir(rq) == READ;
+	int unaligned = 0;
 
 	if (!iov || iov_count <= 0)
 		return -EINVAL;
 
-	/* we don't allow misaligned data like bio_map_user() does. If the
-	 * user is using sg, they're expected to know the alignment constraints
-	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count,
-			       rq_data_dir(rq) == READ);
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+
+		if (uaddr & queue_dma_alignment(q)) {
+			unaligned = 1;
+			break;
+		}
+	}
+
+	if (unaligned || (q->dma_pad_mask & len))
+		bio = bio_copy_user_iov(q, iov, iov_count, read);
+	else
+		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -212,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return -EINVAL;
 	}
 
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
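
Illustrative note (not part of the patch): the checks added to blk_rq_map_user_iov() above decide between mapping the user pages directly and bouncing the data through a kernel copy. A minimal standalone sketch of that decision follows; the alignment/pad mask values and iovec addresses are hypothetical.

	/* Illustrative only: mirrors the copy-vs-map decision above.
	 * Mask values and iovec addresses are hypothetical. */
	#include <stdio.h>

	struct iov_sketch { unsigned long base; unsigned long len; };

	int main(void)
	{
		unsigned long dma_alignment = 0x3;  /* hypothetical queue_dma_alignment(q) */
		unsigned long dma_pad_mask  = 0x3;  /* hypothetical q->dma_pad_mask */
		struct iov_sketch iov[2] = { { 0x1000, 512 }, { 0x2002, 510 } };
		unsigned long len = 512 + 510;
		int unaligned = 0;
		int i;

		/* any misaligned segment start forces the bounce-buffer path */
		for (i = 0; i < 2; i++) {
			if (iov[i].base & dma_alignment) {
				unaligned = 1;
				break;
			}
		}

		if (unaligned || (len & dma_pad_mask))
			printf("copy path (bio_copy_user_iov), REQ_COPY_USER will be set\n");
		else
			printf("direct map path (bio_map_user_iov)\n");
		return 0;
	}

With these values the second segment's base address (0x2002) trips the alignment check, so the request would take the bio_copy_user_iov() path and REQ_COPY_USER would be set.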
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0f58616bcd7f..b5c5c4a9e3f0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,6 +220,15 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
+
+	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+	    (rq->data_len & q->dma_pad_mask)) {
+		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+		sg->length += pad_len;
+		rq->extra_len += pad_len;
+	}
+
 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		if (rq->cmd_flags & REQ_RW)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
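
Illustrative note (not part of the patch): the hunk above moves the padding adjustment into blk_rq_map_sg(), extending the last scatterlist entry when REQ_COPY_USER is set and rq->data_len is not a multiple of (dma_pad_mask + 1). A minimal standalone sketch of that arithmetic, with hypothetical mask and length values:

	/* Illustrative only: the pad_len computation used in blk_rq_map_sg() above.
	 * The mask and length values are hypothetical. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int dma_pad_mask = 0x3;   /* hypothetical: pad to 4-byte multiples */
		unsigned int data_len = 498;       /* not a multiple of 4 */

		if (data_len & dma_pad_mask) {
			unsigned int pad_len = (dma_pad_mask & ~data_len) + 1;

			/* 498 -> pad_len 2 -> padded length 500 */
			printf("pad_len = %u, padded length = %u\n",
			       pad_len, data_len + pad_len);
		}
		return 0;
	}

Here pad_len comes out to 2, padding 498 bytes up to 500, the next 4-byte boundary; since the copied bounce buffer is page aligned, there is always room for the extra bytes.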
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 54d0db116153..fc41d83be22b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -276,9 +276,12 @@ int blk_register_queue(struct gendisk *disk)
 
 	struct request_queue *q = disk->queue;
 
-	if (!q || !q->request_fn)
+	if (WARN_ON(!q))
 		return -ENXIO;
 
+	if (!q->request_fn)
+		return 0;
+
 	ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
 			  "%s", "queue");
 	if (ret < 0)
@@ -300,7 +303,10 @@ void blk_unregister_queue(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 
-	if (q && q->request_fn) {
+	if (WARN_ON(!q))
+		return;
+
+	if (q->request_fn) {
 		elv_unregister_queue(q);
 
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);