Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/Kconfig              46
-rw-r--r--  drivers/block/aoe/aoedev.c          2
-rw-r--r--  drivers/block/cfq-iosched.c         3
-rw-r--r--  drivers/block/cryptoloop.c          6
-rw-r--r--  drivers/block/deadline-iosched.c   12
-rw-r--r--  drivers/block/floppy.c             41
-rw-r--r--  drivers/block/genhd.c               2
-rw-r--r--  drivers/block/ll_rw_blk.c         196
-rw-r--r--  drivers/block/scsi_ioctl.c         60
9 files changed, 239 insertions, 129 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b594768b0241..51b0af1cebee 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -6,7 +6,7 @@ menu "Block devices"
 
 config BLK_DEV_FD
         tristate "Normal floppy disk support"
-        depends on (!ARCH_S390 && !M68K && !IA64 && !UML && !ARM) || Q40 || (SUN3X && BROKEN) || ARCH_RPC || ARCH_EBSA285
+        depends on ARCH_MAY_HAVE_PC_FDC
         ---help---
           If you want to use the floppy disk drive(s) of your PC under Linux,
           say Y. Information about this driver, especially important for IBM
@@ -408,54 +408,12 @@ config BLK_DEV_INITRD
408 "real" root file system, etc. See <file:Documentation/initrd.txt> 408 "real" root file system, etc. See <file:Documentation/initrd.txt>
409 for details. 409 for details.
410 410
411config INITRAMFS_SOURCE
412 string "Initramfs source file(s)"
413 default ""
414 help
415 This can be either a single cpio archive with a .cpio suffix or a
416 space-separated list of directories and files for building the
417 initramfs image. A cpio archive should contain a filesystem archive
418 to be used as an initramfs image. Directories should contain a
419 filesystem layout to be included in the initramfs image. Files
420 should contain entries according to the format described by the
421 "usr/gen_init_cpio" program in the kernel tree.
422
423 When multiple directories and files are specified then the
424 initramfs image will be the aggregate of all of them.
425
426 See <file:Documentation/early-userspace/README for more details.
427
428 If you are not sure, leave it blank.
429
430config INITRAMFS_ROOT_UID
431 int "User ID to map to 0 (user root)"
432 depends on INITRAMFS_SOURCE!=""
433 default "0"
434 help
435 This setting is only meaningful if the INITRAMFS_SOURCE is
436 contains a directory. Setting this user ID (UID) to something
437 other than "0" will cause all files owned by that UID to be
438 owned by user root in the initial ramdisk image.
439
440 If you are not sure, leave it set to "0".
441
442config INITRAMFS_ROOT_GID
443 int "Group ID to map to 0 (group root)"
444 depends on INITRAMFS_SOURCE!=""
445 default "0"
446 help
447 This setting is only meaningful if the INITRAMFS_SOURCE is
448 contains a directory. Setting this group ID (GID) to something
449 other than "0" will cause all files owned by that GID to be
450 owned by group root in the initial ramdisk image.
451
452 If you are not sure, leave it set to "0".
453 411
454#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64 412#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
455#for instance. 413#for instance.
456config LBD 414config LBD
457 bool "Support for Large Block Devices" 415 bool "Support for Large Block Devices"
458 depends on X86 || MIPS32 || PPC32 || ARCH_S390_31 || SUPERH || UML 416 depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
459 help 417 help
460 Say Y here if you want to attach large (bigger than 2TB) discs to 418 Say Y here if you want to attach large (bigger than 2TB) discs to
461 your machine, or if you want to have a raid or loopback device 419 your machine, or if you want to have a raid or loopback device
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 6e231c5a1199..ded33ba31acc 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -35,7 +35,7 @@ aoedev_newdev(ulong nframes)
         struct aoedev *d;
         struct frame *f, *e;
 
-        d = kcalloc(1, sizeof *d, GFP_ATOMIC);
+        d = kzalloc(sizeof *d, GFP_ATOMIC);
         if (d == NULL)
                 return NULL;
         f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index cd056e7e64ec..30c0903c7cdd 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -2260,8 +2260,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
         if (!atomic_dec_and_test(&cfqd->ref))
                 return;
 
-        blk_put_queue(q);
-
         cfq_shutdown_timer_wq(cfqd);
         q->elevator->elevator_data = NULL;
 
@@ -2318,7 +2316,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
         e->elevator_data = cfqd;
 
         cfqd->queue = q;
-        atomic_inc(&q->refcnt);
 
         cfqd->max_queued = q->nr_requests / 4;
         q->nr_batching = cfq_queued;
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 5be6f998d8c5..3d4261c39f16 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -57,9 +57,11 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
         mode = strsep(&cmsp, "-");
 
         if (mode == NULL || strcmp(mode, "cbc") == 0)
-                tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC);
+                tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
+                                               CRYPTO_TFM_REQ_MAY_SLEEP);
         else if (strcmp(mode, "ecb") == 0)
-                tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB);
+                tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
+                                               CRYPTO_TFM_REQ_MAY_SLEEP);
         if (tfm == NULL)
                 return -EINVAL;
 
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index ff5201e02153..24594c57c323 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -507,18 +507,12 @@ static int deadline_dispatch_requests(struct deadline_data *dd)
         const int reads = !list_empty(&dd->fifo_list[READ]);
         const int writes = !list_empty(&dd->fifo_list[WRITE]);
         struct deadline_rq *drq;
-        int data_dir, other_dir;
+        int data_dir;
 
         /*
          * batches are currently reads XOR writes
          */
-        drq = NULL;
-
-        if (dd->next_drq[READ])
-                drq = dd->next_drq[READ];
-
-        if (dd->next_drq[WRITE])
-                drq = dd->next_drq[WRITE];
+        drq = dd->next_drq[WRITE] ? : dd->next_drq[READ];
 
         if (drq) {
                 /* we have a "next request" */
@@ -544,7 +538,6 @@ static int deadline_dispatch_requests(struct deadline_data *dd)
                         goto dispatch_writes;
 
                 data_dir = READ;
-                other_dir = WRITE;
 
                 goto dispatch_find_request;
         }
@@ -560,7 +553,6 @@ dispatch_writes:
                 dd->starved = 0;
 
                 data_dir = WRITE;
-                other_dir = READ;
 
                 goto dispatch_find_request;
         }
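
Note: the single-line replacement above uses the GNU C conditional with an omitted middle operand, "x ? : y", which evaluates to x when x is non-NULL and to y otherwise, evaluating x only once; pending writes are therefore preferred over pending reads in one expression. A minimal stand-alone sketch of the same selection (hypothetical variable names, requires the gcc extension):

#include <stdio.h>

int main(void)
{
        const char *next_write = NULL;           /* stands in for dd->next_drq[WRITE] */
        const char *next_read = "read request";  /* stands in for dd->next_drq[READ] */

        /* GNU extension: "a ? : b" means "a ? a : b", with a evaluated once */
        const char *next = next_write ? : next_read;

        printf("next: %s\n", next);
        return 0;
}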
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index f0c1084b840f..888dad5eef34 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -493,6 +493,8 @@ static struct floppy_struct user_params[N_DRIVE];
 
 static sector_t floppy_sizes[256];
 
+static char floppy_device_name[] = "floppy";
+
 /*
  * The driver is trying to determine the correct media format
  * while probing is set. rw_interrupt() clears it after a
@@ -4191,18 +4193,24 @@ static int __init floppy_setup(char *str)
 
 static int have_no_fdc = -ENODEV;
 
+static ssize_t floppy_cmos_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
+{
+        struct platform_device *p;
+        int drive;
+
+        p = container_of(dev, struct platform_device,dev);
+        drive = p->id;
+        return sprintf(buf, "%X\n", UDP->cmos);
+}
+DEVICE_ATTR(cmos,S_IRUGO,floppy_cmos_show,NULL);
+
 static void floppy_device_release(struct device *dev)
 {
         complete(&device_release);
 }
 
-static struct platform_device floppy_device = {
-        .name = "floppy",
-        .id = 0,
-        .dev = {
-                .release = floppy_device_release,
-        }
-};
+static struct platform_device floppy_device[N_DRIVE];
 
 static struct kobject *floppy_find(dev_t dev, int *part, void *data)
 {
@@ -4370,20 +4378,26 @@ static int __init floppy_init(void)
                 goto out_flush_work;
         }
 
-        err = platform_device_register(&floppy_device);
-        if (err)
-                goto out_flush_work;
-
         for (drive = 0; drive < N_DRIVE; drive++) {
                 if (!(allowed_drive_mask & (1 << drive)))
                         continue;
                 if (fdc_state[FDC(drive)].version == FDC_NONE)
                         continue;
+
+                floppy_device[drive].name = floppy_device_name;
+                floppy_device[drive].id = drive;
+                floppy_device[drive].dev.release = floppy_device_release;
+
+                err = platform_device_register(&floppy_device[drive]);
+                if (err)
+                        goto out_flush_work;
+
+                device_create_file(&floppy_device[drive].dev,&dev_attr_cmos);
                 /* to be cleaned up... */
                 disks[drive]->private_data = (void *)(long)drive;
                 disks[drive]->queue = floppy_queue;
                 disks[drive]->flags |= GENHD_FL_REMOVABLE;
-                disks[drive]->driverfs_dev = &floppy_device.dev;
+                disks[drive]->driverfs_dev = &floppy_device[drive].dev;
                 add_disk(disks[drive]);
         }
 
@@ -4603,10 +4617,11 @@ void cleanup_module(void)
                     fdc_state[FDC(drive)].version != FDC_NONE) {
                         del_gendisk(disks[drive]);
                         unregister_devfs_entries(drive);
+                        device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
+                        platform_device_unregister(&floppy_device[drive]);
                 }
                 put_disk(disks[drive]);
         }
-        platform_device_unregister(&floppy_device);
         devfs_remove("floppy");
 
         del_timer_sync(&fd_timeout);
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 47fd3659a061..d42840cc0d1d 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -45,7 +45,7 @@ int get_blkdev_list(char *p, int used)
         struct blk_major_name *n;
         int i, len;
 
-        len = sprintf(p, "\nBlock devices:\n");
+        len = snprintf(p, (PAGE_SIZE-used), "\nBlock devices:\n");
 
         down(&block_subsys_sem);
         for (i = 0; i < ARRAY_SIZE(major_names); i++) {
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 3c818544475e..483d71b10cf9 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -235,8 +235,8 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
          * set defaults
          */
         q->nr_requests = BLKDEV_MAX_RQ;
-        q->max_phys_segments = MAX_PHYS_SEGMENTS;
-        q->max_hw_segments = MAX_HW_SEGMENTS;
+        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
         q->make_request_fn = mfn;
         q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
         q->backing_dev_info.state = 0;
@@ -284,6 +284,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
         rq->special = NULL;
         rq->data_len = 0;
         rq->data = NULL;
+        rq->nr_phys_segments = 0;
         rq->sense = NULL;
         rq->end_io = NULL;
         rq->end_io_data = NULL;
@@ -2115,7 +2116,7 @@ EXPORT_SYMBOL(blk_insert_request);
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q: request queue where request should be inserted
- * @rw: READ or WRITE data
+ * @rq: request structure to fill
  * @ubuf: the user buffer
  * @len: length of user data
  *
@@ -2132,21 +2133,19 @@ EXPORT_SYMBOL(blk_insert_request);
  * original bio must be passed back in to blk_rq_unmap_user() for proper
  * unmapping.
  */
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
-                                unsigned int len)
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+                    unsigned int len)
 {
         unsigned long uaddr;
-        struct request *rq;
         struct bio *bio;
+        int reading;
 
         if (len > (q->max_sectors << 9))
-                return ERR_PTR(-EINVAL);
-        if ((!len && ubuf) || (len && !ubuf))
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
+        if (!len || !ubuf)
+                return -EINVAL;
 
-        rq = blk_get_request(q, rw, __GFP_WAIT);
-        if (!rq)
-                return ERR_PTR(-ENOMEM);
+        reading = rq_data_dir(rq) == READ;
 
         /*
          * if alignment requirement is satisfied, map in user pages for
@@ -2154,9 +2153,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
          */
         uaddr = (unsigned long) ubuf;
         if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-                bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+                bio = bio_map_user(q, NULL, uaddr, len, reading);
         else
-                bio = bio_copy_user(q, uaddr, len, rw == READ);
+                bio = bio_copy_user(q, uaddr, len, reading);
 
         if (!IS_ERR(bio)) {
                 rq->bio = rq->biotail = bio;
@@ -2164,28 +2163,70 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 
                 rq->buffer = rq->data = NULL;
                 rq->data_len = len;
-                return rq;
+                return 0;
         }
 
         /*
          * bio is the err-ptr
          */
-        blk_put_request(rq);
-        return (struct request *) bio;
+        return PTR_ERR(bio);
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to map data to
+ * @iov: pointer to the iovec
+ * @iov_count: number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the callers responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+                        struct sg_iovec *iov, int iov_count)
+{
+        struct bio *bio;
+
+        if (!iov || iov_count <= 0)
+                return -EINVAL;
+
+        /* we don't allow misaligned data like bio_map_user() does. If the
+         * user is using sg, they're expected to know the alignment constraints
+         * and respect them accordingly */
+        bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+        if (IS_ERR(bio))
+                return PTR_ERR(bio);
+
+        rq->bio = rq->biotail = bio;
+        blk_rq_bio_prep(q, rq, bio);
+        rq->buffer = rq->data = NULL;
+        rq->data_len = bio->bi_size;
+        return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
+/**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq: request to be unmapped
- * @bio: bio for the request
+ * @bio: bio to be unmapped
  * @ulen: length of user buffer
  *
  * Description:
- *    Unmap a request previously mapped by blk_rq_map_user().
+ *    Unmap a bio previously mapped by blk_rq_map_user().
  */
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
 {
         int ret = 0;
 
@@ -2196,31 +2237,89 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
                 ret = bio_uncopy_user(bio);
         }
 
-        blk_put_request(rq);
-        return ret;
+        return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to fill
+ * @kbuf: the kernel buffer
+ * @len: length of user data
+ * @gfp_mask: memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+                    unsigned int len, unsigned int gfp_mask)
+{
+        struct bio *bio;
+
+        if (len > (q->max_sectors << 9))
+                return -EINVAL;
+        if (!len || !kbuf)
+                return -EINVAL;
+
+        bio = bio_map_kern(q, kbuf, len, gfp_mask);
+        if (IS_ERR(bio))
+                return PTR_ERR(bio);
+
+        if (rq_data_dir(rq) == WRITE)
+                bio->bi_rw |= (1 << BIO_RW);
+
+        rq->bio = rq->biotail = bio;
+        blk_rq_bio_prep(q, rq, bio);
+
+        rq->buffer = rq->data = NULL;
+        rq->data_len = len;
+        return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ *    Insert a fully prepared request at the back of the io scheduler queue
+ *    for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+                           struct request *rq, int at_head,
+                           void (*done)(struct request *))
+{
+        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+        rq->rq_disk = bd_disk;
+        rq->flags |= REQ_NOMERGE;
+        rq->end_io = done;
+        elv_add_request(q, rq, where, 1);
+        generic_unplug_device(q);
+}
+
+/**
  * blk_execute_rq - insert a request into queue for execution
  * @q: queue to insert the request in
  * @bd_disk: matching gendisk
  * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
  *
  * Description:
  *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution.
+ *    for execution and wait for completion.
  */
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
-                   struct request *rq)
+                   struct request *rq, int at_head)
 {
         DECLARE_COMPLETION(wait);
         char sense[SCSI_SENSE_BUFFERSIZE];
         int err = 0;
 
-        rq->rq_disk = bd_disk;
-
         /*
          * we need an extra reference to the request, so we can look at
          * it after io completion
@@ -2233,11 +2332,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
                 rq->sense_len = 0;
         }
 
-        rq->flags |= REQ_NOMERGE;
         rq->waiting = &wait;
-        rq->end_io = blk_end_sync_rq;
-        elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-        generic_unplug_device(q);
+        blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
         wait_for_completion(&wait);
         rq->waiting = NULL;
 
@@ -2277,6 +2373,44 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q: device queue
+ * @disk: gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ *    Devices understanding the SCSI command set, can use this function as
+ *    a helper for issuing a cache flush. Note: driver is required to store
+ *    the error offset (in case of error flushing) in ->sector of struct
+ *    request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+                               sector_t *error_sector)
+{
+        struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+        int ret;
+
+        rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+        rq->sector = 0;
+        memset(rq->cmd, 0, sizeof(rq->cmd));
+        rq->cmd[0] = 0x35;
+        rq->cmd_len = 12;
+        rq->data = NULL;
+        rq->data_len = 0;
+        rq->timeout = 60 * HZ;
+
+        ret = blk_execute_rq(q, disk, rq, 0);
+
+        if (ret && error_sector)
+                *error_sector = rq->sector;
+
+        blk_put_request(rq);
+        return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
         int rw = rq_data_dir(rq);
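
Note: taken together, the ll_rw_blk.c changes rework the REQ_BLOCK_PC helpers: the caller now allocates the request itself, blk_rq_map_user()/blk_rq_map_user_iov()/blk_rq_map_kern() fill it in and return 0 or a negative errno, blk_execute_rq() grows an at_head argument, and blk_execute_rq_nowait() allows asynchronous execution. A sketch of how an in-kernel caller might drive the reworked interface, modeled on the sg_io() conversion below (the helper name, the command handling and the error paths are illustrative only, not part of this patch):

#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/errno.h>

/* illustrative helper: issue one SCSI CDB with a user-space data buffer */
static int example_issue_user_cmd(request_queue_t *q, struct gendisk *disk,
                                  unsigned char *cdb, unsigned int cdb_len,
                                  void __user *ubuf, unsigned int len,
                                  int write)
{
        struct request *rq;
        struct bio *bio;
        int err;

        /* the caller, not blk_rq_map_user(), now allocates the request */
        rq = blk_get_request(q, write ? WRITE : READ, __GFP_WAIT);
        if (!rq)
                return -ENOMEM;

        rq->flags |= REQ_BLOCK_PC;
        memcpy(rq->cmd, cdb, cdb_len);
        rq->cmd_len = cdb_len;
        rq->timeout = 60 * HZ;

        err = blk_rq_map_user(q, rq, ubuf, len);        /* now returns 0 or -errno */
        if (err)
                goto out;

        bio = rq->bio;                          /* keep for blk_rq_unmap_user() */
        err = blk_execute_rq(q, disk, rq, 0);   /* 0 == insert at the tail */

        if (blk_rq_unmap_user(bio, len))
                err = -EFAULT;
out:
        blk_put_request(rq);
        return err;
}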
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index 681871ca5d60..abb2df249fd3 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -216,7 +216,7 @@ static int sg_io(struct file *file, request_queue_t *q,
                   struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
         unsigned long start_time;
-        int reading, writing;
+        int writing = 0, ret = 0;
         struct request *rq;
         struct bio *bio;
         char sense[SCSI_SENSE_BUFFERSIZE];
@@ -231,38 +231,48 @@ static int sg_io(struct file *file, request_queue_t *q,
         if (verify_command(file, cmd))
                 return -EPERM;
 
-        /*
-         * we'll do that later
-         */
-        if (hdr->iovec_count)
-                return -EOPNOTSUPP;
-
         if (hdr->dxfer_len > (q->max_sectors << 9))
                 return -EIO;
 
-        reading = writing = 0;
-        if (hdr->dxfer_len) {
+        if (hdr->dxfer_len)
                 switch (hdr->dxfer_direction) {
                 default:
                         return -EINVAL;
                 case SG_DXFER_TO_FROM_DEV:
-                        reading = 1;
-                        /* fall through */
                 case SG_DXFER_TO_DEV:
                         writing = 1;
                         break;
                 case SG_DXFER_FROM_DEV:
-                        reading = 1;
                         break;
                 }
 
-                rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
-                                     hdr->dxfer_len);
+        rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+        if (!rq)
+                return -ENOMEM;
+
+        if (hdr->iovec_count) {
+                const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+                struct sg_iovec *iov;
+
+                iov = kmalloc(size, GFP_KERNEL);
+                if (!iov) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+
+                if (copy_from_user(iov, hdr->dxferp, size)) {
+                        kfree(iov);
+                        ret = -EFAULT;
+                        goto out;
+                }
+
+                ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+                kfree(iov);
+        } else if (hdr->dxfer_len)
+                ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
 
-                if (IS_ERR(rq))
-                        return PTR_ERR(rq);
-        } else
-                rq = blk_get_request(q, READ, __GFP_WAIT);
+        if (ret)
+                goto out;
 
         /*
          * fill in request structure
@@ -298,7 +308,7 @@ static int sg_io(struct file *file, request_queue_t *q,
          * (if he doesn't check that is his problem).
          * N.B. a non-zero SCSI status is _not_ necessarily an error.
          */
-        blk_execute_rq(q, bd_disk, rq);
+        blk_execute_rq(q, bd_disk, rq, 0);
 
         /* write to all output members */
         hdr->status = 0xff & rq->errors;
@@ -320,12 +330,14 @@ static int sg_io(struct file *file, request_queue_t *q,
                 hdr->sb_len_wr = len;
         }
 
-        if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
-                return -EFAULT;
+        if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+                ret = -EFAULT;
 
         /* may not have succeeded, but output values written to control
          * structure (struct sg_io_hdr). */
-        return 0;
+out:
+        blk_put_request(rq);
+        return ret;
 }
 
 #define OMAX_SB_LEN 16          /* For backward compatibility */
@@ -408,7 +420,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
         rq->data_len = bytes;
         rq->flags |= REQ_BLOCK_PC;
 
-        blk_execute_rq(q, bd_disk, rq);
+        blk_execute_rq(q, bd_disk, rq, 0);
         err = rq->errors & 0xff;        /* only 8 bit SCSI status */
         if (err) {
                 if (rq->sense_len && rq->sense) {
@@ -561,7 +573,7 @@ int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd,
                 rq->cmd[0] = GPCMD_START_STOP_UNIT;
                 rq->cmd[4] = 0x02 + (close != 0);
                 rq->cmd_len = 6;
-                err = blk_execute_rq(q, bd_disk, rq);
+                err = blk_execute_rq(q, bd_disk, rq, 0);
                 blk_put_request(rq);
                 break;
         default:
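
Note: with the early -EOPNOTSUPP return for iovec_count removed and blk_rq_map_user_iov() wired into sg_io(), SG_IO on a block device can now scatter one transfer across several user buffers. A rough user-space sketch (device path, command and buffer sizes are examples only; each segment should respect the queue's DMA alignment, since the misaligned-copy fallback is deliberately not offered for iovecs):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
        /* READ(10): LBA 0, 2 blocks of 512 bytes */
        unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0, 2, 0 };
        unsigned char buf1[512] __attribute__((aligned(512)));
        unsigned char buf2[512] __attribute__((aligned(512)));
        unsigned char sense[32];
        struct sg_iovec iov[2] = {
                { .iov_base = buf1, .iov_len = sizeof(buf1) },
                { .iov_base = buf2, .iov_len = sizeof(buf2) },
        };
        struct sg_io_hdr hdr;
        int fd = open("/dev/sda", O_RDONLY);    /* example device node */

        if (fd < 0)
                return 1;

        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.cmd_len = sizeof(cdb);
        hdr.cmdp = cdb;
        hdr.iovec_count = 2;            /* dxferp points at the sg_iovec array */
        hdr.dxferp = iov;
        hdr.dxfer_len = sizeof(buf1) + sizeof(buf2);
        hdr.mx_sb_len = sizeof(sense);
        hdr.sbp = sense;
        hdr.timeout = 5000;             /* milliseconds */

        if (ioctl(fd, SG_IO, &hdr) < 0)
                perror("SG_IO");
        else
                printf("scsi status 0x%x\n", hdr.status);

        close(fd);
        return 0;
}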