author	Christoph Hellwig <hch@infradead.org>	2011-09-12 06:12:01 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-09-12 06:12:01 -0400
commit	5a7bbad27a410350e64a2d7f5ec18fc73836c14f (patch)
tree	3447cd62dbcbd77b4071e2eb7576f1d7632ef2d3
parent	c20e8de27fef9f59869c81c288ad6cf28200e00c (diff)
block: remove support for bio remapping from ->make_request
There is very little benefit in letting a ->make_request instance update the bio's device and sector and then looping around it in __generic_make_request, when we can achieve the same by calling generic_make_request from the driver and letting the loop in generic_make_request handle it.

Note that various drivers got the return value from ->make_request wrong and returned non-zero values for errors.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
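As an illustration of the convention change (not part of the patch itself; the foo driver, its foo_dev structure and the backing_bdev/start_sector fields below are hypothetical): before this patch a stacking ->make_request hook could remap the bio and return a non-zero value, which made the loop in __generic_make_request resubmit the remapped bio; after it, the hook returns void and resubmits the bio itself through generic_make_request(), which already resolves the recursion.

/* Before: remap the bio and return non-zero so __generic_make_request loops. */
static int foo_make_request(struct request_queue *q, struct bio *bio)
{
	struct foo_dev *foo = q->queuedata;	/* hypothetical driver state */

	bio->bi_bdev = foo->backing_bdev;	/* redirect to the backing device */
	bio->bi_sector += foo->start_sector;
	return 1;				/* ask the caller to resubmit */
}

/* After: return void and resubmit the remapped bio explicitly. */
static void foo_make_request(struct request_queue *q, struct bio *bio)
{
	struct foo_dev *foo = q->queuedata;

	bio->bi_bdev = foo->backing_bdev;
	bio->bi_sector += foo->start_sector;
	generic_make_request(bio);	/* generic_make_request handles recursion */
}

Drivers that complete the bio themselves (brd, zram, the md personalities, ...) now simply end it with bio_endio()/bio_io_error() and return, as the hunks below show.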
-rw-r--r--	arch/m68k/emu/nfblock.c	3
-rw-r--r--	arch/powerpc/sysdev/axonram.c	8
-rw-r--r--	block/blk-core.c	153
-rw-r--r--	drivers/block/aoe/aoeblk.c	14
-rw-r--r--	drivers/block/brd.c	4
-rw-r--r--	drivers/block/drbd/drbd_int.h	2
-rw-r--r--	drivers/block/drbd/drbd_req.c	8
-rw-r--r--	drivers/block/loop.c	5
-rw-r--r--	drivers/block/pktcdvd.c	11
-rw-r--r--	drivers/block/ps3vram.c	6
-rw-r--r--	drivers/block/umem.c	4
-rw-r--r--	drivers/md/dm.c	14
-rw-r--r--	drivers/md/faulty.c	14
-rw-r--r--	drivers/md/linear.c	17
-rw-r--r--	drivers/md/md.c	12
-rw-r--r--	drivers/md/md.h	2
-rw-r--r--	drivers/md/multipath.c	8
-rw-r--r--	drivers/md/raid0.c	22
-rw-r--r--	drivers/md/raid1.c	8
-rw-r--r--	drivers/md/raid10.c	19
-rw-r--r--	drivers/md/raid5.c	8
-rw-r--r--	drivers/s390/block/dcssblk.c	7
-rw-r--r--	drivers/s390/block/xpram.c	5
-rw-r--r--	drivers/staging/zram/zram_drv.c	8
-rw-r--r--	include/linux/blkdev.h	4
25 files changed, 151 insertions, 215 deletions
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 48e50f8c1c7e..e3011338ab40 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -59,7 +59,7 @@ struct nfhd_device {
 	struct gendisk *disk;
 };
 
-static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct nfhd_device *dev = queue->queuedata;
 	struct bio_vec *bvec;
@@ -76,7 +76,6 @@ static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
 		sec += len;
 	}
 	bio_endio(bio, 0);
-	return 0;
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 265f0f09395a..ba4271919062 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -104,7 +104,7 @@ axon_ram_irq_handler(int irq, void *dev)
  * axon_ram_make_request - make_request() method for block device
  * @queue, @bio: see blk_queue_make_request()
  */
-static int
+static void
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -113,7 +113,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	struct bio_vec *vec;
 	unsigned int transfered;
 	unsigned short idx;
-	int rc = 0;
 
 	phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
 	phys_end = bank->io_addr + bank->size;
@@ -121,8 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	bio_for_each_segment(vec, bio, idx) {
 		if (unlikely(phys_mem + vec->bv_len > phys_end)) {
 			bio_io_error(bio);
-			rc = -ERANGE;
-			break;
+			return;
 		}
 
 		user_mem = page_address(vec->bv_page) + vec->bv_offset;
@@ -135,8 +133,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 		transfered += vec->bv_len;
 	}
 	bio_endio(bio, 0);
-
-	return rc;
 }
 
 /**
diff --git a/block/blk-core.c b/block/blk-core.c
index ab673f0b8c30..f58e019be67b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1211,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-int blk_queue_bio(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1236,7 +1236,7 @@ int blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * any locks.
 	 */
 	if (attempt_plug_merge(current, q, bio))
-		goto out;
+		return;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -1312,8 +1312,6 @@ get_rq:
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
-out:
-	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
 
@@ -1441,112 +1439,85 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 static inline void __generic_make_request(struct bio *bio)
 {
 	struct request_queue *q;
-	sector_t old_sector;
-	int ret, nr_sectors = bio_sectors(bio);
-	dev_t old_dev;
+	int nr_sectors = bio_sectors(bio);
 	int err = -EIO;
+	char b[BDEVNAME_SIZE];
+	struct hd_struct *part;
 
 	might_sleep();
 
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	/*
-	 * Resolve the mapping until finished. (drivers are
-	 * still free to implement/resolve their own stacking
-	 * by explicitly returning 0)
-	 *
-	 * NOTE: we don't repeat the blk_size check for each new device.
-	 * Stacking drivers are expected to know what they are doing.
-	 */
-	old_sector = -1;
-	old_dev = 0;
-	do {
-		char b[BDEVNAME_SIZE];
-		struct hd_struct *part;
-
-		q = bdev_get_queue(bio->bi_bdev);
-		if (unlikely(!q)) {
-			printk(KERN_ERR
-			       "generic_make_request: Trying to access "
-			       "nonexistent block-device %s (%Lu)\n",
-			       bdevname(bio->bi_bdev, b),
-			       (long long) bio->bi_sector);
-			goto end_io;
-		}
-
-		if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
-			     nr_sectors > queue_max_hw_sectors(q))) {
-			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-			       bdevname(bio->bi_bdev, b),
-			       bio_sectors(bio),
-			       queue_max_hw_sectors(q));
-			goto end_io;
-		}
-
-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-			goto end_io;
-
-		part = bio->bi_bdev->bd_part;
-		if (should_fail_request(part, bio->bi_size) ||
-		    should_fail_request(&part_to_disk(part)->part0,
-					bio->bi_size))
-			goto end_io;
+	q = bdev_get_queue(bio->bi_bdev);
+	if (unlikely(!q)) {
+		printk(KERN_ERR
+		       "generic_make_request: Trying to access "
+		       "nonexistent block-device %s (%Lu)\n",
+		       bdevname(bio->bi_bdev, b),
+		       (long long) bio->bi_sector);
+		goto end_io;
+	}
 
-		/*
-		 * If this device has partitions, remap block n
-		 * of partition p to block n+start(p) of the disk.
-		 */
-		blk_partition_remap(bio);
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+		     nr_sectors > queue_max_hw_sectors(q))) {
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+		       bdevname(bio->bi_bdev, b),
+		       bio_sectors(bio),
+		       queue_max_hw_sectors(q));
+		goto end_io;
+	}
 
-		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-			goto end_io;
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		goto end_io;
 
-		if (old_sector != -1)
-			trace_block_bio_remap(q, bio, old_dev, old_sector);
+	part = bio->bi_bdev->bd_part;
+	if (should_fail_request(part, bio->bi_size) ||
+	    should_fail_request(&part_to_disk(part)->part0,
+				bio->bi_size))
+		goto end_io;
 
-		old_sector = bio->bi_sector;
-		old_dev = bio->bi_bdev->bd_dev;
+	/*
+	 * If this device has partitions, remap block n
+	 * of partition p to block n+start(p) of the disk.
	 */
+	blk_partition_remap(bio);
 
-		if (bio_check_eod(bio, nr_sectors))
-			goto end_io;
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+		goto end_io;
 
-		/*
-		 * Filter flush bio's early so that make_request based
-		 * drivers without flush support don't have to worry
-		 * about them.
-		 */
-		if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
-			bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
-			if (!nr_sectors) {
-				err = 0;
-				goto end_io;
-			}
-		}
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
 
-		if ((bio->bi_rw & REQ_DISCARD) &&
-		    (!blk_queue_discard(q) ||
-		     ((bio->bi_rw & REQ_SECURE) &&
-		      !blk_queue_secdiscard(q)))) {
-			err = -EOPNOTSUPP;
+	/*
+	 * Filter flush bio's early so that make_request based
+	 * drivers without flush support don't have to worry
+	 * about them.
+	 */
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		if (!nr_sectors) {
+			err = 0;
 			goto end_io;
 		}
+	}
 
-		if (blk_throtl_bio(q, &bio))
-			goto end_io;
-
-		/*
-		 * If bio = NULL, bio has been throttled and will be submitted
-		 * later.
-		 */
-		if (!bio)
-			break;
-
-		trace_block_bio_queue(q, bio);
+	if ((bio->bi_rw & REQ_DISCARD) &&
+	    (!blk_queue_discard(q) ||
+	     ((bio->bi_rw & REQ_SECURE) &&
+	      !blk_queue_secdiscard(q)))) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
 
-		ret = q->make_request_fn(q, bio);
-	} while (ret);
+	if (blk_throtl_bio(q, &bio))
+		goto end_io;
 
+	/* if bio = NULL, bio has been throttled and will be submitted later. */
+	if (!bio)
+		return;
+	trace_block_bio_queue(q, bio);
+	q->make_request_fn(q, bio);
 	return;
 
 end_io:
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 528f6318ded1..167ba0af47f5 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -159,7 +159,7 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
 	return 0;
 }
 
-static int
+static void
 aoeblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct sk_buff_head queue;
@@ -172,25 +172,25 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 	if (bio == NULL) {
 		printk(KERN_ERR "aoe: bio is NULL\n");
 		BUG();
-		return 0;
+		return;
 	}
 	d = bio->bi_bdev->bd_disk->private_data;
 	if (d == NULL) {
 		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
 		BUG();
 		bio_endio(bio, -ENXIO);
-		return 0;
+		return;
 	} else if (bio->bi_io_vec == NULL) {
 		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
 		BUG();
 		bio_endio(bio, -ENXIO);
-		return 0;
+		return;
 	}
 	buf = mempool_alloc(d->bufpool, GFP_NOIO);
 	if (buf == NULL) {
 		printk(KERN_INFO "aoe: buf allocation failure\n");
 		bio_endio(bio, -ENOMEM);
-		return 0;
+		return;
 	}
 	memset(buf, 0, sizeof(*buf));
 	INIT_LIST_HEAD(&buf->bufs);
@@ -211,7 +211,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 		spin_unlock_irqrestore(&d->lock, flags);
 		mempool_free(buf, d->bufpool);
 		bio_endio(bio, -ENXIO);
-		return 0;
+		return;
 	}
 
 	list_add_tail(&buf->bufs, &d->bufq);
@@ -222,8 +222,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 
 	spin_unlock_irqrestore(&d->lock, flags);
 	aoenet_xmit(&queue);
-
-	return 0;
 }
 
 static int
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index dba1c32e1ddf..d22119d49e53 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -323,7 +323,7 @@ out:
 	return err;
 }
 
-static int brd_make_request(struct request_queue *q, struct bio *bio)
+static void brd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct brd_device *brd = bdev->bd_disk->private_data;
@@ -359,8 +359,6 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 
 out:
 	bio_endio(bio, err);
-
-	return 0;
 }
 
 #ifdef CONFIG_BLK_DEV_XIP
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ef2ceed3be4b..36eee3969a98 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1507,7 +1507,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev);
 extern int proc_details;
 
 /* drbd_req */
-extern int drbd_make_request(struct request_queue *q, struct bio *bio);
+extern void drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3424d675b769..4a0f314086e5 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1073,7 +1073,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
 	return 0;
 }
 
-int drbd_make_request(struct request_queue *q, struct bio *bio)
+void drbd_make_request(struct request_queue *q, struct bio *bio)
 {
 	unsigned int s_enr, e_enr;
 	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
@@ -1081,7 +1081,7 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
 
 	if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
 		bio_endio(bio, -EPERM);
-		return 0;
+		return;
 	}
 
 	start_time = jiffies;
@@ -1100,7 +1100,8 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
 
 	if (likely(s_enr == e_enr)) {
 		inc_ap_bio(mdev, 1);
-		return drbd_make_request_common(mdev, bio, start_time);
+		drbd_make_request_common(mdev, bio, start_time);
+		return;
 	}
 
 	/* can this bio be split generically?
@@ -1148,7 +1149,6 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
 
 		bio_pair_release(bp);
 	}
-	return 0;
 }
 
 /* This is called by bio_add_page(). With this function we reduce
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 76c8da78212b..8360239d553c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -514,7 +514,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
 	return bio_list_pop(&lo->lo_bio_list);
 }
 
-static int loop_make_request(struct request_queue *q, struct bio *old_bio)
+static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct loop_device *lo = q->queuedata;
 	int rw = bio_rw(old_bio);
@@ -532,12 +532,11 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
 	loop_add_bio(lo, old_bio);
 	wake_up(&lo->lo_event);
 	spin_unlock_irq(&lo->lo_lock);
-	return 0;
+	return;
 
 out:
 	spin_unlock_irq(&lo->lo_lock);
 	bio_io_error(old_bio);
-	return 0;
 }
 
 struct switch_request {
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index e133f094ab08..a63b0a2b7805 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2444,7 +2444,7 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
 	pkt_bio_finished(pd);
 }
 
-static int pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct pktcdvd_device *pd;
 	char b[BDEVNAME_SIZE];
@@ -2473,7 +2473,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
 		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
 		pd->stats.secs_r += bio->bi_size >> 9;
 		pkt_queue_bio(pd, cloned_bio);
-		return 0;
+		return;
 	}
 
 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2509,7 +2509,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
 			pkt_make_request(q, &bp->bio1);
 			pkt_make_request(q, &bp->bio2);
 			bio_pair_release(bp);
-			return 0;
+			return;
 		}
 	}
 
@@ -2533,7 +2533,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
 				}
 				spin_unlock(&pkt->lock);
 				spin_unlock(&pd->cdrw.active_list_lock);
-				return 0;
+				return;
 			} else {
 				blocked_bio = 1;
 			}
@@ -2584,10 +2584,9 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
 		 */
 		wake_up(&pd->wqueue);
 	}
-	return 0;
+	return;
 end_io:
 	bio_io_error(bio);
-	return 0;
 }
 
 
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b3bdb8af89cf..7fad7af87eb2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -596,7 +596,7 @@ out:
 	return next;
 }
 
-static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct ps3_system_bus_device *dev = q->queuedata;
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -610,13 +610,11 @@ static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
 	spin_unlock_irq(&priv->lock);
 
 	if (busy)
-		return 0;
+		return;
 
 	do {
 		bio = ps3vram_do_bio(dev, bio);
 	} while (bio);
-
-	return 0;
 }
 
 static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 031ca720d926..aa2712060bfb 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,7 +513,7 @@ static void process_page(unsigned long data)
 	}
 }
 
-static int mm_make_request(struct request_queue *q, struct bio *bio)
+static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
@@ -525,7 +525,7 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
 	card->biotail = &bio->bi_next;
 	spin_unlock_irq(&card->lock);
 
-	return 0;
+	return;
 }
 
 static irqreturn_t mm_interrupt(int irq, void *__card)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 78b20868bcbc..7b986e77b75e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1388,7 +1388,7 @@ out:
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
@@ -1409,12 +1409,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	__split_and_process_bio(md, bio);
 	up_read(&md->io_lock);
-	return 0;
+	return;
 }
 
 static int dm_request_based(struct mapped_device *md)
@@ -1422,14 +1422,14 @@ static int dm_request_based(struct mapped_device *md)
 	return blk_queue_stackable(md->queue);
 }
 
-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 
 	if (dm_request_based(md))
-		return blk_queue_bio(q, bio);
-
-	return _dm_request(q, bio);
+		blk_queue_bio(q, bio);
+	else
+		_dm_request(q, bio);
 }
 
 void dm_dispatch_request(struct request *rq)
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 23078dabb6df..5ef304d4341c 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,7 +169,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
 	conf->nfaults = n+1;
 }
 
-static int make_request(mddev_t *mddev, struct bio *bio)
+static void make_request(mddev_t *mddev, struct bio *bio)
 {
 	conf_t *conf = mddev->private;
 	int failit = 0;
@@ -181,7 +181,7 @@ static int make_request(mddev_t *mddev, struct bio *bio)
 			 * just fail immediately
 			 */
 			bio_endio(bio, -EIO);
-			return 0;
+			return;
 		}
 
 		if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
@@ -211,15 +211,15 @@ static int make_request(mddev_t *mddev, struct bio *bio)
 	}
 	if (failit) {
 		struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
+
 		b->bi_bdev = conf->rdev->bdev;
 		b->bi_private = bio;
 		b->bi_end_io = faulty_fail;
-		generic_make_request(b);
-		return 0;
-	} else {
+		bio = b;
+	} else
 		bio->bi_bdev = conf->rdev->bdev;
-		return 1;
-	}
+
+	generic_make_request(bio);
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6cd2c313e800..c6ee491d98e7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -264,14 +264,14 @@ static int linear_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int linear_make_request (mddev_t *mddev, struct bio *bio)
+static void linear_make_request (mddev_t *mddev, struct bio *bio)
 {
 	dev_info_t *tmp_dev;
 	sector_t start_sector;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	rcu_read_lock();
@@ -293,7 +293,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 		       (unsigned long long)start_sector);
 		rcu_read_unlock();
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
 		     tmp_dev->end_sector)) {
@@ -307,20 +307,17 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 
 		bp = bio_split(bio, end_sector - bio->bi_sector);
 
-		if (linear_make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (linear_make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
+		linear_make_request(mddev, &bp->bio1);
+		linear_make_request(mddev, &bp->bio2);
 		bio_pair_release(bp);
-		return 0;
+		return;
 	}
 
 	bio->bi_bdev = tmp_dev->rdev->bdev;
 	bio->bi_sector = bio->bi_sector - start_sector
 		+ tmp_dev->rdev->data_offset;
 	rcu_read_unlock();
-
-	return 1;
+	generic_make_request(bio);
 }
 
 static void linear_status (struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8e221a20f5d9..5c2178562c96 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -330,18 +330,17 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
-static int md_make_request(struct request_queue *q, struct bio *bio)
+static void md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
-	int rv;
 	int cpu;
 	unsigned int sectors;
 
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 	smp_rmb(); /* Ensure implications of 'active' are visible */
 	rcu_read_lock();
@@ -366,7 +365,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	 * go away inside make_request
 	 */
 	sectors = bio_sectors(bio);
-	rv = mddev->pers->make_request(mddev, bio);
+	mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -375,8 +374,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
 		wake_up(&mddev->sb_wait);
-
-	return rv;
 }
 
 /* mddev_suspend makes sure no new requests are submitted
@@ -475,8 +472,7 @@ static void md_submit_flush_data(struct work_struct *ws)
 		bio_endio(bio, 0);
 	else {
 		bio->bi_rw &= ~REQ_FLUSH;
-		if (mddev->pers->make_request(mddev, bio))
-			generic_make_request(bio);
+		mddev->pers->make_request(mddev, bio);
 	}
 
 	mddev->flush_bio = NULL;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452e..bd47847cf7ca 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -424,7 +424,7 @@ struct mdk_personality
 	int level;
 	struct list_head list;
 	struct module *owner;
-	int (*make_request)(mddev_t *mddev, struct bio *bio);
+	void (*make_request)(mddev_t *mddev, struct bio *bio);
 	int (*run)(mddev_t *mddev);
 	int (*stop)(mddev_t *mddev);
 	void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af288..407cb5691425 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio, int error)
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
-static int multipath_make_request(mddev_t *mddev, struct bio * bio)
+static void multipath_make_request(mddev_t *mddev, struct bio * bio)
 {
 	multipath_conf_t *conf = mddev->private;
 	struct multipath_bh * mp_bh;
@@ -114,7 +114,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +126,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	if (mp_bh->path < 0) {
 		bio_endio(bio, -EIO);
 		mempool_free(mp_bh, conf->pool);
-		return 0;
+		return;
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
@@ -137,7 +137,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
-	return 0;
+	return;
 }
 
 static void multipath_status (struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e86bf3682e1e..4066615d61af 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -466,7 +466,7 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
 	}
 }
 
-static int raid0_make_request(mddev_t *mddev, struct bio *bio)
+static void raid0_make_request(mddev_t *mddev, struct bio *bio)
 {
 	unsigned int chunk_sects;
 	sector_t sector_offset;
@@ -475,7 +475,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	chunk_sects = mddev->chunk_sectors;
@@ -495,13 +495,10 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 		else
 			bp = bio_split(bio, chunk_sects -
 				       sector_div(sector, chunk_sects));
-		if (raid0_make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (raid0_make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
-
+		raid0_make_request(mddev, &bp->bio1);
+		raid0_make_request(mddev, &bp->bio2);
 		bio_pair_release(bp);
-		return 0;
+		return;
 	}
 
 	sector_offset = bio->bi_sector;
@@ -511,10 +508,9 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	bio->bi_bdev = tmp_dev->bdev;
 	bio->bi_sector = sector_offset + zone->dev_start +
 		tmp_dev->data_offset;
-	/*
-	 * Let the main block layer submit the IO and resolve recursion:
-	 */
-	return 1;
+
+	generic_make_request(bio);
+	return;
 
 bad_map:
 	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
@@ -523,7 +519,7 @@ bad_map:
 	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
 	bio_io_error(bio);
-	return 0;
+	return;
 }
 
 static void raid0_status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd89..97f2a5f977b1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -785,7 +785,7 @@ do_sync_io:
 	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
-static int make_request(mddev_t *mddev, struct bio * bio)
+static void make_request(mddev_t *mddev, struct bio * bio)
 {
 	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
@@ -870,7 +870,7 @@ read_again:
 		if (rdisk < 0) {
 			/* couldn't find anywhere to read from */
 			raid_end_bio_io(r1_bio);
-			return 0;
+			return;
 		}
 		mirror = conf->mirrors + rdisk;
 
@@ -928,7 +928,7 @@ read_again:
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
-		return 0;
+		return;
 	}
 
 	/*
@@ -1119,8 +1119,6 @@ read_again:
 
 	if (do_sync || !bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
-
-	return 0;
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c8..04b625e1cb60 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -825,7 +825,7 @@ static void unfreeze_array(conf_t *conf)
 	spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(mddev_t *mddev, struct bio * bio)
+static void make_request(mddev_t *mddev, struct bio * bio)
 {
 	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
@@ -844,7 +844,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
-		return 0;
+		return;
 	}
 
 	/* If this request crosses a chunk boundary, we need to
@@ -876,10 +876,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		conf->nr_waiting++;
 		spin_unlock_irq(&conf->resync_lock);
 
-		if (make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
+		make_request(mddev, &bp->bio1);
+		make_request(mddev, &bp->bio2);
 
 		spin_lock_irq(&conf->resync_lock);
 		conf->nr_waiting--;
@@ -887,14 +885,14 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		spin_unlock_irq(&conf->resync_lock);
 
 		bio_pair_release(bp);
-		return 0;
+		return;
 	bad_map:
 		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
 		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	md_write_start(mddev, bio);
@@ -937,7 +935,7 @@ read_again:
 		slot = r10_bio->read_slot;
 		if (disk < 0) {
 			raid_end_bio_io(r10_bio);
-			return 0;
+			return;
 		}
 		mirror = conf->mirrors + disk;
 
@@ -985,7 +983,7 @@ read_again:
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
-		return 0;
+		return;
 	}
 
 	/*
@@ -1157,7 +1155,6 @@ retry_write:
 
 	if (do_sync || !mddev->bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
-	return 0;
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dbae459fb02d..96b7f6a1b6f2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3695,7 +3695,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
 	return sh;
 }
 
-static int make_request(mddev_t *mddev, struct bio * bi)
+static void make_request(mddev_t *mddev, struct bio * bi)
 {
 	raid5_conf_t *conf = mddev->private;
 	int dd_idx;
@@ -3708,7 +3708,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
-		return 0;
+		return;
 	}
 
 	md_write_start(mddev, bi);
@@ -3716,7 +3716,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 	if (rw == READ &&
 	     mddev->reshape_position == MaxSector &&
 	     chunk_aligned_read(mddev,bi))
-		return 0;
+		return;
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3851,8 +3851,6 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 
 		bio_endio(bi, 0);
 	}
-
-	return 0;
 }
 
 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9b43ae94beba..a5a55da2a1ac 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -27,7 +27,7 @@
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static int dcssblk_release(struct gendisk *disk, fmode_t mode);
-static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
+static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
 static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
 				 void **kaddr, unsigned long *pfn);
 
@@ -814,7 +814,7 @@ out:
 	return rc;
 }
 
-static int
+static void
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct dcssblk_dev_info *dev_info;
@@ -871,10 +871,9 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 		bytes_done += bvec->bv_len;
 	}
 	bio_endio(bio, 0);
-	return 0;
+	return;
 fail:
 	bio_io_error(bio);
-	return 0;
 }
 
 static int
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 1f6a4d894e73..98f3e4ade924 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -181,7 +181,7 @@ static unsigned long xpram_highest_page_index(void)
 /*
  * Block device make request function.
  */
-static int xpram_make_request(struct request_queue *q, struct bio *bio)
+static void xpram_make_request(struct request_queue *q, struct bio *bio)
 {
 	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
 	struct bio_vec *bvec;
@@ -221,10 +221,9 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
 	}
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
 	bio_endio(bio, 0);
-	return 0;
+	return;
 fail:
 	bio_io_error(bio);
-	return 0;
 }
 
 static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index d70ec1ad10de..02589cab6710 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -556,24 +556,22 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
 /*
  * Handler function for all zram I/O requests.
  */
-static int zram_make_request(struct request_queue *queue, struct bio *bio)
+static void zram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct zram *zram = queue->queuedata;
 
 	if (!valid_io_request(zram, bio)) {
 		zram_stat64_inc(zram, &zram->stats.invalid_io);
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	if (unlikely(!zram->init_done) && zram_init_device(zram)) {
 		bio_io_error(bio);
-		return 0;
+		return;
 	}
 
 	__zram_make_request(zram, bio, bio_data_dir(bio));
-
-	return 0;
 }
 
 void zram_reset_device(struct zram *zram)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 085f95414c7f..c712efdafc3f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -193,7 +193,7 @@ struct request_pm_state
 #include <linux/elevator.h>
 
 typedef void (request_fn_proc) (struct request_queue *q);
-typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
@@ -675,7 +675,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			  struct scsi_ioctl_command __user *);
 
-extern int blk_queue_bio(struct request_queue *q, struct bio *bio);
+extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 /*
  * A queue has just exitted congestion. Note this in the global counter of