-rw-r--r--   block/elevator.c                   17
-rw-r--r--   block/ll_rw_blk.c                 270
-rw-r--r--   drivers/block/pktcdvd.c             7
-rw-r--r--   drivers/block/ps3disk.c            21
-rw-r--r--   drivers/ide/ide-disk.c             29
-rw-r--r--   drivers/ide/ide-io.c               35
-rw-r--r--   drivers/md/dm-table.c              28
-rw-r--r--   drivers/md/dm.c                    16
-rw-r--r--   drivers/md/dm.h                     1
-rw-r--r--   drivers/md/linear.c                20
-rw-r--r--   drivers/md/md.c                     1
-rw-r--r--   drivers/md/multipath.c             30
-rw-r--r--   drivers/md/raid0.c                 21
-rw-r--r--   drivers/md/raid1.c                 31
-rw-r--r--   drivers/md/raid10.c                31
-rw-r--r--   drivers/md/raid5.c                 31
-rw-r--r--   drivers/message/i2o/i2o_block.c    24
-rw-r--r--   drivers/scsi/sd.c                  22
-rw-r--r--   fs/bio.c                           23
-rw-r--r--   include/linux/bio.h                19
-rw-r--r--   include/linux/blkdev.h              8
-rw-r--r--   include/linux/ide.h                 5
-rw-r--r--   mm/bounce.c                         6
23 files changed, 240 insertions, 456 deletions
diff --git a/block/elevator.c b/block/elevator.c
index b9c518afe1f8..952aee04a68a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -712,6 +712,14 @@ struct request *elv_next_request(struct request_queue *q)
         int ret;

         while ((rq = __elv_next_request(q)) != NULL) {
+                /*
+                 * Kill the empty barrier place holder, the driver must
+                 * not ever see it.
+                 */
+                if (blk_empty_barrier(rq)) {
+                        end_queued_request(rq, 1);
+                        continue;
+                }
                 if (!(rq->cmd_flags & REQ_STARTED)) {
                         /*
                          * This is the first time the device driver
@@ -751,15 +759,8 @@ struct request *elv_next_request(struct request_queue *q)
                         rq = NULL;
                         break;
                 } else if (ret == BLKPREP_KILL) {
-                        int nr_bytes = rq->hard_nr_sectors << 9;
-
-                        if (!nr_bytes)
-                                nr_bytes = rq->data_len;
-
-                        blkdev_dequeue_request(rq);
                         rq->cmd_flags |= REQ_QUIET;
-                        end_that_request_chunk(rq, 0, nr_bytes);
-                        end_that_request_last(rq, 0);
+                        end_queued_request(rq, 0);
                 } else {
                         printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                                ret);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d875673e76cd..4df7d027eb06 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,23 +304,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,

 EXPORT_SYMBOL(blk_queue_ordered);

-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q:   the request queue
- * @iff: the function to be called issuing the flush
- *
- * Description:
- *   If a driver supports issuing a flush command, the support is notified
- *   to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
-        q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
 /*
  * Cache flushing for ordered writes handling
  */
@@ -377,10 +360,12 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
         /*
          * Okay, sequence complete.
          */
-        rq = q->orig_bar_rq;
-        uptodate = q->orderr ? q->orderr : 1;
+        uptodate = 1;
+        if (q->orderr)
+                uptodate = q->orderr;

         q->ordseq = 0;
+        rq = q->orig_bar_rq;

         end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
         end_that_request_last(rq, uptodate);
@@ -445,7 +430,8 @@ static inline struct request *start_ordered(struct request_queue *q,
         rq_init(q, rq);
         if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                 rq->cmd_flags |= REQ_RW;
-        rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+        if (q->ordered & QUEUE_ORDERED_FUA)
+                rq->cmd_flags |= REQ_FUA;
         rq->elevator_private = NULL;
         rq->elevator_private2 = NULL;
         init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -455,9 +441,12 @@ static inline struct request *start_ordered(struct request_queue *q,
          * Queue ordered sequence. As we stack them at the head, we
          * need to queue in reverse order. Note that we rely on that
          * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-         * request gets inbetween ordered sequence.
+         * request gets inbetween ordered sequence. If this request is
+         * an empty barrier, we don't need to do a postflush ever since
+         * there will be no data written between the pre and post flush.
+         * Hence a single flush will suffice.
          */
-        if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+        if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
                 queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
         else
                 q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
@@ -481,7 +470,7 @@ static inline struct request *start_ordered(struct request_queue *q,
 int blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
         struct request *rq = *rqp;
-        int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+        const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

         if (!q->ordseq) {
                 if (!is_barrier)
@@ -2660,6 +2649,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,

 EXPORT_SYMBOL(blk_execute_rq);

+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+        if (err)
+                clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+        complete(bio->bi_private);
+}
+
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:        blockdev to issue flush for
@@ -2672,7 +2669,10 @@ EXPORT_SYMBOL(blk_execute_rq);
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
+        DECLARE_COMPLETION_ONSTACK(wait);
         struct request_queue *q;
+        struct bio *bio;
+        int ret;

         if (bdev->bd_disk == NULL)
                 return -ENXIO;
@@ -2680,10 +2680,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
         q = bdev_get_queue(bdev);
         if (!q)
                 return -ENXIO;
-        if (!q->issue_flush_fn)
-                return -EOPNOTSUPP;

-        return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+        bio = bio_alloc(GFP_KERNEL, 0);
+        if (!bio)
+                return -ENOMEM;
+
+        bio->bi_end_io = bio_end_empty_barrier;
+        bio->bi_private = &wait;
+        bio->bi_bdev = bdev;
+        submit_bio(1 << BIO_RW_BARRIER, bio);
+
+        wait_for_completion(&wait);
+
+        /*
+         * The driver must store the error location in ->bi_sector, if
+         * it supports it. For non-stacked drivers, this should be copied
+         * from rq->sector.
+         */
+        if (error_sector)
+                *error_sector = bio->bi_sector;
+
+        ret = 0;
+        if (!bio_flagged(bio, BIO_UPTODATE))
+                ret = -EIO;
+
+        bio_put(bio);
+        return ret;
 }

 EXPORT_SYMBOL(blkdev_issue_flush);
@@ -3051,7 +3073,7 @@ static inline void blk_partition_remap(struct bio *bio)
 {
         struct block_device *bdev = bio->bi_bdev;

-        if (bdev != bdev->bd_contains) {
+        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                 struct hd_struct *p = bdev->bd_part;
                 const int rw = bio_data_dir(bio);

@@ -3117,6 +3139,35 @@ static inline int should_fail_request(struct bio *bio)

 #endif /* CONFIG_FAIL_MAKE_REQUEST */

+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+        sector_t maxsector;
+
+        if (!nr_sectors)
+                return 0;
+
+        /* Test device or partition size, when known. */
+        maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+        if (maxsector) {
+                sector_t sector = bio->bi_sector;
+
+                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+                        /*
+                         * This may well happen - the kernel calls bread()
+                         * without checking the size of the device, e.g., when
+                         * mounting a device.
+                         */
+                        handle_bad_sector(bio);
+                        return 1;
+                }
+        }
+
+        return 0;
+}
+
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -3144,27 +3195,14 @@ static inline int should_fail_request(struct bio *bio)
 static inline void __generic_make_request(struct bio *bio)
 {
         struct request_queue *q;
-        sector_t maxsector;
         sector_t old_sector;
         int ret, nr_sectors = bio_sectors(bio);
         dev_t old_dev;

         might_sleep();
-        /* Test device or partition size, when known. */
-        maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-        if (maxsector) {
-                sector_t sector = bio->bi_sector;

-                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-                        /*
-                         * This may well happen - the kernel calls bread()
-                         * without checking the size of the device, e.g., when
-                         * mounting a device.
-                         */
-                        handle_bad_sector(bio);
-                        goto end_io;
-                }
-        }
+        if (bio_check_eod(bio, nr_sectors))
+                goto end_io;

         /*
          * Resolve the mapping until finished. (drivers are
@@ -3191,7 +3229,7 @@ end_io:
                 break;
         }

-        if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+        if (unlikely(nr_sectors > q->max_hw_sectors)) {
                 printk("bio too big device %s (%u > %u)\n",
                        bdevname(bio->bi_bdev, b),
                        bio_sectors(bio),
@@ -3212,7 +3250,7 @@ end_io:
         blk_partition_remap(bio);

         if (old_sector != -1)
-                blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+                blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                     old_sector);

         blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
@@ -3220,21 +3258,8 @@ end_io:
                 old_sector = bio->bi_sector;
                 old_dev = bio->bi_bdev->bd_dev;

-                maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-                if (maxsector) {
-                        sector_t sector = bio->bi_sector;
-
-                        if (maxsector < nr_sectors ||
-                            maxsector - nr_sectors < sector) {
-                                /*
-                                 * This may well happen - partitions are not
-                                 * checked to make sure they are within the size
-                                 * of the whole device.
-                                 */
-                                handle_bad_sector(bio);
-                                goto end_io;
-                        }
-                }
+                if (bio_check_eod(bio, nr_sectors))
+                        goto end_io;

                 ret = q->make_request_fn(q, bio);
         } while (ret);
@@ -3307,23 +3332,32 @@ void submit_bio(int rw, struct bio *bio)
 {
         int count = bio_sectors(bio);

-        BIO_BUG_ON(!bio->bi_size);
-        BIO_BUG_ON(!bio->bi_io_vec);
         bio->bi_rw |= rw;
-        if (rw & WRITE) {
-                count_vm_events(PGPGOUT, count);
-        } else {
-                task_io_account_read(bio->bi_size);
-                count_vm_events(PGPGIN, count);
-        }

-        if (unlikely(block_dump)) {
-                char b[BDEVNAME_SIZE];
-                printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-                current->comm, current->pid,
-                        (rw & WRITE) ? "WRITE" : "READ",
-                        (unsigned long long)bio->bi_sector,
-                        bdevname(bio->bi_bdev,b));
+        /*
+         * If it's a regular read/write or a barrier with data attached,
+         * go through the normal accounting stuff before submission.
+         */
+        if (!bio_empty_barrier(bio)) {
+
+                BIO_BUG_ON(!bio->bi_size);
+                BIO_BUG_ON(!bio->bi_io_vec);
+
+                if (rw & WRITE) {
+                        count_vm_events(PGPGOUT, count);
+                } else {
+                        task_io_account_read(bio->bi_size);
+                        count_vm_events(PGPGIN, count);
+                }
+
+                if (unlikely(block_dump)) {
+                        char b[BDEVNAME_SIZE];
+                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+                        current->comm, current->pid,
+                                (rw & WRITE) ? "WRITE" : "READ",
+                                (unsigned long long)bio->bi_sector,
+                                bdevname(bio->bi_bdev,b));
+                }
         }

         generic_make_request(bio);
@@ -3399,6 +3433,14 @@ static int __end_that_request_first(struct request *req, int uptodate,
         while ((bio = req->bio) != NULL) {
                 int nbytes;

+                /*
+                 * For an empty barrier request, the low level driver must
+                 * store a potential error location in ->sector. We pass
+                 * that back up in ->bi_sector.
+                 */
+                if (blk_empty_barrier(req))
+                        bio->bi_sector = req->sector;
+
                 if (nr_bytes >= bio->bi_size) {
                         req->bio = bio->bi_next;
                         nbytes = bio->bi_size;
@@ -3564,7 +3606,7 @@ static struct notifier_block blk_cpu_notifier __cpuinitdata = {
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
  *     unless the driver actually implements this in its completion callback
- *     through requeueing. Theh actual completion happens out-of-order,
+ *     through requeueing. The actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
  **/
@@ -3627,15 +3669,83 @@ void end_that_request_last(struct request *req, int uptodate)

 EXPORT_SYMBOL(end_that_request_last);

-void end_request(struct request *req, int uptodate)
+static inline void __end_request(struct request *rq, int uptodate,
+                                 unsigned int nr_bytes, int dequeue)
 {
-        if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
-                add_disk_randomness(req->rq_disk);
-                blkdev_dequeue_request(req);
-                end_that_request_last(req, uptodate);
+        if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+                if (dequeue)
+                        blkdev_dequeue_request(rq);
+                add_disk_randomness(rq->rq_disk);
+                end_that_request_last(rq, uptodate);
         }
 }

+static unsigned int rq_byte_size(struct request *rq)
+{
+        if (blk_fs_request(rq))
+                return rq->hard_nr_sectors << 9;
+
+        return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq:         the request being processed
+ * @uptodate:   error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request, and removes it from the block layer queues.
+ *     Not suitable for normal IO completion, unless the driver still has
+ *     the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+        __end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq:         the request being processed
+ * @uptodate:   error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request. The request must already have been
+ *     dequeued using blkdev_dequeue_request(), as is normally the case
+ *     for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+        __end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
+
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @rq:         the request being processed
+ * @uptodate:   error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends I/O on the current segment of a request. If that is the only
+ *     remaining segment, the request is also completed and freed.
+ *
+ *     This is a remnant of how older block drivers handled IO completions.
+ *     Modern drivers typically end IO on the full request in one go, unless
+ *     they have a residual value to account for. For that case this function
+ *     isn't really useful, unless the residual just happens to be the
+ *     full current segment. In other words, don't use this function in new
+ *     code. Either use end_request_completely(), or the
+ *     end_that_request_chunk() (along with end_that_request_last()) for
+ *     partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+        __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
 EXPORT_SYMBOL(end_request);

 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
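
For illustration only (not part of this patch, and the function name below is made up): with issue_flush_fn gone, flushing a device's write cache is simply a call to the bio-based blkdev_issue_flush() added above. A hypothetical caller might look like this:

#include <linux/blkdev.h>

/* Hypothetical example: flush the write cache of bdev after writing
 * metadata, via the empty-barrier path implemented above. */
static int example_flush_device(struct block_device *bdev)
{
        sector_t error_sector = 0;
        int err;

        err = blkdev_issue_flush(bdev, &error_sector);
        if (err)
                printk(KERN_ERR "example: cache flush failed near sector %llu\n",
                       (unsigned long long)error_sector);

        return err;
}

As the comment in blkdev_issue_flush() notes, error_sector is only meaningful if the low level driver actually records an error location in ->bi_sector.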
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 540bf3676985..a8130a4ad6d4 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1133,16 +1133,21 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
          * Schedule reads for missing parts of the packet.
          */
         for (f = 0; f < pkt->frames; f++) {
+                struct bio_vec *vec;
+
                 int p, offset;
                 if (written[f])
                         continue;
                 bio = pkt->r_bios[f];
+                vec = bio->bi_io_vec;
                 bio_init(bio);
                 bio->bi_max_vecs = 1;
                 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
                 bio->bi_bdev = pd->bdev;
                 bio->bi_end_io = pkt_end_io_read;
                 bio->bi_private = pkt;
+                bio->bi_io_vec = vec;
+                bio->bi_destructor = pkt_bio_destructor;

                 p = (f * CD_FRAMESIZE) / PAGE_SIZE;
                 offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1439,6 +1444,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
         pkt->w_bio->bi_bdev = pd->bdev;
         pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
         pkt->w_bio->bi_private = pkt;
+        pkt->w_bio->bi_io_vec = bvec;
+        pkt->w_bio->bi_destructor = pkt_bio_destructor;
         for (f = 0; f < pkt->frames; f++)
                 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
                         BUG();
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 06d0552cf49c..e354bfc070e1 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -414,26 +414,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
         req->cmd_type = REQ_TYPE_FLUSH;
 }

-static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
-                               sector_t *sector)
-{
-        struct ps3_storage_device *dev = q->queuedata;
-        struct request *req;
-        int res;
-
-        dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
-
-        req = blk_get_request(q, WRITE, __GFP_WAIT);
-        ps3disk_prepare_flush(q, req);
-        res = blk_execute_rq(q, gendisk, req, 0);
-        if (res)
-                dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
-                        __func__, __LINE__, res);
-        blk_put_request(req);
-        return res;
-}
-
-
 static unsigned long ps3disk_mask;

 static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -506,7 +486,6 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
         blk_queue_dma_alignment(queue, dev->blk_size-1);
         blk_queue_hardsect_size(queue, dev->blk_size);

-        blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
         blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
                           ps3disk_prepare_flush);

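
The ps3disk conversion above shows the whole driver-side contract after this change: barrier/flush support is declared with blk_queue_ordered() plus a prepare_flush_fn, and no issue_flush_fn is registered. A minimal, hypothetical setup (names invented for illustration, command encoding driver-specific) could look like:

#include <linux/blkdev.h>

/* Illustrative only, not from this patch: the prepare_flush_fn turns the
 * block layer's pre/post flush request into a device-specific command. */
static void example_prepare_flush(struct request_queue *q, struct request *rq)
{
        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd_type = REQ_TYPE_FLUSH;
}

static void example_setup_flush(struct request_queue *q)
{
        blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, example_prepare_flush);
}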
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4754769eda97..92177ca48b4d 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -716,32 +716,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
         rq->buffer = rq->cmd;
 }

-static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
-                               sector_t *error_sector)
-{
-        ide_drive_t *drive = q->queuedata;
-        struct request *rq;
-        int ret;
-
-        if (!drive->wcache)
-                return 0;
-
-        rq = blk_get_request(q, WRITE, __GFP_WAIT);
-
-        idedisk_prepare_flush(q, rq);
-
-        ret = blk_execute_rq(q, disk, rq, 0);
-
-        /*
-         * if we failed and caller wants error offset, get it
-         */
-        if (ret && error_sector)
-                *error_sector = ide_get_error_location(drive, rq->cmd);
-
-        blk_put_request(rq);
-        return ret;
-}
-
 /*
  * This is tightly woven into the driver->do_special can not touch.
  * DON'T do it again until a total personality rewrite is committed.
@@ -781,7 +755,6 @@ static void update_ordered(ide_drive_t *drive)
         struct hd_driveid *id = drive->id;
         unsigned ordered = QUEUE_ORDERED_NONE;
         prepare_flush_fn *prep_fn = NULL;
-        issue_flush_fn *issue_fn = NULL;

         if (drive->wcache) {
                 unsigned long long capacity;
@@ -805,13 +778,11 @@ static void update_ordered(ide_drive_t *drive)
                 if (barrier) {
                         ordered = QUEUE_ORDERED_DRAIN_FLUSH;
                         prep_fn = idedisk_prepare_flush;
-                        issue_fn = idedisk_issue_flush;
                 }
         } else
                 ordered = QUEUE_ORDERED_DRAIN;

         blk_queue_ordered(drive->queue, ordered, prep_fn);
-        blk_queue_issue_flush_fn(drive->queue, issue_fn);
 }

 static int write_cache(ide_drive_t *drive, int arg)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4cece930114c..f36ff5962af6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -322,41 +322,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
         spin_unlock_irqrestore(&ide_lock, flags);
 }

-/*
- * FIXME: probably move this somewhere else, name is bad too :)
- */
-u64 ide_get_error_location(ide_drive_t *drive, char *args)
-{
-        u32 high, low;
-        u8 hcyl, lcyl, sect;
-        u64 sector;
-
-        high = 0;
-        hcyl = args[5];
-        lcyl = args[4];
-        sect = args[3];
-
-        if (ide_id_has_flush_cache_ext(drive->id)) {
-                low = (hcyl << 16) | (lcyl << 8) | sect;
-                HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
-                high = ide_read_24(drive);
-        } else {
-                u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
-                if (cur & 0x40) {
-                        high = cur & 0xf;
-                        low = (hcyl << 16) | (lcyl << 8) | sect;
-                } else {
-                        low = hcyl * drive->head * drive->sect;
-                        low += lcyl * drive->sect;
-                        low += sect - 1;
-                }
-        }
-
-        sector = ((u64) high << 24) | low;
-        return sector;
-}
-EXPORT_SYMBOL(ide_get_error_location);
-
 /**
  * ide_end_drive_cmd - end an explicit drive command
  * @drive: command
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2bcde5798b5a..fbe477bb2c68 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -999,33 +999,6 @@ void dm_table_unplug_all(struct dm_table *t)
         }
 }

-int dm_table_flush_all(struct dm_table *t)
-{
-        struct list_head *d, *devices = dm_table_get_devices(t);
-        int ret = 0;
-        unsigned i;
-
-        for (i = 0; i < t->num_targets; i++)
-                if (t->targets[i].type->flush)
-                        t->targets[i].type->flush(&t->targets[i]);
-
-        for (d = devices->next; d != devices; d = d->next) {
-                struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-                struct request_queue *q = bdev_get_queue(dd->bdev);
-                int err;
-
-                if (!q->issue_flush_fn)
-                        err = -EOPNOTSUPP;
-                else
-                        err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
-                if (!ret)
-                        ret = err;
-        }
-
-        return ret;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
         dm_get(t->md);
@@ -1043,4 +1016,3 @@ EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 167765c47747..d837d37f6209 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,21 +840,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
         return 0;
 }

-static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
-                        sector_t *error_sector)
-{
-        struct mapped_device *md = q->queuedata;
-        struct dm_table *map = dm_get_table(md);
-        int ret = -ENXIO;
-
-        if (map) {
-                ret = dm_table_flush_all(map);
-                dm_table_put(map);
-        }
-
-        return ret;
-}
-
 static void dm_unplug_all(struct request_queue *q)
 {
         struct mapped_device *md = q->queuedata;
@@ -1003,7 +988,6 @@ static struct mapped_device *alloc_dev(int minor)
         blk_queue_make_request(md->queue, dm_request);
         blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
         md->queue->unplug_fn = dm_unplug_all;
-        md->queue->issue_flush_fn = dm_flush_all;

         md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
         if (!md->io_pool)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 462ee652a890..4b3faa45277e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -111,7 +111,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
-int dm_table_flush_all(struct dm_table *t);

 /*-----------------------------------------------------------------
  * A registry of target types.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 550148770bb2..56a11f6c127b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -92,25 +92,6 @@ static void linear_unplug(struct request_queue *q)
         }
 }

-static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
-                              sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        linear_conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        for (i=0; i < mddev->raid_disks && ret == 0; i++) {
-                struct block_device *bdev = conf->disks[i].rdev->bdev;
-                struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                if (!r_queue->issue_flush_fn)
-                        ret = -EOPNOTSUPP;
-                else
-                        ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
-        }
-        return ret;
-}
-
 static int linear_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -279,7 +260,6 @@ static int linear_run (mddev_t *mddev)

         blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
         mddev->queue->unplug_fn = linear_unplug;
-        mddev->queue->issue_flush_fn = linear_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = linear_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;
         return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index acf1b81b47cb..0dc563d76b39 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3463,7 +3463,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
                 mddev->pers->stop(mddev);
                 mddev->queue->merge_bvec_fn = NULL;
                 mddev->queue->unplug_fn = NULL;
-                mddev->queue->issue_flush_fn = NULL;
                 mddev->queue->backing_dev_info.congested_fn = NULL;
                 if (mddev->pers->sync_request)
                         sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index f2a63f394ad9..b35731cceac6 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -194,35 +194,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
194 seq_printf (seq, "]"); 194 seq_printf (seq, "]");
195} 195}
196 196
197static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
198 sector_t *error_sector)
199{
200 mddev_t *mddev = q->queuedata;
201 multipath_conf_t *conf = mddev_to_conf(mddev);
202 int i, ret = 0;
203
204 rcu_read_lock();
205 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
206 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
207 if (rdev && !test_bit(Faulty, &rdev->flags)) {
208 struct block_device *bdev = rdev->bdev;
209 struct request_queue *r_queue = bdev_get_queue(bdev);
210
211 if (!r_queue->issue_flush_fn)
212 ret = -EOPNOTSUPP;
213 else {
214 atomic_inc(&rdev->nr_pending);
215 rcu_read_unlock();
216 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
217 error_sector);
218 rdev_dec_pending(rdev, mddev);
219 rcu_read_lock();
220 }
221 }
222 }
223 rcu_read_unlock();
224 return ret;
225}
226static int multipath_congested(void *data, int bits) 197static int multipath_congested(void *data, int bits)
227{ 198{
228 mddev_t *mddev = data; 199 mddev_t *mddev = data;
@@ -527,7 +498,6 @@ static int multipath_run (mddev_t *mddev)
         mddev->array_size = mddev->size;

         mddev->queue->unplug_fn = multipath_unplug;
-        mddev->queue->issue_flush_fn = multipath_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = multipath_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;

diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ef0da2d84959..e79e1a538d44 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -40,26 +40,6 @@ static void raid0_unplug(struct request_queue *q)
         }
 }

-static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
-                             sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        raid0_conf_t *conf = mddev_to_conf(mddev);
-        mdk_rdev_t **devlist = conf->strip_zone[0].dev;
-        int i, ret = 0;
-
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                struct block_device *bdev = devlist[i]->bdev;
-                struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                if (!r_queue->issue_flush_fn)
-                        ret = -EOPNOTSUPP;
-                else
-                        ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
-        }
-        return ret;
-}
-
 static int raid0_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -250,7 +230,6 @@ static int create_strip_zones (mddev_t *mddev)

         mddev->queue->unplug_fn = raid0_unplug;

-        mddev->queue->issue_flush_fn = raid0_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = raid0_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6d03bea6fa58..0bcefad82413 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -567,36 +567,6 @@ static void raid1_unplug(struct request_queue *q)
         md_wakeup_thread(mddev->thread);
 }

-static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
-                             sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        rcu_read_lock();
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-                if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                        struct block_device *bdev = rdev->bdev;
-                        struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                        if (!r_queue->issue_flush_fn)
-                                ret = -EOPNOTSUPP;
-                        else {
-                                atomic_inc(&rdev->nr_pending);
-                                rcu_read_unlock();
-                                ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                              error_sector);
-                                rdev_dec_pending(rdev, mddev);
-                                rcu_read_lock();
-                        }
-                }
-        }
-        rcu_read_unlock();
-        return ret;
-}
-
 static int raid1_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -1997,7 +1967,6 @@ static int run(mddev_t *mddev)
         mddev->array_size = mddev->size;

         mddev->queue->unplug_fn = raid1_unplug;
-        mddev->queue->issue_flush_fn = raid1_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = raid1_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 25a96c42bdb0..fc6607acb6e4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -611,36 +611,6 @@ static void raid10_unplug(struct request_queue *q)
         md_wakeup_thread(mddev->thread);
 }

-static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
-                              sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        rcu_read_lock();
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-                if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                        struct block_device *bdev = rdev->bdev;
-                        struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                        if (!r_queue->issue_flush_fn)
-                                ret = -EOPNOTSUPP;
-                        else {
-                                atomic_inc(&rdev->nr_pending);
-                                rcu_read_unlock();
-                                ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                              error_sector);
-                                rdev_dec_pending(rdev, mddev);
-                                rcu_read_lock();
-                        }
-                }
-        }
-        rcu_read_unlock();
-        return ret;
-}
-
 static int raid10_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -2118,7 +2088,6 @@ static int run(mddev_t *mddev)
         mddev->resync_max_sectors = size << conf->chunk_shift;

         mddev->queue->unplug_fn = raid10_unplug;
-        mddev->queue->issue_flush_fn = raid10_issue_flush;
         mddev->queue->backing_dev_info.congested_fn = raid10_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index caaca9e178bc..8ee181a01f52 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3204,36 +3204,6 @@ static void raid5_unplug_device(struct request_queue *q)
         unplug_slaves(mddev);
 }

-static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
-                             sector_t *error_sector)
-{
-        mddev_t *mddev = q->queuedata;
-        raid5_conf_t *conf = mddev_to_conf(mddev);
-        int i, ret = 0;
-
-        rcu_read_lock();
-        for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-                if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                        struct block_device *bdev = rdev->bdev;
-                        struct request_queue *r_queue = bdev_get_queue(bdev);
-
-                        if (!r_queue->issue_flush_fn)
-                                ret = -EOPNOTSUPP;
-                        else {
-                                atomic_inc(&rdev->nr_pending);
-                                rcu_read_unlock();
-                                ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-                                                              error_sector);
-                                rdev_dec_pending(rdev, mddev);
-                                rcu_read_lock();
-                        }
-                }
-        }
-        rcu_read_unlock();
-        return ret;
-}
-
 static int raid5_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -4263,7 +4233,6 @@ static int run(mddev_t *mddev)
                        mdname(mddev));

         mddev->queue->unplug_fn = raid5_unplug_device;
-        mddev->queue->issue_flush_fn = raid5_issue_flush;
         mddev->queue->backing_dev_info.congested_data = mddev;
         mddev->queue->backing_dev_info.congested_fn = raid5_congested;

diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 50b2c7334410..d602ba6d5417 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -149,29 +149,6 @@ static int i2o_block_device_flush(struct i2o_device *dev)
 };

 /**
- *        i2o_block_issue_flush - device-flush interface for block-layer
- *        @queue: the request queue of the device which should be flushed
- *        @disk: gendisk
- *        @error_sector: error offset
- *
- *        Helper function to provide flush functionality to block-layer.
- *
- *        Returns 0 on success or negative error code on failure.
- */
-
-static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
-                                 sector_t * error_sector)
-{
-        struct i2o_block_device *i2o_blk_dev = queue->queuedata;
-        int rc = -ENODEV;
-
-        if (likely(i2o_blk_dev))
-                rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
-
-        return rc;
-}
-
-/**
  *        i2o_block_device_mount - Mount (load) the media of device dev
  *        @dev: I2O device which should receive the mount request
  *        @media_id: Media Identifier
@@ -1009,7 +986,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
         }

         blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
-        blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);

         gd->major = I2O_MAJOR;
         gd->queue = queue;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0a3a528212c2..69f542c4923c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -826,27 +826,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
         return 0;
 }

-static int sd_issue_flush(struct request_queue *q, struct gendisk *disk,
-                          sector_t *error_sector)
-{
-        int ret = 0;
-        struct scsi_device *sdp = q->queuedata;
-        struct scsi_disk *sdkp;
-
-        if (sdp->sdev_state != SDEV_RUNNING)
-                return -ENXIO;
-
-        sdkp = scsi_disk_get_from_dev(&sdp->sdev_gendev);
-
-        if (!sdkp)
-                return -ENODEV;
-
-        if (sdkp->WCE)
-                ret = sd_sync_cache(sdkp);
-        scsi_disk_put(sdkp);
-        return ret;
-}
-
 static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
         memset(rq->cmd, 0, sizeof(rq->cmd));
@@ -1697,7 +1676,6 @@ static int sd_probe(struct device *dev)
         sd_revalidate_disk(gd);

         blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
-        blk_queue_issue_flush_fn(sdp->request_queue, sd_issue_flush);

         gd->driverfs_dev = &sdp->sdev_gendev;
         gd->flags = GENHD_FL_DRIVERFS;
diff --git a/fs/bio.c b/fs/bio.c
index 5f604f269dfa..d59ddbf79626 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -109,11 +109,14 @@ static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned lon

 void bio_free(struct bio *bio, struct bio_set *bio_set)
 {
-        const int pool_idx = BIO_POOL_IDX(bio);
+        if (bio->bi_io_vec) {
+                const int pool_idx = BIO_POOL_IDX(bio);

-        BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+                BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+
+                mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+        }

-        mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
         mempool_free(bio, bio_set->bio_pool);
 }

@@ -127,21 +130,9 @@ static void bio_fs_destructor(struct bio *bio)

 void bio_init(struct bio *bio)
 {
-        bio->bi_next = NULL;
-        bio->bi_bdev = NULL;
+        memset(bio, 0, sizeof(*bio));
         bio->bi_flags = 1 << BIO_UPTODATE;
-        bio->bi_rw = 0;
-        bio->bi_vcnt = 0;
-        bio->bi_idx = 0;
-        bio->bi_phys_segments = 0;
-        bio->bi_hw_segments = 0;
-        bio->bi_hw_front_size = 0;
-        bio->bi_hw_back_size = 0;
-        bio->bi_size = 0;
-        bio->bi_max_vecs = 0;
-        bio->bi_end_io = NULL;
         atomic_set(&bio->bi_cnt, 1);
-        bio->bi_private = NULL;
 }

 /**
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 089a8bc55dd4..4da441337d6e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -176,13 +176,28 @@ struct bio {
 #define bio_offset(bio)        bio_iovec((bio))->bv_offset
 #define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_cur_sectors(bio)   (bio_iovec(bio)->bv_len >> 9)
-#define bio_data(bio)          (page_address(bio_page((bio))) + bio_offset((bio)))
 #define bio_barrier(bio)       ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 #define bio_sync(bio)          ((bio)->bi_rw & (1 << BIO_RW_SYNC))
 #define bio_failfast(bio)      ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
 #define bio_rw_ahead(bio)      ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)       ((bio)->bi_rw & (1 << BIO_RW_META))
+#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size)
+
+static inline unsigned int bio_cur_sectors(struct bio *bio)
+{
+        if (bio->bi_vcnt)
+                return bio_iovec(bio)->bv_len >> 9;
+
+        return 0;
+}
+
+static inline void *bio_data(struct bio *bio)
+{
+        if (bio->bi_vcnt)
+                return page_address(bio_page(bio)) + bio_offset(bio);
+
+        return NULL;
+}

 /*
  * will die
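
The bio_empty_barrier() test added above is what lets code recognize a data-less flush: such a bio has bi_size == 0, bio_sectors() == 0 and possibly no bi_io_vec at all. A hypothetical helper (illustration only, not from this patch) that stays safe for empty barriers:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative only: per-bio debug accounting that skips payload-based
 * work for data-less barriers. */
static void example_account_bio(struct bio *bio)
{
        char b[BDEVNAME_SIZE];

        if (bio_empty_barrier(bio))
                return; /* nothing to count; bio_data() would be NULL */

        printk(KERN_DEBUG "bio: %u sectors at %llu on %s\n",
               bio_sectors(bio),
               (unsigned long long)bio->bi_sector,
               bdevname(bio->bi_bdev, b));
}

This mirrors the checks the patch adds to submit_bio(), blk_partition_remap() and blk_queue_bounce().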
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5ed888b04b29..bbf906a0b419 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -330,7 +330,6 @@ typedef void (unplug_fn) (struct request_queue *);

 struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);

@@ -368,7 +367,6 @@ struct request_queue
         prep_rq_fn              *prep_rq_fn;
         unplug_fn               *unplug_fn;
         merge_bvec_fn           *merge_bvec_fn;
-        issue_flush_fn          *issue_flush_fn;
         prepare_flush_fn        *prepare_flush_fn;
         softirq_done_fn         *softirq_done_fn;

@@ -540,6 +538,7 @@ enum {
 #define blk_barrier_rq(rq)     ((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)         ((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)        ((rq)->next_rq != NULL)
+#define blk_empty_barrier(rq)  (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)

 #define list_entry_rq(ptr)     list_entry((ptr), struct request, queuelist)

@@ -729,7 +728,9 @@ static inline void blk_run_address_space(struct address_space *mapping)
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);
-extern void end_request(struct request *req, int uptodate);
+extern void end_request(struct request *, int);
+extern void end_queued_request(struct request *, int);
+extern void end_dequeued_request(struct request *, int);
 extern void blk_complete_request(struct request *);

 /*
@@ -767,7 +768,6 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
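
The blkdev.h hunks above add blk_empty_barrier() plus the two new completion helpers: end_queued_request() finishes a request that is still on the queue, while end_dequeued_request() is for a request the driver has already taken off with blkdev_dequeue_request(). A rough, hypothetical request_fn skeleton (not from this patch) showing where each fits:

#include <linux/blkdev.h>

/* Illustrative only: skeleton request_fn using the new completion helpers. */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (!blk_fs_request(rq)) {
                        /* unsupported request type: fail it while still queued */
                        end_queued_request(rq, 0);
                        continue;
                }

                blkdev_dequeue_request(rq);
                /*
                 * Hand rq to the hardware here; the completion path would
                 * later call end_dequeued_request(rq, uptodate).
                 */
        }
}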
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 02a27e8cbad2..234fa3df24f6 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1093,11 +1093,6 @@ extern ide_startstop_t ide_do_reset (ide_drive_t *);
 extern void ide_init_drive_cmd (struct request *rq);

 /*
- * this function returns error location sector offset in case of a write error
- */
-extern u64 ide_get_error_location(ide_drive_t *, char *);
-
-/*
  * "action" parameter type for ide_do_drive_cmd() below.
  */
 typedef enum {
diff --git a/mm/bounce.c b/mm/bounce.c
index 3b549bf31f7d..b6d2d0f1019b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -265,6 +265,12 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
         mempool_t *pool;

         /*
+         * Data-less bio, nothing to bounce
+         */
+        if (bio_empty_barrier(*bio_orig))
+                return;
+
+        /*
          * for non-isa bounce case, just check if the bounce pfn is equal
          * to or bigger than the highest pfn in the system -- in that case,
          * don't waste time iterating over bio segments