author     Kent Overstreet <kent.overstreet@gmail.com>   2015-04-28 02:48:34 -0400
committer  Jens Axboe <axboe@fb.com>                     2015-08-13 14:31:57 -0400
commit     8ae126660fddbeebb9251a174e6fa45b6ad8f932 (patch)
tree       fe471f94c8686b59f7a07b5b4f59ee4f51254902
parent     7140aafce2fc14c5af02fdb7859b6bea0108be3d (diff)
block: kill merge_bvec_fn() completely
As generic_make_request() is now able to handle arbitrarily sized bios,
it's no longer necessary for each individual block driver to define its
own ->merge_bvec_fn() callback. Remove every invocation completely.
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: drbd-user@lists.linbit.com
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@kernel.org>
Cc: ceph-devel@vger.kernel.org
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Neil Brown <neilb@suse.de>
Cc: linux-raid@vger.kernel.org
Cc: Christoph Hellwig <hch@infradead.org>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Acked-by: NeilBrown <neilb@suse.de> (for the 'md' bits)
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
[dpark: also remove ->merge_bvec_fn() in dm-thin as well as
dm-era-target, and resolve merge conflicts]
Signed-off-by: Dongsu Park <dpark@posteo.net>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
32 files changed, 9 insertions, 859 deletions
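
For context, the pattern this patch deletes looked roughly as follows. A bio-based driver registered a merge_bvec_fn on its queue so that bio_add_page() would stop growing a bio before it crossed some internal boundary; with generic_make_request() now splitting arbitrarily sized bios, drivers drop both the callback and the blk_queue_merge_bvec() registration. The sketch below is illustrative only and is not code from this patch; the foo_* names and the boundary helper are hypothetical.

/* Illustrative sketch of the removed API, not code from this patch. */
static int foo_merge_bvec(struct request_queue *q,
			  struct bvec_merge_data *bvm,
			  struct bio_vec *bvec)
{
	/* bytes the (hypothetical) driver can still accept at bvm->bi_sector */
	unsigned int remaining = foo_bytes_to_boundary(q->queuedata, bvm);

	/* an empty bio must always be allowed at least one page */
	if (!bvm->bi_size)
		return bvec->bv_len;

	return min_t(unsigned int, remaining, bvec->bv_len);
}

static void foo_init_queue(struct request_queue *q)
{
	blk_queue_make_request(q, foo_make_request);
	blk_queue_merge_bvec(q, foo_merge_bvec);	/* this call goes away */
}
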
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d9c3a75e4a60..0027def35f5a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -69,24 +69,13 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	struct bio *split;
 	struct bio_vec bv, bvprv;
 	struct bvec_iter iter;
-	unsigned seg_size = 0, nsegs = 0;
+	unsigned seg_size = 0, nsegs = 0, sectors = 0;
 	int prev = 0;
 
-	struct bvec_merge_data bvm = {
-		.bi_bdev	= bio->bi_bdev,
-		.bi_sector	= bio->bi_iter.bi_sector,
-		.bi_size	= 0,
-		.bi_rw		= bio->bi_rw,
-	};
-
 	bio_for_each_segment(bv, bio, iter) {
-		if (q->merge_bvec_fn &&
-		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-			goto split;
-
-		bvm.bi_size += bv.bv_len;
+		sectors += bv.bv_len >> 9;
 
-		if (bvm.bi_size >> 9 > queue_max_sectors(q))
+		if (sectors > queue_max_sectors(q))
 			goto split;
 
 		/*
diff --git a/block/blk-settings.c b/block/blk-settings.c
index b38d8d723276..9df73991b231 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -53,28 +53,6 @@ void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
 }
 EXPORT_SYMBOL(blk_queue_unprep_rq);
 
-/**
- * blk_queue_merge_bvec - set a merge_bvec function for queue
- * @q: queue
- * @mbfn: merge_bvec_fn
- *
- * Usually queues have static limitations on the max sectors or segments that
- * we can put in a request. Stacking drivers may have some settings that
- * are dynamic, and thus we have to query the queue whether it is ok to
- * add a new bio_vec to a bio at a given offset or not. If the block device
- * has such limitations, it needs to register a merge_bvec_fn to control
- * the size of bio's sent to it. Note that a block device *must* allow a
- * single page to be added to an empty bio. The block device driver may want
- * to use the bio_split() function to deal with these bio's. By default
- * no merge_bvec_fn is defined for a queue, and only the fixed limits are
- * honored.
- */
-void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
-{
-	q->merge_bvec_fn = mbfn;
-}
-EXPORT_SYMBOL(blk_queue_merge_bvec);
-
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index a08c4a9179f1..015c6e91b756 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1450,7 +1450,6 @@ extern void do_submit(struct work_struct *ws);
 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
 extern void drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
-extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 
 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a1518539b858..74d97f4bac34 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2774,7 +2774,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
-	blk_queue_merge_bvec(q, drbd_merge_bvec);
 	q->queue_lock = &resource->req_lock;
 
 	device->md_io.page = alloc_page(GFP_KERNEL);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 923c857b395b..211592682169 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1512,41 +1512,6 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 	__drbd_make_request(device, bio, start_jif);
 }
 
-/* This is called by bio_add_page().
- *
- * q->max_hw_sectors and other global limits are already enforced there.
- *
- * We need to call down to our lower level device,
- * in case it has special restrictions.
- *
- * We also may need to enforce configured max-bio-bvecs limits.
- *
- * As long as the BIO is empty we have to allow at least one bvec,
- * regardless of size and offset, so no need to ask lower levels.
- */
-int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
-{
-	struct drbd_device *device = (struct drbd_device *) q->queuedata;
-	unsigned int bio_size = bvm->bi_size;
-	int limit = DRBD_MAX_BIO_SIZE;
-	int backing_limit;
-
-	if (bio_size && get_ldev(device)) {
-		unsigned int max_hw_sectors = queue_max_hw_sectors(q);
-		struct request_queue * const b =
-			device->ldev->backing_bdev->bd_disk->queue;
-		if (b->merge_bvec_fn) {
-			bvm->bi_bdev = device->ldev->backing_bdev;
-			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
-			limit = min(limit, backing_limit);
-		}
-		put_ldev(device);
-		if ((limit >> 9) > max_hw_sectors)
-			limit = max_hw_sectors << 9;
-	}
-	return limit;
-}
-
 void request_timer_fn(unsigned long data)
 {
 	struct drbd_device *device = (struct drbd_device *) data;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ee7ad5e44632..7be2375db7f2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2506,26 +2506,6 @@ end_io:
 
 
 
-static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
-			  struct bio_vec *bvec)
-{
-	struct pktcdvd_device *pd = q->queuedata;
-	sector_t zone = get_zone(bmd->bi_sector, pd);
-	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
-	int remaining = (pd->settings.size << 9) - used;
-	int remaining2;
-
-	/*
-	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
-	 * boundary, pkt_make_request() will split the bio.
-	 */
-	remaining2 = PAGE_SIZE - bmd->bi_size;
-	remaining = max(remaining, remaining2);
-
-	BUG_ON(remaining < 0);
-	return remaining;
-}
-
 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
 	struct request_queue *q = pd->disk->queue;
@@ -2533,7 +2513,6 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
 	blk_queue_make_request(q, pkt_make_request);
 	blk_queue_logical_block_size(q, CD_FRAMESIZE);
 	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
-	blk_queue_merge_bvec(q, pkt_merge_bvec);
 	q->queuedata = pd;
 }
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index dcc86937f55c..71dd061a7e11 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3462,52 +3462,6 @@ static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-/*
- * a queue callback. Makes sure that we don't create a bio that spans across
- * multiple osd objects. One exception would be with a single page bios,
- * which we handle later at bio_chain_clone_range()
- */
-static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
-			  struct bio_vec *bvec)
-{
-	struct rbd_device *rbd_dev = q->queuedata;
-	sector_t sector_offset;
-	sector_t sectors_per_obj;
-	sector_t obj_sector_offset;
-	int ret;
-
-	/*
-	 * Find how far into its rbd object the partition-relative
-	 * bio start sector is to offset relative to the enclosing
-	 * device.
-	 */
-	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
-	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
-	obj_sector_offset = sector_offset & (sectors_per_obj - 1);
-
-	/*
-	 * Compute the number of bytes from that offset to the end
-	 * of the object.  Account for what's already used by the bio.
-	 */
-	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
-	if (ret > bmd->bi_size)
-		ret -= bmd->bi_size;
-	else
-		ret = 0;
-
-	/*
-	 * Don't send back more than was asked for.  And if the bio
-	 * was empty, let the whole thing through because:  "Note
-	 * that a block device *must* allow a single page to be
-	 * added to an empty bio."
-	 */
-	rbd_assert(bvec->bv_len <= PAGE_SIZE);
-	if (ret > (int) bvec->bv_len || !bmd->bi_size)
-		ret = (int) bvec->bv_len;
-
-	return ret;
-}
-
 static void rbd_free_disk(struct rbd_device *rbd_dev)
 {
 	struct gendisk *disk = rbd_dev->disk;
@@ -3806,7 +3760,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
 	q->limits.discard_zeroes_data = 1;
 
-	blk_queue_merge_bvec(q, rbd_merge_bvec);
 	disk->queue = q;
 
 	q->queuedata = rbd_dev;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 04d0dadc48b1..d2b5dfbb30cf 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3771,26 +3771,6 @@ static int cache_iterate_devices(struct dm_target *ti,
 	return r;
 }
 
-/*
- * We assume I/O is going to the origin (which is the volume
- * more likely to have restrictions e.g. by being striped).
- * (Looking up the exact location of the data would be expensive
- * and could always be out of date by the time the bio is submitted.)
- */
-static int cache_bvec_merge(struct dm_target *ti,
-			    struct bvec_merge_data *bvm,
-			    struct bio_vec *biovec, int max_size)
-{
-	struct cache *cache = ti->private;
-	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = cache->origin_dev->bdev;
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 {
 	/*
@@ -3834,7 +3814,6 @@ static struct target_type cache_target = {
 	.status = cache_status,
 	.message = cache_message,
 	.iterate_devices = cache_iterate_devices,
-	.merge = cache_bvec_merge,
 	.io_hints = cache_io_hints,
 };
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 59da573cf994..ba5c2105f4e6 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2035,21 +2035,6 @@ error:
 	return -EINVAL;
 }
 
-static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		       struct bio_vec *biovec, int max_size)
-{
-	struct crypt_config *cc = ti->private;
-	struct request_queue *q = bdev_get_queue(cc->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = cc->dev->bdev;
-	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int crypt_iterate_devices(struct dm_target *ti,
 				 iterate_devices_callout_fn fn, void *data)
 {
@@ -2070,7 +2055,6 @@ static struct target_type crypt_target = {
 	.preresume = crypt_preresume,
 	.resume = crypt_resume,
 	.message = crypt_message,
-	.merge = crypt_merge,
 	.iterate_devices = crypt_iterate_devices,
 };
 
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index ad913cd4aded..0119ebfb3d49 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1673,20 +1673,6 @@ static int era_iterate_devices(struct dm_target *ti,
 	return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
 }
 
-static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		     struct bio_vec *biovec, int max_size)
-{
-	struct era *era = ti->private;
-	struct request_queue *q = bdev_get_queue(era->origin_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = era->origin_dev->bdev;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct era *era = ti->private;
@@ -1717,7 +1703,6 @@ static struct target_type era_target = {
 	.status = era_status,
 	.message = era_message,
 	.iterate_devices = era_iterate_devices,
-	.merge = era_merge,
 	.io_hints = era_io_hints
 };
 
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 04481247aab8..afab13bd683e 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -387,21 +387,6 @@ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long ar
 	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct flakey_c *fc = ti->private;
-	struct request_queue *q = bdev_get_queue(fc->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = fc->dev->bdev;
-	bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
 {
 	struct flakey_c *fc = ti->private;
@@ -419,7 +404,6 @@ static struct target_type flakey_target = {
 	.end_io = flakey_end_io,
 	.status = flakey_status,
 	.ioctl = flakey_ioctl,
-	.merge = flakey_merge,
 	.iterate_devices = flakey_iterate_devices,
 };
 
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 53e848c10939..7dd5fc8e3eea 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -130,21 +130,6 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
 	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct linear_c *lc = ti->private;
-	struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = lc->dev->bdev;
-	bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int linear_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
@@ -162,7 +147,6 @@ static struct target_type linear_target = {
 	.map = linear_map,
 	.status = linear_status,
 	.ioctl = linear_ioctl,
-	.merge = linear_merge,
 	.iterate_devices = linear_iterate_devices,
 };
 
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index e9d17488d5e3..316cc3fb741f 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -725,21 +725,6 @@ static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
 	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int log_writes_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			    struct bio_vec *biovec, int max_size)
-{
-	struct log_writes_c *lc = ti->private;
-	struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = lc->dev->bdev;
-	bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int log_writes_iterate_devices(struct dm_target *ti,
 				      iterate_devices_callout_fn fn,
 				      void *data)
@@ -793,7 +778,6 @@ static struct target_type log_writes_target = {
 	.end_io = normal_end_io,
 	.status = log_writes_status,
 	.ioctl = log_writes_ioctl,
-	.merge = log_writes_merge,
 	.message = log_writes_message,
 	.iterate_devices = log_writes_iterate_devices,
 	.io_hints = log_writes_io_hints,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2daa67793511..97e165183e79 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1717,24 +1717,6 @@ static void raid_resume(struct dm_target *ti)
 	mddev_resume(&rs->md);
 }
 
-static int raid_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		      struct bio_vec *biovec, int max_size)
-{
-	struct raid_set *rs = ti->private;
-	struct md_personality *pers = rs->md.pers;
-
-	if (pers && pers->mergeable_bvec)
-		return min(max_size, pers->mergeable_bvec(&rs->md, bvm, biovec));
-
-	/*
-	 * In case we can't request the personality because
-	 * the raid set is not running yet
-	 *
-	 * -> return safe minimum
-	 */
-	return rs->md.chunk_sectors;
-}
-
 static struct target_type raid_target = {
 	.name = "raid",
 	.version = {1, 7, 0},
@@ -1749,7 +1731,6 @@ static struct target_type raid_target = {
 	.presuspend = raid_presuspend,
 	.postsuspend = raid_postsuspend,
 	.resume = raid_resume,
-	.merge = raid_merge,
 };
 
 static int __init dm_raid_init(void)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index dd8ca0bb0980..d10b6876018e 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2330,20 +2330,6 @@ static void origin_status(struct dm_target *ti, status_type_t type,
 	}
 }
 
-static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct dm_origin *o = ti->private;
-	struct request_queue *q = bdev_get_queue(o->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = o->dev->bdev;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int origin_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
@@ -2362,7 +2348,6 @@ static struct target_type origin_target = {
 	.resume = origin_resume,
 	.postsuspend = origin_postsuspend,
 	.status = origin_status,
-	.merge = origin_merge,
 	.iterate_devices = origin_iterate_devices,
 };
 
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4f94c7da82f6..484029db8cba 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -412,26 +412,6 @@ static void stripe_io_hints(struct dm_target *ti,
 	blk_limits_io_opt(limits, chunk_size * sc->stripes);
 }
 
-static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct stripe_c *sc = ti->private;
-	sector_t bvm_sector = bvm->bi_sector;
-	uint32_t stripe;
-	struct request_queue *q;
-
-	stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector);
-
-	q = bdev_get_queue(sc->stripe[stripe].dev->bdev);
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = sc->stripe[stripe].dev->bdev;
-	bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static struct target_type stripe_target = {
 	.name = "striped",
 	.version = {1, 5, 1},
@@ -443,7 +423,6 @@ static struct target_type stripe_target = {
 	.status = stripe_status,
 	.iterate_devices = stripe_iterate_devices,
 	.io_hints = stripe_io_hints,
-	.merge = stripe_merge,
 };
 
 int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 16ba55ad7089..afb4ad3dfeb3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -440,14 +440,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 		       q->limits.alignment_offset,
 		       (unsigned long long) start << SECTOR_SHIFT);
 
-	/*
-	 * Check if merge fn is supported.
-	 * If not we'll force DM to use PAGE_SIZE or
-	 * smaller I/O, just to be safe.
-	 */
-	if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
-		blk_limits_max_hw_sectors(limits,
-					  (unsigned int) (PAGE_SIZE >> 9));
 	return 0;
 }
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2ade2c46dca9..f352e4990998 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3845,20 +3845,6 @@ static int pool_iterate_devices(struct dm_target *ti,
 	return fn(ti, pt->data_dev, 0, ti->len, data);
 }
 
-static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		      struct bio_vec *biovec, int max_size)
-{
-	struct pool_c *pt = ti->private;
-	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = pt->data_dev->bdev;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct pool_c *pt = ti->private;
@@ -3935,7 +3921,6 @@ static struct target_type pool_target = {
 	.resume = pool_resume,
 	.message = pool_message,
 	.status = pool_status,
-	.merge = pool_merge,
 	.iterate_devices = pool_iterate_devices,
 	.io_hints = pool_io_hints,
 };
@@ -4262,21 +4247,6 @@ err:
 	DMEMIT("Error");
 }
 
-static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		      struct bio_vec *biovec, int max_size)
-{
-	struct thin_c *tc = ti->private;
-	struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = tc->pool_dev->bdev;
-	bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int thin_iterate_devices(struct dm_target *ti,
 				iterate_devices_callout_fn fn, void *data)
 {
@@ -4320,7 +4290,6 @@ static struct target_type thin_target = {
 	.presuspend = thin_presuspend,
 	.postsuspend = thin_postsuspend,
 	.status = thin_status,
-	.merge = thin_merge,
 	.iterate_devices = thin_iterate_devices,
 	.io_hints = thin_io_hints,
 };
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b34df8fdb58..c137dcb147b8 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -649,21 +649,6 @@ static int verity_ioctl(struct dm_target *ti, unsigned cmd,
 			cmd, arg);
 }
 
-static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct dm_verity *v = ti->private;
-	struct request_queue *q = bdev_get_queue(v->data_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = v->data_dev->bdev;
-	bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int verity_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
@@ -996,7 +981,6 @@ static struct target_type verity_target = {
 	.map = verity_map,
 	.status = verity_status,
 	.ioctl = verity_ioctl,
-	.merge = verity_merge,
 	.iterate_devices = verity_iterate_devices,
 	.io_hints = verity_io_hints,
 };
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 069f8d7e890e..8bb1ebb6ca7b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -124,9 +124,8 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 #define DMF_FREEING 3
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
-#define DMF_MERGE_IS_OPTIONAL 6
-#define DMF_DEFERRED_REMOVE 7
-#define DMF_SUSPENDED_INTERNALLY 8
+#define DMF_DEFERRED_REMOVE 6
+#define DMF_SUSPENDED_INTERNALLY 7
 
 /*
  * A dummy definition to make RCU happy.
@@ -1725,67 +1724,6 @@ static void __split_and_process_bio(struct mapped_device *md,
  * CRUD END
  *---------------------------------------------------------------*/
 
-static int dm_merge_bvec(struct request_queue *q,
-			 struct bvec_merge_data *bvm,
-			 struct bio_vec *biovec)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table_fast(md);
-	struct dm_target *ti;
-	sector_t max_sectors, max_size = 0;
-
-	if (unlikely(!map))
-		goto out;
-
-	ti = dm_table_find_target(map, bvm->bi_sector);
-	if (!dm_target_is_valid(ti))
-		goto out;
-
-	/*
-	 * Find maximum amount of I/O that won't need splitting
-	 */
-	max_sectors = min(max_io_len(bvm->bi_sector, ti),
-			  (sector_t) queue_max_sectors(q));
-	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
-	/*
-	 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
-	 * to the targets' merge function since it holds sectors not bytes).
-	 * Just doing this as an interim fix for stable@ because the more
-	 * comprehensive cleanup of switching to sector_t will impact every
-	 * DM target that implements a ->merge hook.
-	 */
-	if (max_size > INT_MAX)
-		max_size = INT_MAX;
-
-	/*
-	 * merge_bvec_fn() returns number of bytes
-	 * it can accept at this offset
-	 * max is precomputed maximal io size
-	 */
-	if (max_size && ti->type->merge)
-		max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
-	/*
-	 * If the target doesn't support merge method and some of the devices
-	 * provided their merge_bvec method (we know this by looking for the
-	 * max_hw_sectors that dm_set_device_limits may set), then we can't
-	 * allow bios with multiple vector entries.  So always set max_size
-	 * to 0, and the code below allows just one page.
-	 */
-	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
-		max_size = 0;
-
-out:
-	dm_put_live_table_fast(md);
-	/*
-	 * Always allow an entire first page
-	 */
-	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
-		max_size = biovec->bv_len;
-
-	return max_size;
-}
-
 /*
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
@@ -2508,59 +2446,6 @@ static void __set_size(struct mapped_device *md, sector_t size)
 }
 
 /*
- * Return 1 if the queue has a compulsory merge_bvec_fn function.
- *
- * If this function returns 0, then the device is either a non-dm
- * device without a merge_bvec_fn, or it is a dm device that is
- * able to split any bios it receives that are too big.
- */
-int dm_queue_merge_is_compulsory(struct request_queue *q)
-{
-	struct mapped_device *dev_md;
-
-	if (!q->merge_bvec_fn)
-		return 0;
-
-	if (q->make_request_fn == dm_make_request) {
-		dev_md = q->queuedata;
-		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
-			return 0;
-	}
-
-	return 1;
-}
-
-static int dm_device_merge_is_compulsory(struct dm_target *ti,
-					  struct dm_dev *dev, sector_t start,
-					  sector_t len, void *data)
-{
-	struct block_device *bdev = dev->bdev;
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	return dm_queue_merge_is_compulsory(q);
-}
-
-/*
- * Return 1 if it is acceptable to ignore merge_bvec_fn based
- * on the properties of the underlying devices.
- */
-static int dm_table_merge_is_optional(struct dm_table *table)
-{
-	unsigned i = 0;
-	struct dm_target *ti;
-
-	while (i < dm_table_get_num_targets(table)) {
-		ti = dm_table_get_target(table, i++);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
-			return 0;
-	}
-
-	return 1;
-}
-
-/*
  * Returns old map, which caller must destroy.
  */
 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -2569,7 +2454,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	struct dm_table *old_map;
 	struct request_queue *q = md->queue;
 	sector_t size;
-	int merge_is_optional;
 
 	size = dm_table_get_size(t);
 
@@ -2595,17 +2479,11 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 
 	__bind_mempools(md, t);
 
-	merge_is_optional = dm_table_merge_is_optional(t);
-
 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
 	rcu_assign_pointer(md->map, t);
 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
 
 	dm_table_set_restrictions(t, q, limits);
-	if (merge_is_optional)
-		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
-	else
-		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
 	if (old_map)
 		dm_sync_table(md);
 
@@ -2886,7 +2764,6 @@ int dm_setup_md_queue(struct mapped_device *md)
 	case DM_TYPE_BIO_BASED:
 		dm_init_old_md_queue(md);
 		blk_queue_make_request(md->queue, dm_make_request);
-		blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 		break;
 	}
 
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4e984993d40a..7edcf97dfa5a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -78,8 +78,6 @@ bool dm_table_mq_request_based(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
-int dm_queue_merge_is_compulsory(struct request_queue *q);
-
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
 void dm_set_md_type(struct mapped_device *md, unsigned type);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index aefd66142eef..b7fe7e9fc777 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -52,48 +52,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
 	return conf->disks + lo;
 }
 
-/**
- * linear_mergeable_bvec -- tell bio layer if two requests can be merged
- * @q: request queue
- * @bvm: properties of new bio
- * @biovec: the request that could be merged to it.
- *
- * Return amount of bytes we can take at this offset
- */
-static int linear_mergeable_bvec(struct mddev *mddev,
-				 struct bvec_merge_data *bvm,
-				 struct bio_vec *biovec)
-{
-	struct dev_info *dev0;
-	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
-	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-	int maxbytes = biovec->bv_len;
-	struct request_queue *subq;
-
-	dev0 = which_dev(mddev, sector);
-	maxsectors = dev0->end_sector - sector;
-	subq = bdev_get_queue(dev0->rdev->bdev);
-	if (subq->merge_bvec_fn) {
-		bvm->bi_bdev = dev0->rdev->bdev;
-		bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors;
-		maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
-							     biovec));
-	}
-
-	if (maxsectors < bio_sectors)
-		maxsectors = 0;
-	else
-		maxsectors -= bio_sectors;
-
-	if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0)
-		return maxbytes;
-
-	if (maxsectors > (maxbytes >> 9))
-		return maxbytes;
-	else
-		return maxsectors << 9;
-}
-
 static int linear_congested(struct mddev *mddev, int bits)
 {
 	struct linear_conf *conf;
@@ -338,7 +296,6 @@ static struct md_personality linear_personality =
 	.size = linear_size,
 	.quiesce = linear_quiesce,
 	.congested = linear_congested,
-	.mergeable_bvec = linear_mergeable_bvec,
 };
 
 static int __init linear_init (void)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e1d8723720cc..d28bf5cea224 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -354,29 +354,6 @@ static int md_congested(void *data, int bits)
 	return mddev_congested(mddev, bits);
 }
 
-static int md_mergeable_bvec(struct request_queue *q,
-			     struct bvec_merge_data *bvm,
-			     struct bio_vec *biovec)
-{
-	struct mddev *mddev = q->queuedata;
-	int ret;
-	rcu_read_lock();
-	if (mddev->suspended) {
-		/* Must always allow one vec */
-		if (bvm->bi_size == 0)
-			ret = biovec->bv_len;
-		else
-			ret = 0;
-	} else {
-		struct md_personality *pers = mddev->pers;
-		if (pers && pers->mergeable_bvec)
-			ret = pers->mergeable_bvec(mddev, bvm, biovec);
-		else
-			ret = biovec->bv_len;
-	}
-	rcu_read_unlock();
-	return ret;
-}
 /*
  * Generic flush handling for md
  */
@@ -5188,7 +5165,6 @@ int md_run(struct mddev *mddev)
 	if (mddev->queue) {
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = md_congested;
-		blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
@@ -5317,7 +5293,6 @@ static void md_clean(struct mddev *mddev)
 	mddev->degraded = 0;
 	mddev->safemode = 0;
 	mddev->private = NULL;
-	mddev->merge_check_needed = 0;
 	mddev->bitmap_info.offset = 0;
 	mddev->bitmap_info.default_offset = 0;
 	mddev->bitmap_info.default_space = 0;
@@ -5514,7 +5489,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
 
 	__md_stop_writes(mddev);
 	__md_stop(mddev);
-	mddev->queue->merge_bvec_fn = NULL;
 	mddev->queue->backing_dev_info.congested_fn = NULL;
 
 	/* tell userspace to handle 'inactive' */
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7da6e9c3cb53..ab339571e57f 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,10 +134,6 @@ enum flag_bits {
 	Bitmap_sync,		/* ..actually, not quite In_sync. Need a
 				 * bitmap-based recovery to get fully in sync
 				 */
-	Unmerged,		/* device is being added to array and should
-				 * be considerred for bvec_merge_fn but not
-				 * yet for actual IO
-				 */
 	WriteMostly,		/* Avoid reading if at all possible */
 	AutoDetected,		/* added by auto-detect */
 	Blocked,		/* An error occurred but has not yet
@@ -374,10 +370,6 @@ struct mddev {
 	int				degraded;	/* whether md should consider
 							 * adding a spare
 							 */
-	int				merge_check_needed; /* at least one
-							     * member device
-							     * has a
-							     * merge_bvec_fn */
 
 	atomic_t			recovery_active; /* blocks scheduled, but not written */
 	wait_queue_head_t		recovery_wait;
@@ -532,10 +524,6 @@ struct md_personality
 	/* congested implements bdi.congested_fn().
 	 * Will not be called while array is 'suspended' */
 	int (*congested)(struct mddev *mddev, int bits);
-	/* mergeable_bvec is use to implement ->merge_bvec_fn */
-	int (*mergeable_bvec)(struct mddev *mddev,
-			      struct bvec_merge_data *bvm,
-			      struct bio_vec *biovec);
 };
 
 struct md_sysfs_entry {
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 082a489af9d3..d222522c52e0 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -257,18 +257,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
 
-		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_segments to one, lying
-		 * within a single page.
-		 * (Note: it is very unlikely that a device with
-		 * merge_bvec_fn will be involved in multipath.)
-		 */
-		if (q->merge_bvec_fn) {
-			blk_queue_max_segments(mddev->queue, 1);
-			blk_queue_segment_boundary(mddev->queue,
-						   PAGE_CACHE_SIZE - 1);
-		}
-
 		spin_lock_irq(&conf->device_lock);
 		mddev->degraded--;
 		rdev->raid_disk = path;
@@ -432,15 +420,6 @@ static int multipath_run (struct mddev *mddev)
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 
-		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, not that we ever expect a device with
-		 * a merge_bvec_fn to be involved in multipath */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-			blk_queue_max_segments(mddev->queue, 1);
-			blk_queue_segment_boundary(mddev->queue,
-						   PAGE_CACHE_SIZE - 1);
-		}
-
 		if (!test_bit(Faulty, &rdev->flags))
 			working_disks++;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index e6e0ae56f66b..59cda501a224 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -192,9 +192,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
192 | disk_stack_limits(mddev->gendisk, rdev1->bdev, | 192 | disk_stack_limits(mddev->gendisk, rdev1->bdev, |
193 | rdev1->data_offset << 9); | 193 | rdev1->data_offset << 9); |
194 | 194 | ||
195 | if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) | ||
196 | conf->has_merge_bvec = 1; | ||
197 | |||
198 | if (!smallest || (rdev1->sectors < smallest->sectors)) | 195 | if (!smallest || (rdev1->sectors < smallest->sectors)) |
199 | smallest = rdev1; | 196 | smallest = rdev1; |
200 | cnt++; | 197 | cnt++; |
@@ -351,58 +348,6 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, | |||
351 | + sector_div(sector, zone->nb_dev)]; | 348 | + sector_div(sector, zone->nb_dev)]; |
352 | } | 349 | } |
353 | 350 | ||
354 | /** | ||
355 | * raid0_mergeable_bvec -- tell bio layer if two requests can be merged | ||
356 | * @mddev: the md device | ||
357 | * @bvm: properties of new bio | ||
358 | * @biovec: the request that could be merged to it. | ||
359 | * | ||
360 | * Return amount of bytes we can accept at this offset | ||
361 | */ | ||
362 | static int raid0_mergeable_bvec(struct mddev *mddev, | ||
363 | struct bvec_merge_data *bvm, | ||
364 | struct bio_vec *biovec) | ||
365 | { | ||
366 | struct r0conf *conf = mddev->private; | ||
367 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | ||
368 | sector_t sector_offset = sector; | ||
369 | int max; | ||
370 | unsigned int chunk_sectors = mddev->chunk_sectors; | ||
371 | unsigned int bio_sectors = bvm->bi_size >> 9; | ||
372 | struct strip_zone *zone; | ||
373 | struct md_rdev *rdev; | ||
374 | struct request_queue *subq; | ||
375 | |||
376 | if (is_power_of_2(chunk_sectors)) | ||
377 | max = (chunk_sectors - ((sector & (chunk_sectors-1)) | ||
378 | + bio_sectors)) << 9; | ||
379 | else | ||
380 | max = (chunk_sectors - (sector_div(sector, chunk_sectors) | ||
381 | + bio_sectors)) << 9; | ||
382 | if (max < 0) | ||
383 | max = 0; /* bio_add cannot handle a negative return */ | ||
384 | if (max <= biovec->bv_len && bio_sectors == 0) | ||
385 | return biovec->bv_len; | ||
386 | if (max < biovec->bv_len) | ||
387 | /* too small already, no need to check further */ | ||
388 | return max; | ||
389 | if (!conf->has_merge_bvec) | ||
390 | return max; | ||
391 | |||
392 | /* May need to check subordinate device */ | ||
393 | sector = sector_offset; | ||
394 | zone = find_zone(mddev->private, §or_offset); | ||
395 | rdev = map_sector(mddev, zone, sector, §or_offset); | ||
396 | subq = bdev_get_queue(rdev->bdev); | ||
397 | if (subq->merge_bvec_fn) { | ||
398 | bvm->bi_bdev = rdev->bdev; | ||
399 | bvm->bi_sector = sector_offset + zone->dev_start + | ||
400 | rdev->data_offset; | ||
401 | return min(max, subq->merge_bvec_fn(subq, bvm, biovec)); | ||
402 | } else | ||
403 | return max; | ||
404 | } | ||
405 | |||
406 | static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) | 351 | static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
407 | { | 352 | { |
408 | sector_t array_sectors = 0; | 353 | sector_t array_sectors = 0; |
@@ -727,7 +672,6 @@ static struct md_personality raid0_personality= | |||
727 | .takeover = raid0_takeover, | 672 | .takeover = raid0_takeover, |
728 | .quiesce = raid0_quiesce, | 673 | .quiesce = raid0_quiesce, |
729 | .congested = raid0_congested, | 674 | .congested = raid0_congested, |
730 | .mergeable_bvec = raid0_mergeable_bvec, | ||
731 | }; | 675 | }; |
732 | 676 | ||
733 | static int __init raid0_init (void) | 677 | static int __init raid0_init (void) |
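The removed raid0_mergeable_bvec() existed to cap every bio at the end of the current chunk so a merged bio never straddled two member devices; with the block layer now splitting oversized bios itself, that cap is redundant. A minimal user-space sketch of the arithmetic it performed (illustrative names only; the real code used the kernel helpers is_power_of_2() and sector_div()):

#include <stdio.h>

/* How many bytes a raid0 array could still accept at `sector` without the
 * bio crossing a chunk boundary, given `bio_sectors` already in the bio. */
static long max_bytes_in_chunk(unsigned long long sector,
                               unsigned int chunk_sectors,
                               unsigned int bio_sectors)
{
    unsigned long long used;

    if ((chunk_sectors & (chunk_sectors - 1)) == 0)     /* power of two */
        used = sector & (chunk_sectors - 1);
    else
        used = sector % chunk_sectors;

    long diff = (long)chunk_sectors - (long)(used + bio_sectors);
    if (diff < 0)
        return 0;               /* bio_add cannot handle a negative return */
    return diff << 9;           /* sectors -> bytes */
}

int main(void)
{
    /* 64KiB chunks (128 sectors); a bio starting 8 sectors before the
     * boundary can grow by at most 8 sectors = 4096 bytes. */
    printf("%ld\n", max_bytes_in_chunk(120, 128, 0));
    return 0;
}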
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h index 05539d9c97f0..7127a623f5da 100644 --- a/drivers/md/raid0.h +++ b/drivers/md/raid0.h | |||
@@ -12,8 +12,6 @@ struct r0conf { | |||
12 | struct md_rdev **devlist; /* lists of rdevs, pointed to | 12 | struct md_rdev **devlist; /* lists of rdevs, pointed to |
13 | * by strip_zone->dev */ | 13 | * by strip_zone->dev */ |
14 | int nr_strip_zones; | 14 | int nr_strip_zones; |
15 | int has_merge_bvec; /* at least one member has | ||
16 | * a merge_bvec_fn */ | ||
17 | }; | 15 | }; |
18 | 16 | ||
19 | #endif | 17 | #endif |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 60d0a8626e63..0ff06fdb83a9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -557,7 +557,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
557 | rdev = rcu_dereference(conf->mirrors[disk].rdev); | 557 | rdev = rcu_dereference(conf->mirrors[disk].rdev); |
558 | if (r1_bio->bios[disk] == IO_BLOCKED | 558 | if (r1_bio->bios[disk] == IO_BLOCKED |
559 | || rdev == NULL | 559 | || rdev == NULL |
560 | || test_bit(Unmerged, &rdev->flags) | ||
561 | || test_bit(Faulty, &rdev->flags)) | 560 | || test_bit(Faulty, &rdev->flags)) |
562 | continue; | 561 | continue; |
563 | if (!test_bit(In_sync, &rdev->flags) && | 562 | if (!test_bit(In_sync, &rdev->flags) && |
@@ -708,38 +707,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
708 | return best_disk; | 707 | return best_disk; |
709 | } | 708 | } |
710 | 709 | ||
711 | static int raid1_mergeable_bvec(struct mddev *mddev, | ||
712 | struct bvec_merge_data *bvm, | ||
713 | struct bio_vec *biovec) | ||
714 | { | ||
715 | struct r1conf *conf = mddev->private; | ||
716 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | ||
717 | int max = biovec->bv_len; | ||
718 | |||
719 | if (mddev->merge_check_needed) { | ||
720 | int disk; | ||
721 | rcu_read_lock(); | ||
722 | for (disk = 0; disk < conf->raid_disks * 2; disk++) { | ||
723 | struct md_rdev *rdev = rcu_dereference( | ||
724 | conf->mirrors[disk].rdev); | ||
725 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | ||
726 | struct request_queue *q = | ||
727 | bdev_get_queue(rdev->bdev); | ||
728 | if (q->merge_bvec_fn) { | ||
729 | bvm->bi_sector = sector + | ||
730 | rdev->data_offset; | ||
731 | bvm->bi_bdev = rdev->bdev; | ||
732 | max = min(max, q->merge_bvec_fn( | ||
733 | q, bvm, biovec)); | ||
734 | } | ||
735 | } | ||
736 | } | ||
737 | rcu_read_unlock(); | ||
738 | } | ||
739 | return max; | ||
740 | |||
741 | } | ||
742 | |||
743 | static int raid1_congested(struct mddev *mddev, int bits) | 710 | static int raid1_congested(struct mddev *mddev, int bits) |
744 | { | 711 | { |
745 | struct r1conf *conf = mddev->private; | 712 | struct r1conf *conf = mddev->private; |
@@ -1268,8 +1235,7 @@ read_again: | |||
1268 | break; | 1235 | break; |
1269 | } | 1236 | } |
1270 | r1_bio->bios[i] = NULL; | 1237 | r1_bio->bios[i] = NULL; |
1271 | if (!rdev || test_bit(Faulty, &rdev->flags) | 1238 | if (!rdev || test_bit(Faulty, &rdev->flags)) { |
1272 | || test_bit(Unmerged, &rdev->flags)) { | ||
1273 | if (i < conf->raid_disks) | 1239 | if (i < conf->raid_disks) |
1274 | set_bit(R1BIO_Degraded, &r1_bio->state); | 1240 | set_bit(R1BIO_Degraded, &r1_bio->state); |
1275 | continue; | 1241 | continue; |
@@ -1614,7 +1580,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1614 | struct raid1_info *p; | 1580 | struct raid1_info *p; |
1615 | int first = 0; | 1581 | int first = 0; |
1616 | int last = conf->raid_disks - 1; | 1582 | int last = conf->raid_disks - 1; |
1617 | struct request_queue *q = bdev_get_queue(rdev->bdev); | ||
1618 | 1583 | ||
1619 | if (mddev->recovery_disabled == conf->recovery_disabled) | 1584 | if (mddev->recovery_disabled == conf->recovery_disabled) |
1620 | return -EBUSY; | 1585 | return -EBUSY; |
@@ -1622,11 +1587,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1622 | if (rdev->raid_disk >= 0) | 1587 | if (rdev->raid_disk >= 0) |
1623 | first = last = rdev->raid_disk; | 1588 | first = last = rdev->raid_disk; |
1624 | 1589 | ||
1625 | if (q->merge_bvec_fn) { | ||
1626 | set_bit(Unmerged, &rdev->flags); | ||
1627 | mddev->merge_check_needed = 1; | ||
1628 | } | ||
1629 | |||
1630 | for (mirror = first; mirror <= last; mirror++) { | 1590 | for (mirror = first; mirror <= last; mirror++) { |
1631 | p = conf->mirrors+mirror; | 1591 | p = conf->mirrors+mirror; |
1632 | if (!p->rdev) { | 1592 | if (!p->rdev) { |
@@ -1658,19 +1618,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1658 | break; | 1618 | break; |
1659 | } | 1619 | } |
1660 | } | 1620 | } |
1661 | if (err == 0 && test_bit(Unmerged, &rdev->flags)) { | ||
1662 | /* Some requests might not have seen this new | ||
1663 | * merge_bvec_fn. We must wait for them to complete | ||
1664 | * before merging the device fully. | ||
1665 | * First we make sure any code which has tested | ||
1666 | * our function has submitted the request, then | ||
1667 | * we wait for all outstanding requests to complete. | ||
1668 | */ | ||
1669 | synchronize_sched(); | ||
1670 | freeze_array(conf, 0); | ||
1671 | unfreeze_array(conf); | ||
1672 | clear_bit(Unmerged, &rdev->flags); | ||
1673 | } | ||
1674 | md_integrity_add_rdev(rdev, mddev); | 1621 | md_integrity_add_rdev(rdev, mddev); |
1675 | if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) | 1622 | if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) |
1676 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | 1623 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); |
@@ -2806,8 +2753,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
2806 | goto abort; | 2753 | goto abort; |
2807 | disk->rdev = rdev; | 2754 | disk->rdev = rdev; |
2808 | q = bdev_get_queue(rdev->bdev); | 2755 | q = bdev_get_queue(rdev->bdev); |
2809 | if (q->merge_bvec_fn) | ||
2810 | mddev->merge_check_needed = 1; | ||
2811 | 2756 | ||
2812 | disk->head_position = 0; | 2757 | disk->head_position = 0; |
2813 | disk->seq_start = MaxSector; | 2758 | disk->seq_start = MaxSector; |
@@ -3172,7 +3117,6 @@ static struct md_personality raid1_personality = | |||
3172 | .quiesce = raid1_quiesce, | 3117 | .quiesce = raid1_quiesce, |
3173 | .takeover = raid1_takeover, | 3118 | .takeover = raid1_takeover, |
3174 | .congested = raid1_congested, | 3119 | .congested = raid1_congested, |
3175 | .mergeable_bvec = raid1_mergeable_bvec, | ||
3176 | }; | 3120 | }; |
3177 | 3121 | ||
3178 | static int __init raid_init(void) | 3122 | static int __init raid_init(void) |
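raid1_mergeable_bvec() only did real work when merge_check_needed was set, i.e. when at least one mirror itself exported a merge_bvec_fn; it then reported the minimum of what every live mirror would accept, since raid1 issues the same bio to all of them. A hedged stand-alone sketch of that min-reduction, with a plain array standing in for the rdev list:

#include <stdio.h>

/* Illustrative only: each entry is the byte limit one mirror reports for the
 * proposed bio_vec (negative = Faulty/absent mirror, skipped). raid1 must
 * satisfy the strictest mirror, hence the running minimum. */
static int raid1_limit(const int *mirror_limit, int nmirrors, int proposed_len)
{
    int max = proposed_len;

    for (int i = 0; i < nmirrors; i++) {
        if (mirror_limit[i] < 0)
            continue;
        if (mirror_limit[i] < max)
            max = mirror_limit[i];
    }
    return max;
}

int main(void)
{
    int limits[] = { 4096, -1, 1024 };          /* one mirror accepts only 1KiB */
    printf("%d\n", raid1_limit(limits, 3, 4096));   /* prints 1024 */
    return 0;
}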
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 316ff6f611e9..d92098f3e65b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -671,93 +671,6 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) | |||
671 | return (vchunk << geo->chunk_shift) + offset; | 671 | return (vchunk << geo->chunk_shift) + offset; |
672 | } | 672 | } |
673 | 673 | ||
674 | /** | ||
675 | * raid10_mergeable_bvec -- tell bio layer if two requests can be merged | ||
676 | * @mddev: the md device | ||
677 | * @bvm: properties of new bio | ||
678 | * @biovec: the request that could be merged to it. | ||
679 | * | ||
680 | * Return amount of bytes we can accept at this offset | ||
681 | * This requires checking for end-of-chunk if near_copies != raid_disks, | ||
682 | * and for subordinate merge_bvec_fns if merge_check_needed. | ||
683 | */ | ||
684 | static int raid10_mergeable_bvec(struct mddev *mddev, | ||
685 | struct bvec_merge_data *bvm, | ||
686 | struct bio_vec *biovec) | ||
687 | { | ||
688 | struct r10conf *conf = mddev->private; | ||
689 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | ||
690 | int max; | ||
691 | unsigned int chunk_sectors; | ||
692 | unsigned int bio_sectors = bvm->bi_size >> 9; | ||
693 | struct geom *geo = &conf->geo; | ||
694 | |||
695 | chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1; | ||
696 | if (conf->reshape_progress != MaxSector && | ||
697 | ((sector >= conf->reshape_progress) != | ||
698 | conf->mddev->reshape_backwards)) | ||
699 | geo = &conf->prev; | ||
700 | |||
701 | if (geo->near_copies < geo->raid_disks) { | ||
702 | max = (chunk_sectors - ((sector & (chunk_sectors - 1)) | ||
703 | + bio_sectors)) << 9; | ||
704 | if (max < 0) | ||
705 | /* bio_add cannot handle a negative return */ | ||
706 | max = 0; | ||
707 | if (max <= biovec->bv_len && bio_sectors == 0) | ||
708 | return biovec->bv_len; | ||
709 | } else | ||
710 | max = biovec->bv_len; | ||
711 | |||
712 | if (mddev->merge_check_needed) { | ||
713 | struct { | ||
714 | struct r10bio r10_bio; | ||
715 | struct r10dev devs[conf->copies]; | ||
716 | } on_stack; | ||
717 | struct r10bio *r10_bio = &on_stack.r10_bio; | ||
718 | int s; | ||
719 | if (conf->reshape_progress != MaxSector) { | ||
720 | /* Cannot give any guidance during reshape */ | ||
721 | if (max <= biovec->bv_len && bio_sectors == 0) | ||
722 | return biovec->bv_len; | ||
723 | return 0; | ||
724 | } | ||
725 | r10_bio->sector = sector; | ||
726 | raid10_find_phys(conf, r10_bio); | ||
727 | rcu_read_lock(); | ||
728 | for (s = 0; s < conf->copies; s++) { | ||
729 | int disk = r10_bio->devs[s].devnum; | ||
730 | struct md_rdev *rdev = rcu_dereference( | ||
731 | conf->mirrors[disk].rdev); | ||
732 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | ||
733 | struct request_queue *q = | ||
734 | bdev_get_queue(rdev->bdev); | ||
735 | if (q->merge_bvec_fn) { | ||
736 | bvm->bi_sector = r10_bio->devs[s].addr | ||
737 | + rdev->data_offset; | ||
738 | bvm->bi_bdev = rdev->bdev; | ||
739 | max = min(max, q->merge_bvec_fn( | ||
740 | q, bvm, biovec)); | ||
741 | } | ||
742 | } | ||
743 | rdev = rcu_dereference(conf->mirrors[disk].replacement); | ||
744 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | ||
745 | struct request_queue *q = | ||
746 | bdev_get_queue(rdev->bdev); | ||
747 | if (q->merge_bvec_fn) { | ||
748 | bvm->bi_sector = r10_bio->devs[s].addr | ||
749 | + rdev->data_offset; | ||
750 | bvm->bi_bdev = rdev->bdev; | ||
751 | max = min(max, q->merge_bvec_fn( | ||
752 | q, bvm, biovec)); | ||
753 | } | ||
754 | } | ||
755 | } | ||
756 | rcu_read_unlock(); | ||
757 | } | ||
758 | return max; | ||
759 | } | ||
760 | |||
761 | /* | 674 | /* |
762 | * This routine returns the disk from which the requested read should | 675 | * This routine returns the disk from which the requested read should |
763 | * be done. There is a per-array 'next expected sequential IO' sector | 676 | * be done. There is a per-array 'next expected sequential IO' sector |
@@ -820,12 +733,10 @@ retry: | |||
820 | disk = r10_bio->devs[slot].devnum; | 733 | disk = r10_bio->devs[slot].devnum; |
821 | rdev = rcu_dereference(conf->mirrors[disk].replacement); | 734 | rdev = rcu_dereference(conf->mirrors[disk].replacement); |
822 | if (rdev == NULL || test_bit(Faulty, &rdev->flags) || | 735 | if (rdev == NULL || test_bit(Faulty, &rdev->flags) || |
823 | test_bit(Unmerged, &rdev->flags) || | ||
824 | r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) | 736 | r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) |
825 | rdev = rcu_dereference(conf->mirrors[disk].rdev); | 737 | rdev = rcu_dereference(conf->mirrors[disk].rdev); |
826 | if (rdev == NULL || | 738 | if (rdev == NULL || |
827 | test_bit(Faulty, &rdev->flags) || | 739 | test_bit(Faulty, &rdev->flags)) |
828 | test_bit(Unmerged, &rdev->flags)) | ||
829 | continue; | 740 | continue; |
830 | if (!test_bit(In_sync, &rdev->flags) && | 741 | if (!test_bit(In_sync, &rdev->flags) && |
831 | r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) | 742 | r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) |
@@ -1325,11 +1236,9 @@ retry_write: | |||
1325 | blocked_rdev = rrdev; | 1236 | blocked_rdev = rrdev; |
1326 | break; | 1237 | break; |
1327 | } | 1238 | } |
1328 | if (rdev && (test_bit(Faulty, &rdev->flags) | 1239 | if (rdev && (test_bit(Faulty, &rdev->flags))) |
1329 | || test_bit(Unmerged, &rdev->flags))) | ||
1330 | rdev = NULL; | 1240 | rdev = NULL; |
1331 | if (rrdev && (test_bit(Faulty, &rrdev->flags) | 1241 | if (rrdev && (test_bit(Faulty, &rrdev->flags))) |
1332 | || test_bit(Unmerged, &rrdev->flags))) | ||
1333 | rrdev = NULL; | 1242 | rrdev = NULL; |
1334 | 1243 | ||
1335 | r10_bio->devs[i].bio = NULL; | 1244 | r10_bio->devs[i].bio = NULL; |
@@ -1776,7 +1685,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1776 | int mirror; | 1685 | int mirror; |
1777 | int first = 0; | 1686 | int first = 0; |
1778 | int last = conf->geo.raid_disks - 1; | 1687 | int last = conf->geo.raid_disks - 1; |
1779 | struct request_queue *q = bdev_get_queue(rdev->bdev); | ||
1780 | 1688 | ||
1781 | if (mddev->recovery_cp < MaxSector) | 1689 | if (mddev->recovery_cp < MaxSector) |
1782 | /* only hot-add to in-sync arrays, as recovery is | 1690 | /* only hot-add to in-sync arrays, as recovery is |
@@ -1789,11 +1697,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1789 | if (rdev->raid_disk >= 0) | 1697 | if (rdev->raid_disk >= 0) |
1790 | first = last = rdev->raid_disk; | 1698 | first = last = rdev->raid_disk; |
1791 | 1699 | ||
1792 | if (q->merge_bvec_fn) { | ||
1793 | set_bit(Unmerged, &rdev->flags); | ||
1794 | mddev->merge_check_needed = 1; | ||
1795 | } | ||
1796 | |||
1797 | if (rdev->saved_raid_disk >= first && | 1700 | if (rdev->saved_raid_disk >= first && |
1798 | conf->mirrors[rdev->saved_raid_disk].rdev == NULL) | 1701 | conf->mirrors[rdev->saved_raid_disk].rdev == NULL) |
1799 | mirror = rdev->saved_raid_disk; | 1702 | mirror = rdev->saved_raid_disk; |
@@ -1832,19 +1735,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1832 | rcu_assign_pointer(p->rdev, rdev); | 1735 | rcu_assign_pointer(p->rdev, rdev); |
1833 | break; | 1736 | break; |
1834 | } | 1737 | } |
1835 | if (err == 0 && test_bit(Unmerged, &rdev->flags)) { | ||
1836 | /* Some requests might not have seen this new | ||
1837 | * merge_bvec_fn. We must wait for them to complete | ||
1838 | * before merging the device fully. | ||
1839 | * First we make sure any code which has tested | ||
1840 | * our function has submitted the request, then | ||
1841 | * we wait for all outstanding requests to complete. | ||
1842 | */ | ||
1843 | synchronize_sched(); | ||
1844 | freeze_array(conf, 0); | ||
1845 | unfreeze_array(conf); | ||
1846 | clear_bit(Unmerged, &rdev->flags); | ||
1847 | } | ||
1848 | md_integrity_add_rdev(rdev, mddev); | 1738 | md_integrity_add_rdev(rdev, mddev); |
1849 | if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) | 1739 | if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) |
1850 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | 1740 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); |
@@ -2392,7 +2282,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 | |||
2392 | d = r10_bio->devs[sl].devnum; | 2282 | d = r10_bio->devs[sl].devnum; |
2393 | rdev = rcu_dereference(conf->mirrors[d].rdev); | 2283 | rdev = rcu_dereference(conf->mirrors[d].rdev); |
2394 | if (rdev && | 2284 | if (rdev && |
2395 | !test_bit(Unmerged, &rdev->flags) && | ||
2396 | test_bit(In_sync, &rdev->flags) && | 2285 | test_bit(In_sync, &rdev->flags) && |
2397 | is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, | 2286 | is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, |
2398 | &first_bad, &bad_sectors) == 0) { | 2287 | &first_bad, &bad_sectors) == 0) { |
@@ -2446,7 +2335,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 | |||
2446 | d = r10_bio->devs[sl].devnum; | 2335 | d = r10_bio->devs[sl].devnum; |
2447 | rdev = rcu_dereference(conf->mirrors[d].rdev); | 2336 | rdev = rcu_dereference(conf->mirrors[d].rdev); |
2448 | if (!rdev || | 2337 | if (!rdev || |
2449 | test_bit(Unmerged, &rdev->flags) || | ||
2450 | !test_bit(In_sync, &rdev->flags)) | 2338 | !test_bit(In_sync, &rdev->flags)) |
2451 | continue; | 2339 | continue; |
2452 | 2340 | ||
@@ -3638,8 +3526,6 @@ static int run(struct mddev *mddev) | |||
3638 | disk->rdev = rdev; | 3526 | disk->rdev = rdev; |
3639 | } | 3527 | } |
3640 | q = bdev_get_queue(rdev->bdev); | 3528 | q = bdev_get_queue(rdev->bdev); |
3641 | if (q->merge_bvec_fn) | ||
3642 | mddev->merge_check_needed = 1; | ||
3643 | diff = (rdev->new_data_offset - rdev->data_offset); | 3529 | diff = (rdev->new_data_offset - rdev->data_offset); |
3644 | if (!mddev->reshape_backwards) | 3530 | if (!mddev->reshape_backwards) |
3645 | diff = -diff; | 3531 | diff = -diff; |
@@ -4692,7 +4578,6 @@ static struct md_personality raid10_personality = | |||
4692 | .start_reshape = raid10_start_reshape, | 4578 | .start_reshape = raid10_start_reshape, |
4693 | .finish_reshape = raid10_finish_reshape, | 4579 | .finish_reshape = raid10_finish_reshape, |
4694 | .congested = raid10_congested, | 4580 | .congested = raid10_congested, |
4695 | .mergeable_bvec = raid10_mergeable_bvec, | ||
4696 | }; | 4581 | }; |
4697 | 4582 | ||
4698 | static int __init raid_init(void) | 4583 | static int __init raid_init(void) |
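raid10_mergeable_bvec() layered several decisions: refuse guidance entirely during a reshape, clamp to the chunk boundary only when near_copies < raid_disks (i.e. the layout actually stripes rather than purely mirrors), and then take the minimum over every copy and its replacement. A simplified sketch of the layout decision (stand-in names, and it clamps to the proposed length for brevity, which the original left to the caller):

#include <stdio.h>

static int raid10_limit(unsigned long long sector, unsigned int chunk_sectors,
                        unsigned int bio_sectors, int near_copies,
                        int raid_disks, int proposed_len)
{
    if (near_copies >= raid_disks)      /* pure mirroring: no chunk boundary */
        return proposed_len;

    long diff = (long)chunk_sectors -
                (long)((sector & (chunk_sectors - 1)) + bio_sectors);
    if (diff < 0)
        diff = 0;                       /* never return a negative value */
    long max = diff << 9;
    return max > proposed_len ? proposed_len : (int)max;
}

int main(void)
{
    printf("%d\n", raid10_limit(120, 128, 0, 2, 4, 8192));  /* striped: 4096 */
    printf("%d\n", raid10_limit(120, 128, 0, 2, 2, 8192));  /* mirrored: 8192 */
    return 0;
}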
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7823295332de..6d20692952d2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -4663,35 +4663,6 @@ static int raid5_congested(struct mddev *mddev, int bits) | |||
4663 | return 0; | 4663 | return 0; |
4664 | } | 4664 | } |
4665 | 4665 | ||
4666 | /* We want read requests to align with chunks where possible, | ||
4667 | * but write requests don't need to. | ||
4668 | */ | ||
4669 | static int raid5_mergeable_bvec(struct mddev *mddev, | ||
4670 | struct bvec_merge_data *bvm, | ||
4671 | struct bio_vec *biovec) | ||
4672 | { | ||
4673 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | ||
4674 | int max; | ||
4675 | unsigned int chunk_sectors = mddev->chunk_sectors; | ||
4676 | unsigned int bio_sectors = bvm->bi_size >> 9; | ||
4677 | |||
4678 | /* | ||
4679 | * always allow writes to be mergeable, read as well if array | ||
4680 | * is degraded as we'll go through stripe cache anyway. | ||
4681 | */ | ||
4682 | if ((bvm->bi_rw & 1) == WRITE || mddev->degraded) | ||
4683 | return biovec->bv_len; | ||
4684 | |||
4685 | if (mddev->new_chunk_sectors < mddev->chunk_sectors) | ||
4686 | chunk_sectors = mddev->new_chunk_sectors; | ||
4687 | max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; | ||
4688 | if (max < 0) max = 0; | ||
4689 | if (max <= biovec->bv_len && bio_sectors == 0) | ||
4690 | return biovec->bv_len; | ||
4691 | else | ||
4692 | return max; | ||
4693 | } | ||
4694 | |||
4695 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) | 4666 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) |
4696 | { | 4667 | { |
4697 | sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); | 4668 | sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); |
@@ -7764,7 +7735,6 @@ static struct md_personality raid6_personality = | |||
7764 | .quiesce = raid5_quiesce, | 7735 | .quiesce = raid5_quiesce, |
7765 | .takeover = raid6_takeover, | 7736 | .takeover = raid6_takeover, |
7766 | .congested = raid5_congested, | 7737 | .congested = raid5_congested, |
7767 | .mergeable_bvec = raid5_mergeable_bvec, | ||
7768 | }; | 7738 | }; |
7769 | static struct md_personality raid5_personality = | 7739 | static struct md_personality raid5_personality = |
7770 | { | 7740 | { |
@@ -7788,7 +7758,6 @@ static struct md_personality raid5_personality = | |||
7788 | .quiesce = raid5_quiesce, | 7758 | .quiesce = raid5_quiesce, |
7789 | .takeover = raid5_takeover, | 7759 | .takeover = raid5_takeover, |
7790 | .congested = raid5_congested, | 7760 | .congested = raid5_congested, |
7791 | .mergeable_bvec = raid5_mergeable_bvec, | ||
7792 | }; | 7761 | }; |
7793 | 7762 | ||
7794 | static struct md_personality raid4_personality = | 7763 | static struct md_personality raid4_personality = |
@@ -7813,7 +7782,6 @@ static struct md_personality raid4_personality = | |||
7813 | .quiesce = raid5_quiesce, | 7782 | .quiesce = raid5_quiesce, |
7814 | .takeover = raid4_takeover, | 7783 | .takeover = raid4_takeover, |
7815 | .congested = raid5_congested, | 7784 | .congested = raid5_congested, |
7816 | .mergeable_bvec = raid5_mergeable_bvec, | ||
7817 | }; | 7785 | }; |
7818 | 7786 | ||
7819 | static int __init raid5_init(void) | 7787 | static int __init raid5_init(void) |
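raid5_mergeable_bvec() encoded the policy stated in its comment: writes, and all I/O on a degraded array, are always mergeable because they go through the stripe cache anyway; only reads are clamped to the chunk boundary, honouring the smaller of chunk_sectors and new_chunk_sectors while a reshape is pending. A minimal sketch of that policy with stand-in names (raid5 chunk sizes are powers of two, so the mask is sufficient):

#include <stdbool.h>
#include <stdio.h>

static int raid5_read_limit(bool is_write, bool degraded,
                            unsigned long long sector,
                            unsigned int chunk_sectors,
                            unsigned int new_chunk_sectors,
                            unsigned int bio_sectors)
{
    if (is_write || degraded)           /* stripe cache absorbs these anyway */
        return -1;                      /* -1 here means "no limit" */

    if (new_chunk_sectors && new_chunk_sectors < chunk_sectors)
        chunk_sectors = new_chunk_sectors;

    long diff = (long)chunk_sectors -
                (long)((sector & (chunk_sectors - 1)) + bio_sectors);
    if (diff < 0)
        diff = 0;
    return (int)(diff << 9);
}

int main(void)
{
    printf("%d\n", raid5_read_limit(true, false, 120, 128, 0, 0));   /* -1 */
    printf("%d\n", raid5_read_limit(false, false, 120, 128, 0, 0));  /* 4096 */
    return 0;
}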
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ca778d9c7d81..a1feff54aeab 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -213,14 +213,6 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *); | |||
213 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | 213 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
214 | 214 | ||
215 | struct bio_vec; | 215 | struct bio_vec; |
216 | struct bvec_merge_data { | ||
217 | struct block_device *bi_bdev; | ||
218 | sector_t bi_sector; | ||
219 | unsigned bi_size; | ||
220 | unsigned long bi_rw; | ||
221 | }; | ||
222 | typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, | ||
223 | struct bio_vec *); | ||
224 | typedef void (softirq_done_fn)(struct request *); | 216 | typedef void (softirq_done_fn)(struct request *); |
225 | typedef int (dma_drain_needed_fn)(struct request *); | 217 | typedef int (dma_drain_needed_fn)(struct request *); |
226 | typedef int (lld_busy_fn) (struct request_queue *q); | 218 | typedef int (lld_busy_fn) (struct request_queue *q); |
@@ -306,7 +298,6 @@ struct request_queue { | |||
306 | make_request_fn *make_request_fn; | 298 | make_request_fn *make_request_fn; |
307 | prep_rq_fn *prep_rq_fn; | 299 | prep_rq_fn *prep_rq_fn; |
308 | unprep_rq_fn *unprep_rq_fn; | 300 | unprep_rq_fn *unprep_rq_fn; |
309 | merge_bvec_fn *merge_bvec_fn; | ||
310 | softirq_done_fn *softirq_done_fn; | 301 | softirq_done_fn *softirq_done_fn; |
311 | rq_timed_out_fn *rq_timed_out_fn; | 302 | rq_timed_out_fn *rq_timed_out_fn; |
312 | dma_drain_needed_fn *dma_drain_needed; | 303 | dma_drain_needed_fn *dma_drain_needed; |
@@ -992,7 +983,6 @@ extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); | |||
992 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); | 983 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); |
993 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); | 984 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); |
994 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); | 985 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); |
995 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | ||
996 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 986 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
997 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 987 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
998 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 988 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
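With struct bvec_merge_data, the merge_bvec_fn typedef, the request_queue member, and blk_queue_merge_bvec() all gone, no driver can register the hook any longer. For reference, the retired registration pattern looked roughly like this (hypothetical "mydrv" names; the signatures are the ones removed above, and this no longer builds after the patch):

#include <linux/blkdev.h>

static int mydrv_merge_bvec(struct request_queue *q,
                            struct bvec_merge_data *bvm,
                            struct bio_vec *biovec)
{
    /* Return how many bytes of @biovec the driver accepts at bvm->bi_sector;
     * returning less than biovec->bv_len stopped bio_add_page() from
     * growing the bio any further. */
    return biovec->bv_len;
}

static void mydrv_init_queue(struct request_queue *q)
{
    blk_queue_merge_bvec(q, mydrv_merge_bvec);  /* helper removed by this patch */
}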
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 51cc1deb7af3..76d23fa8c7d3 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -82,9 +82,6 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); | |||
82 | typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, | 82 | typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, |
83 | unsigned long arg); | 83 | unsigned long arg); |
84 | 84 | ||
85 | typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, | ||
86 | struct bio_vec *biovec, int max_size); | ||
87 | |||
88 | /* | 85 | /* |
89 | * These iteration functions are typically used to check (and combine) | 86 | * These iteration functions are typically used to check (and combine) |
90 | * properties of underlying devices. | 87 | * properties of underlying devices. |
@@ -160,7 +157,6 @@ struct target_type { | |||
160 | dm_status_fn status; | 157 | dm_status_fn status; |
161 | dm_message_fn message; | 158 | dm_message_fn message; |
162 | dm_ioctl_fn ioctl; | 159 | dm_ioctl_fn ioctl; |
163 | dm_merge_fn merge; | ||
164 | dm_busy_fn busy; | 160 | dm_busy_fn busy; |
165 | dm_iterate_devices_fn iterate_devices; | 161 | dm_iterate_devices_fn iterate_devices; |
166 | dm_io_hints_fn io_hints; | 162 | dm_io_hints_fn io_hints; |
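The dm_merge_fn typedef disappears together with the .merge member of struct target_type. A typical passthrough target's merge method simply forwarded the question to the underlying device's queue; roughly, following the removed typedef's signature (the context struct and sector-mapping helper are hypothetical stand-ins for a target's own state):

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/* Pre-patch device-mapper pattern, no longer needed. */
static int passthrough_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                             struct bio_vec *biovec, int max_size)
{
    struct my_target_ctx *ctx = ti->private;            /* hypothetical */
    struct request_queue *q = bdev_get_queue(ctx->dev->bdev);

    if (!q->merge_bvec_fn)
        return max_size;

    bvm->bi_bdev = ctx->dev->bdev;
    bvm->bi_sector = map_to_underlying_sector(ctx, bvm->bi_sector); /* hypothetical */
    return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}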