diff options
author | Lars Ellenberg <lars.ellenberg@linbit.com> | 2011-02-23 09:38:47 -0500 |
---|---|---|
committer | Philipp Reisner <philipp.reisner@linbit.com> | 2012-05-09 09:17:03 -0400 |
commit | 4d95a10f97337415c1f74b4901d80e047f8dc128 (patch) | |
tree | 7dda1f2d9e018b9cd5cd061409e6b1741da6a9ca /drivers/block | |
parent | 4281808fb3580c381a23cceb0a29ced92d570a5f (diff) |
drbd: use the newly introduced page pool for bitmap IO
Conflicts:
drbd/drbd_bitmap.c
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/drbd/drbd_bitmap.c | 9 |
1 file changed, 4 insertions, 5 deletions
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 9dab3700ca2d..39a1b0dafff4 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c | |||
@@ -939,9 +939,8 @@ static void bm_async_io_complete(struct bio *bio, int error) | |||
939 | 939 | ||
940 | bm_page_unlock_io(mdev, idx); | 940 | bm_page_unlock_io(mdev, idx); |
941 | 941 | ||
942 | /* FIXME give back to page pool */ | ||
943 | if (ctx->flags & BM_AIO_COPY_PAGES) | 942 | if (ctx->flags & BM_AIO_COPY_PAGES) |
944 | put_page(bio->bi_io_vec[0].bv_page); | 943 | mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool); |
945 | 944 | ||
946 | bio_put(bio); | 945 | bio_put(bio); |
947 | 946 | ||
@@ -978,10 +977,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must | |||
978 | bm_set_page_unchanged(b->bm_pages[page_nr]); | 977 | bm_set_page_unchanged(b->bm_pages[page_nr]); |
979 | 978 | ||
980 | if (ctx->flags & BM_AIO_COPY_PAGES) { | 979 | if (ctx->flags & BM_AIO_COPY_PAGES) { |
981 | /* FIXME alloc_page is good enough for now, but actually needs | ||
982 | * to use pre-allocated page pool */ | ||
983 | void *src, *dest; | 980 | void *src, *dest; |
984 | page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT); | 981 | page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT); |
985 | dest = kmap_atomic(page); | 982 | dest = kmap_atomic(page); |
986 | src = kmap_atomic(b->bm_pages[page_nr]); | 983 | src = kmap_atomic(b->bm_pages[page_nr]); |
987 | memcpy(dest, src, PAGE_SIZE); | 984 | memcpy(dest, src, PAGE_SIZE); |
@@ -993,6 +990,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must | |||
993 | 990 | ||
994 | bio->bi_bdev = mdev->ldev->md_bdev; | 991 | bio->bi_bdev = mdev->ldev->md_bdev; |
995 | bio->bi_sector = on_disk_sector; | 992 | bio->bi_sector = on_disk_sector; |
993 | /* bio_add_page of a single page to an empty bio will always succeed, | ||
994 | * according to api. Do we want to assert that? */ | ||
996 | bio_add_page(bio, page, len, 0); | 995 | bio_add_page(bio, page, len, 0); |
997 | bio->bi_private = ctx; | 996 | bio->bi_private = ctx; |
998 | bio->bi_end_io = bm_async_io_complete; | 997 | bio->bi_end_io = bm_async_io_complete; |