author     Linus Torvalds <torvalds@linux-foundation.org>   2008-10-10 14:11:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-10-10 14:11:47 -0400
commit     b0af205afb111e17ac8db64c3b9c4f2c332de92a (patch)
tree       3999a2ffbd36e9d1cc6ca30e6b9d6280f4e50116
parent     73f6aa4d44ab6157badc456ddfa05b31e58de5f0 (diff)
parent     0c2322e4ce144e130c03d813fe92de3798662c5e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
dm: detect lost queue
dm: publish dm_vcalloc
dm: publish dm_table_unplug_all
dm: publish dm_get_mapinfo
dm: export struct dm_dev
dm crypt: avoid unnecessary wait when splitting bio
dm crypt: tidy ctx pending
dm crypt: fix async inc_pending
dm crypt: move dec_pending on error into write_io_submit
dm crypt: remove inc_pending from write_io_submit
dm crypt: tidy write loop pending
dm crypt: tidy crypt alloc
dm crypt: tidy inc pending
dm exception store: use chunk_t for_areas
dm exception store: introduce area_location function
dm raid1: kcopyd should stop on error if errors handled
dm mpath: remove is_active from struct dm_path
dm mpath: use more error codes
Fixed up trivial conflict in drivers/md/dm-mpath.c manually.
-rw-r--r--   drivers/md/dm-crypt.c           | 109
-rw-r--r--   drivers/md/dm-exception-store.c |  29
-rw-r--r--   drivers/md/dm-ioctl.c           |   4
-rw-r--r--   drivers/md/dm-mpath.c           |  35
-rw-r--r--   drivers/md/dm-mpath.h           |   2
-rw-r--r--   drivers/md/dm-raid1.c           |   4
-rw-r--r--   drivers/md/dm-table.c           |  97
-rw-r--r--   drivers/md/dm.h                 |  10
-rw-r--r--   include/linux/device-mapper.h   |  18
9 files changed, 190 insertions(+), 118 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13956437bc81..682ef9e6acd3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
         ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
         ctx->sector = sector + cc->iv_offset;
         init_completion(&ctx->restart);
-        atomic_set(&ctx->pending, 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
 {
         int r;
 
+        atomic_set(&ctx->pending, 1);
+
         while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
               ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+                                      unsigned *out_of_pages)
 {
         struct crypt_config *cc = io->target->private;
         struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
                 return NULL;
 
         clone_init(io, clone);
+        *out_of_pages = 0;
 
         for (i = 0; i < nr_iovecs; i++) {
                 page = mempool_alloc(cc->page_pool, gfp_mask);
-                if (!page)
+                if (!page) {
+                        *out_of_pages = 1;
                         break;
+                }
 
                 /*
                  * if additional pages cannot be allocated without waiting,
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
         }
 }
 
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+                                          struct bio *bio, sector_t sector)
+{
+        struct crypt_config *cc = ti->private;
+        struct dm_crypt_io *io;
+
+        io = mempool_alloc(cc->io_pool, GFP_NOIO);
+        io->target = ti;
+        io->base_bio = bio;
+        io->sector = sector;
+        io->error = 0;
+        atomic_set(&io->pending, 0);
+
+        return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+        atomic_inc(&io->pending);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
         struct bio *base_bio = io->base_bio;
         struct bio *clone;
 
-        atomic_inc(&io->pending);
+        crypt_inc_pending(io);
 
         /*
          * The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
                 crypt_free_buffer_pages(cc, clone);
                 bio_put(clone);
                 io->error = -EIO;
+                crypt_dec_pending(io);
                 return;
         }
 
@@ -664,28 +692,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 
         if (async)
                 kcryptd_queue_io(io);
-        else {
-                atomic_inc(&io->pending);
+        else
                 generic_make_request(clone);
-        }
 }
 
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
         struct crypt_config *cc = io->target->private;
         struct bio *clone;
+        int crypt_finished;
+        unsigned out_of_pages = 0;
         unsigned remaining = io->base_bio->bi_size;
         int r;
 
         /*
+         * Prevent io from disappearing until this function completes.
+         */
+        crypt_inc_pending(io);
+        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+
+        /*
          * The allocated buffers can be smaller than the whole bio,
          * so repeat the whole process until all the data can be handled.
          */
         while (remaining) {
-                clone = crypt_alloc_buffer(io, remaining);
+                clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
                 if (unlikely(!clone)) {
                         io->error = -ENOMEM;
-                        return;
+                        break;
                 }
 
                 io->ctx.bio_out = clone;
@@ -693,37 +727,32 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
 
                 remaining -= clone->bi_size;
 
+                crypt_inc_pending(io);
                 r = crypt_convert(cc, &io->ctx);
+                crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
-                if (atomic_dec_and_test(&io->ctx.pending)) {
-                        /* processed, no running async crypto */
+                /* Encryption was already finished, submit io now */
+                if (crypt_finished) {
                         kcryptd_crypt_write_io_submit(io, r, 0);
-                        if (unlikely(r < 0))
-                                return;
-                } else
-                        atomic_inc(&io->pending);
 
-                /* out of memory -> run queues */
-                if (unlikely(remaining)) {
-                        /* wait for async crypto then reinitialize pending */
-                        wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
-                        atomic_set(&io->ctx.pending, 1);
-                        congestion_wait(WRITE, HZ/100);
+                        /*
+                         * If there was an error, do not try next fragments.
+                         * For async, error is processed in async handler.
+                         */
+                        if (unlikely(r < 0))
+                                break;
                 }
-        }
-}
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
-        struct crypt_config *cc = io->target->private;
-
-        /*
-         * Prevent io from disappearing until this function completes.
-         */
-        atomic_inc(&io->pending);
+                /*
+                 * Out of memory -> run queues
+                 * But don't wait if split was due to the io size restriction
+                 */
+                if (unlikely(out_of_pages))
+                        congestion_wait(WRITE, HZ/100);
 
-        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
-        kcryptd_crypt_write_convert_loop(io);
+                if (unlikely(remaining))
+                        wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+        }
 
         crypt_dec_pending(io);
 }
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
         struct crypt_config *cc = io->target->private;
         int r = 0;
 
-        atomic_inc(&io->pending);
+        crypt_inc_pending(io);
 
         crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                            io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
 static int crypt_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
 {
-        struct crypt_config *cc = ti->private;
         struct dm_crypt_io *io;
 
-        io = mempool_alloc(cc->io_pool, GFP_NOIO);
-        io->target = ti;
-        io->base_bio = bio;
-        io->sector = bio->bi_sector - ti->begin;
-        io->error = 0;
-        atomic_set(&io->pending, 0);
+        io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
 
         if (bio_data_dir(io->base_bio) == READ)
                 kcryptd_queue_io(io);
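The dm-crypt rework above centres on a per-io reference count: crypt_io_alloc() starts a request at zero pending references, every piece of queued work first calls crypt_inc_pending(), and the matching crypt_dec_pending() (in the surrounding, unchanged code) completes the base bio when the count drops back to zero. A minimal userspace sketch of that pattern using C11 atomics — the names io_alloc/io_get/io_put are invented for illustration, not dm-crypt functions:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct dm_crypt_io: one in-flight request. */
struct io {
        atomic_int pending;     /* outstanding references */
        int error;              /* first error seen, 0 if none */
};

static struct io *io_alloc(void)
{
        struct io *io = malloc(sizeof(*io));

        if (!io)
                return NULL;
        io->error = 0;
        atomic_init(&io->pending, 0);   /* starts at zero, like crypt_io_alloc() */
        return io;
}

static void io_get(struct io *io)       /* role of crypt_inc_pending() */
{
        atomic_fetch_add(&io->pending, 1);
}

static void io_put(struct io *io)       /* role of crypt_dec_pending() */
{
        /* Dropping the last reference completes and frees the request. */
        if (atomic_fetch_sub(&io->pending, 1) == 1) {
                printf("request complete, error=%d\n", io->error);
                free(io);
        }
}

int main(void)
{
        struct io *io = io_alloc();

        if (!io)
                return 1;

        io_get(io);                     /* keep io alive while splitting it */
        for (int i = 0; i < 3; i++) {
                io_get(io);             /* one reference per fragment of work */
                printf("fragment %d processed\n", i);
                io_put(io);             /* fragment done */
        }
        io_put(io);                     /* drop our own reference */
        return 0;
}
```

Taking a reference before each fragment is why the write path can now break out of the loop on error instead of returning early: the request cannot disappear underneath an asynchronous completion.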
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 41f408068a7c..769ab677f8e0 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -108,12 +108,12 @@ struct pstore {
          * Used to keep track of which metadata area the data in
          * 'chunk' refers to.
          */
-        uint32_t current_area;
+        chunk_t current_area;
 
         /*
          * The next free chunk for an exception.
          */
-        uint32_t next_free;
+        chunk_t next_free;
 
         /*
          * The index of next free exception in the current
@@ -175,7 +175,7 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
 {
         struct dm_io_region where = {
                 .bdev = ps->snap->cow->bdev,
@@ -209,16 +209,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 }
 
 /*
+ * Convert a metadata area index to a chunk index.
+ */
+static chunk_t area_location(struct pstore *ps, chunk_t area)
+{
+        return 1 + ((ps->exceptions_per_area + 1) * area);
+}
+
+/*
  * Read or write a metadata area. Remembering to skip the first
  * chunk which holds the header.
  */
-static int area_io(struct pstore *ps, uint32_t area, int rw)
+static int area_io(struct pstore *ps, chunk_t area, int rw)
 {
         int r;
-        uint32_t chunk;
+        chunk_t chunk;
 
-        /* convert a metadata area index to a chunk index */
-        chunk = 1 + ((ps->exceptions_per_area + 1) * area);
+        chunk = area_location(ps, area);
 
         r = chunk_io(ps, chunk, rw, 0);
         if (r)
@@ -228,7 +235,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
         return 0;
 }
 
-static int zero_area(struct pstore *ps, uint32_t area)
+static int zero_area(struct pstore *ps, chunk_t area)
 {
         memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
         return area_io(ps, area, WRITE);
@@ -404,7 +411,7 @@ static int insert_exceptions(struct pstore *ps, int *full)
 
 static int read_exceptions(struct pstore *ps)
 {
-        uint32_t area;
+        chunk_t area;
         int r, full = 1;
 
         /*
@@ -517,6 +524,7 @@ static int persistent_prepare(struct exception_store *store,
 {
         struct pstore *ps = get_info(store);
         uint32_t stride;
+        chunk_t next_free;
         sector_t size = get_dev_size(store->snap->cow->bdev);
 
         /* Is there enough room ? */
@@ -530,7 +538,8 @@ static int persistent_prepare(struct exception_store *store,
          * into account the location of the metadata chunks.
          */
         stride = (ps->exceptions_per_area + 1);
-        if ((++ps->next_free % stride) == 1)
+        next_free = ++ps->next_free;
+        if (sector_div(next_free, stride) == 1)
                 ps->next_free++;
 
         atomic_inc(&ps->pending_count);
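The new area_location() helper maps a metadata-area index to its chunk: chunk 0 holds the store header, and the metadata chunk for area N sits at 1 + (exceptions_per_area + 1) * N, which is why persistent_prepare() skips chunk numbers that land on a metadata slot. The switch from `%` to sector_div() goes with the uint32_t-to-chunk_t widening, since a plain 64-bit modulo is not available on all 32-bit kernel builds. A standalone sketch of the layout arithmetic (plain C, with an arbitrary example value for exceptions_per_area, not the kernel code):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;       /* wide index type, mirroring the chunk_t switch above */

/* Chunk 0 is the store header; the metadata chunk for a given area sits at
 * 1 + (exceptions_per_area + 1) * area, the same formula as area_location(). */
static chunk_t area_location(chunk_t exceptions_per_area, chunk_t area)
{
        return 1 + ((exceptions_per_area + 1) * area);
}

int main(void)
{
        chunk_t exceptions_per_area = 3;        /* arbitrary example value */

        for (chunk_t area = 0; area < 4; area++)
                printf("area %llu -> metadata chunk %llu\n",
                       (unsigned long long)area,
                       (unsigned long long)area_location(exceptions_per_area, area));
        return 0;
}
```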
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 5b919159f084..dca401dc70a0 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
         unsigned int count = 0;
         struct list_head *tmp;
         size_t len, needed;
-        struct dm_dev *dd;
+        struct dm_dev_internal *dd;
         struct dm_target_deps *deps;
 
         deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
         deps->count = count;
         count = 0;
         list_for_each_entry (dd, dm_table_get_devices(table), list)
-                deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev);
+                deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
 
         param->data_size = param->data_start + needed;
 }
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3d3848132c69..103304c1e3b0 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -30,6 +30,7 @@ struct pgpath {
         struct list_head list;
 
         struct priority_group *pg;      /* Owning PG */
+        unsigned is_active;             /* Path status */
         unsigned fail_count;            /* Cumulative failure count */
 
         struct dm_path path;
@@ -125,7 +126,7 @@ static struct pgpath *alloc_pgpath(void)
         struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 
         if (pgpath) {
-                pgpath->path.is_active = 1;
+                pgpath->is_active = 1;
                 INIT_WORK(&pgpath->deactivate_path, deactivate_path);
         }
 
@@ -575,12 +576,12 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
         /* we need at least a path arg */
         if (as->argc < 1) {
                 ti->error = "no device given";
-                return NULL;
+                return ERR_PTR(-EINVAL);
         }
 
         p = alloc_pgpath();
         if (!p)
-                return NULL;
+                return ERR_PTR(-ENOMEM);
 
         r = dm_get_device(ti, shift(as), ti->begin, ti->len,
                           dm_table_get_mode(ti->table), &p->path.dev);
@@ -608,7 +609,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 
  bad:
         free_pgpath(p);
-        return NULL;
+        return ERR_PTR(r);
 }
 
 static struct priority_group *parse_priority_group(struct arg_set *as,
@@ -626,14 +627,14 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
         if (as->argc < 2) {
                 as->argc = 0;
-                ti->error = "not enough priority group aruments";
-                return NULL;
+                ti->error = "not enough priority group arguments";
+                return ERR_PTR(-EINVAL);
         }
 
         pg = alloc_priority_group();
         if (!pg) {
                 ti->error = "couldn't allocate priority group";
-                return NULL;
+                return ERR_PTR(-ENOMEM);
         }
         pg->m = m;
 
@@ -666,8 +667,10 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
                 path_args.argv = as->argv;
 
                 pgpath = parse_path(&path_args, &pg->ps, ti);
-                if (!pgpath)
+                if (IS_ERR(pgpath)) {
+                        r = PTR_ERR(pgpath);
                         goto bad;
+                }
 
                 pgpath->pg = pg;
                 list_add_tail(&pgpath->list, &pg->pgpaths);
@@ -678,7 +681,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
  bad:
         free_priority_group(pg, ti);
-        return NULL;
+        return ERR_PTR(r);
 }
 
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
@@ -797,8 +800,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
                 struct priority_group *pg;
 
                 pg = parse_priority_group(&as, m);
-                if (!pg) {
-                        r = -EINVAL;
+                if (IS_ERR(pg)) {
+                        r = PTR_ERR(pg);
                         goto bad;
                 }
 
@@ -864,13 +867,13 @@ static int fail_path(struct pgpath *pgpath)
 
         spin_lock_irqsave(&m->lock, flags);
 
-        if (!pgpath->path.is_active)
+        if (!pgpath->is_active)
                 goto out;
 
         DMWARN("Failing path %s.", pgpath->path.dev->name);
 
         pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
-        pgpath->path.is_active = 0;
+        pgpath->is_active = 0;
         pgpath->fail_count++;
 
         m->nr_valid_paths--;
@@ -901,7 +904,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
         spin_lock_irqsave(&m->lock, flags);
 
-        if (pgpath->path.is_active)
+        if (pgpath->is_active)
                 goto out;
 
         if (!pgpath->pg->ps.type->reinstate_path) {
@@ -915,7 +918,7 @@ static int reinstate_path(struct pgpath *pgpath)
         if (r)
                 goto out;
 
-        pgpath->path.is_active = 1;
+        pgpath->is_active = 1;
 
         m->current_pgpath = NULL;
         if (!m->nr_valid_paths++ && m->queue_size)
@@ -1303,7 +1306,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
 
                 list_for_each_entry(p, &pg->pgpaths, list) {
                         DMEMIT("%s %s %u ", p->path.dev->name,
                                p->is_active ? "A" : "F",
-                               p->path.is_active ? "A" : "F",
+                               p->is_active ? "A" : "F",
                                p->fail_count);
                         if (pg->ps.type->status)
                                 sz += pg->ps.type->status(&pg->ps,
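Besides moving is_active out of the public struct dm_path, the multipath changes switch parse_path() and parse_priority_group() from returning a bare NULL to returning ERR_PTR(-EINVAL) / ERR_PTR(-ENOMEM), so callers can recover the precise errno with IS_ERR()/PTR_ERR(). A self-contained userspace imitation of that convention — the helpers below re-implement the idea and are not the kernel's <linux/err.h>:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Re-implementation of the idea behind ERR_PTR()/IS_ERR()/PTR_ERR():
 * encode small negative errnos at the very top of the address range. */
#define MAX_ERRNO 4095

static void *err_ptr(long error)   { return (void *)error; }
static long ptr_err(const void *p) { return (long)p; }
static int is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct path { char name[32]; };

static struct path *parse_path(const char *arg)
{
        if (!arg)
                return err_ptr(-EINVAL);        /* bad arguments */

        struct path *p = malloc(sizeof(*p));
        if (!p)
                return err_ptr(-ENOMEM);        /* allocation failure */

        snprintf(p->name, sizeof(p->name), "%s", arg);
        return p;
}

int main(void)
{
        struct path *p = parse_path(NULL);

        if (is_err(p)) {
                /* The caller learns *why* parsing failed, not just that it did. */
                printf("parse_path failed: %ld\n", ptr_err(p));
        } else {
                printf("parsed %s\n", p->name);
                free(p);
        }
        return 0;
}
```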
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index c198b856a452..e230f7196259 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -13,8 +13,6 @@ struct dm_dev;
 
 struct dm_path {
         struct dm_dev *dev;     /* Read-only */
-        unsigned is_active;     /* Read-only */
-
         void *pscontext;        /* For path-selector use */
 };
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ff05fe893083..29913e42c4ab 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,7 +842,9 @@ static int recover(struct mirror_set *ms, struct region *reg)
         }
 
         /* hand to kcopyd */
-        set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+        if (!errors_handled(ms))
+                set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+
         r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
                            flags, recovery_complete, reg);
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 61f441409234..a740a6950f59 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
         struct list_head *tmp, *next;
 
         list_for_each_safe(tmp, next, devices) {
-                struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+                struct dm_dev_internal *dd =
+                    list_entry(tmp, struct dm_dev_internal, list);
                 kfree(dd);
         }
 }
@@ -327,12 +328,12 @@ static int lookup_device(const char *path, dev_t *dev)
 /*
  * See if we've already got a device in the list.
  */
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 {
-        struct dm_dev *dd;
+        struct dm_dev_internal *dd;
 
         list_for_each_entry (dd, l, list)
-                if (dd->bdev->bd_dev == dev)
+                if (dd->dm_dev.bdev->bd_dev == dev)
                         return dd;
 
         return NULL;
@@ -341,45 +342,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 /*
  * Open a device so we can use it as a map destination.
  */
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+                    struct mapped_device *md)
 {
         static char *_claim_ptr = "I belong to device-mapper";
         struct block_device *bdev;
 
         int r;
 
-        BUG_ON(d->bdev);
+        BUG_ON(d->dm_dev.bdev);
 
-        bdev = open_by_devnum(dev, d->mode);
+        bdev = open_by_devnum(dev, d->dm_dev.mode);
         if (IS_ERR(bdev))
                 return PTR_ERR(bdev);
         r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
         if (r)
                 blkdev_put(bdev);
         else
-                d->bdev = bdev;
+                d->dm_dev.bdev = bdev;
         return r;
 }
 
 /*
  * Close a device that we've been using.
  */
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 {
-        if (!d->bdev)
+        if (!d->dm_dev.bdev)
                 return;
 
-        bd_release_from_disk(d->bdev, dm_disk(md));
-        blkdev_put(d->bdev);
-        d->bdev = NULL;
+        bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+        blkdev_put(d->dm_dev.bdev);
+        d->dm_dev.bdev = NULL;
 }
 
 /*
  * If possible, this checks an area of a destination device is valid.
  */
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int check_device_area(struct dm_dev_internal *dd, sector_t start,
+                             sector_t len)
 {
-        sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+        sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
 
         if (!dev_size)
                 return 1;
@@ -392,16 +395,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
  * careful to leave things as they were if we fail to reopen the
  * device.
  */
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
+                        struct mapped_device *md)
 {
         int r;
-        struct dm_dev dd_copy;
-        dev_t dev = dd->bdev->bd_dev;
+        struct dm_dev_internal dd_copy;
+        dev_t dev = dd->dm_dev.bdev->bd_dev;
 
         dd_copy = *dd;
 
-        dd->mode |= new_mode;
-        dd->bdev = NULL;
+        dd->dm_dev.mode |= new_mode;
+        dd->dm_dev.bdev = NULL;
         r = open_dev(dd, dev, md);
         if (!r)
                 close_dev(&dd_copy, md);
@@ -421,7 +425,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 {
         int r;
         dev_t uninitialized_var(dev);
-        struct dm_dev *dd;
+        struct dm_dev_internal *dd;
         unsigned int major, minor;
 
         BUG_ON(!t);
@@ -443,20 +447,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                 if (!dd)
                         return -ENOMEM;
 
-                dd->mode = mode;
-                dd->bdev = NULL;
+                dd->dm_dev.mode = mode;
+                dd->dm_dev.bdev = NULL;
 
                 if ((r = open_dev(dd, dev, t->md))) {
                         kfree(dd);
                         return r;
                 }
 
-                format_dev_t(dd->name, dev);
+                format_dev_t(dd->dm_dev.name, dev);
 
                 atomic_set(&dd->count, 0);
                 list_add(&dd->list, &t->devices);
 
-        } else if (dd->mode != (mode | dd->mode)) {
+        } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
                 r = upgrade_mode(dd, mode, t->md);
                 if (r)
                         return r;
@@ -465,11 +469,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 
         if (!check_device_area(dd, start, len)) {
                 DMWARN("device %s too small for target", path);
-                dm_put_device(ti, dd);
+                dm_put_device(ti, &dd->dm_dev);
                 return -EINVAL;
         }
 
-        *result = dd;
+        *result = &dd->dm_dev;
 
         return 0;
 }
@@ -478,6 +482,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
         struct request_queue *q = bdev_get_queue(bdev);
         struct io_restrictions *rs = &ti->limits;
+        char b[BDEVNAME_SIZE];
+
+        if (unlikely(!q)) {
+                DMWARN("%s: Cannot set limits for nonexistent device %s",
+                       dm_device_name(ti->table->md), bdevname(bdev, b));
+                return;
+        }
 
         /*
          * Combine the device limits low.
@@ -540,8 +551,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 /*
  * Decrement a devices use count and remove it if necessary.
  */
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
+        struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+                                                  dm_dev);
+
         if (atomic_dec_and_test(&dd->count)) {
                 close_dev(dd, ti->table->md);
                 list_del(&dd->list);
@@ -937,13 +951,20 @@ int dm_table_resume_targets(struct dm_table *t)
 
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
-        struct dm_dev *dd;
+        struct dm_dev_internal *dd;
         struct list_head *devices = dm_table_get_devices(t);
         int r = 0;
 
         list_for_each_entry(dd, devices, list) {
-                struct request_queue *q = bdev_get_queue(dd->bdev);
-                r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+                char b[BDEVNAME_SIZE];
+
+                if (likely(q))
+                        r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+                else
+                        DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
+                                     dm_device_name(t->md),
+                                     bdevname(dd->dm_dev.bdev, b));
         }
 
         return r;
@@ -951,13 +972,19 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 
 void dm_table_unplug_all(struct dm_table *t)
 {
-        struct dm_dev *dd;
+        struct dm_dev_internal *dd;
         struct list_head *devices = dm_table_get_devices(t);
 
         list_for_each_entry(dd, devices, list) {
-                struct request_queue *q = bdev_get_queue(dd->bdev);
+                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+                char b[BDEVNAME_SIZE];
 
-                blk_unplug(q);
+                if (likely(q))
+                        blk_unplug(q);
+                else
+                        DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
+                                     dm_device_name(t->md),
+                                     bdevname(dd->dm_dev.bdev, b));
         }
 }
 
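The dm-table.c and dm.h changes split the old struct dm_dev in two: the fields exported to targets (bdev, mode, name) stay in struct dm_dev, which is now embedded in a private struct dm_dev_internal that keeps the list head and reference count. dm_put_device() recovers the wrapper from the public pointer with container_of(). A small userspace sketch of that embed-and-recover pattern, using generic names rather than the dm structures:

```c
#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* "Public" half handed out to users (plays the role of struct dm_dev). */
struct dev_public {
        int mode;
        char name[16];
};

/* Private wrapper embedding the public half plus bookkeeping
 * (plays the role of struct dm_dev_internal). */
struct dev_internal {
        int refcount;
        struct dev_public pub;
};

static void put_device(struct dev_public *d)
{
        /* Callers only ever hold the public pointer; get the wrapper back. */
        struct dev_internal *dd = container_of(d, struct dev_internal, pub);

        if (--dd->refcount == 0)
                printf("last reference to %s dropped\n", dd->pub.name);
}

int main(void)
{
        struct dev_internal dd = {
                .refcount = 1,
                .pub = { .mode = 1, .name = "sda" },
        };

        put_device(&dd.pub);
        return 0;
}
```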
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1e59a0b0a78a..cd189da2b2fa 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -25,13 +25,10 @@
 /*
  * List of devices that a metadevice uses and should open/close.
  */
-struct dm_dev {
+struct dm_dev_internal {
         struct list_head list;
-
         atomic_t count;
-        int mode;
-        struct block_device *bdev;
-        char name[16];
+        struct dm_dev dm_dev;
 };
 
 struct dm_table;
@@ -49,7 +46,6 @@ void dm_table_presuspend_targets(struct dm_table *t);
 void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-void dm_table_unplug_all(struct dm_table *t);
 
 /*
  * To check the return value from dm_table_find_target().
@@ -93,8 +89,6 @@ void dm_linear_exit(void);
 int dm_stripe_init(void);
 void dm_stripe_exit(void);
 
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
-union map_info *dm_get_mapinfo(struct bio *bio);
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
 
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a90222e3297d..08d783592b73 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -13,7 +13,6 @@
 
 struct dm_target;
 struct dm_table;
-struct dm_dev;
 struct mapped_device;
 struct bio_vec;
 
@@ -84,6 +83,12 @@ void dm_error(const char *message);
  */
 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
 
+struct dm_dev {
+        struct block_device *bdev;
+        int mode;
+        char name[16];
+};
+
 /*
  * Constructors should call these functions to ensure destination devices
  * are opened/closed correctly.
@@ -202,6 +207,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
 struct gendisk *dm_disk(struct mapped_device *md);
 int dm_suspended(struct mapped_device *md);
 int dm_noflush_suspending(struct dm_target *ti);
+union map_info *dm_get_mapinfo(struct bio *bio);
 
 /*
  * Geometry functions.
@@ -232,6 +238,11 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 int dm_table_complete(struct dm_table *t);
 
 /*
+ * Unplug all devices in a table.
+ */
+void dm_table_unplug_all(struct dm_table *t);
+
+/*
 * Table reference counting.
 */
 struct dm_table *dm_get_table(struct mapped_device *md);
@@ -256,6 +267,11 @@ void dm_table_event(struct dm_table *t);
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *t);
 
+/*
+ * A wrapper around vmalloc.
+ */
+void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
+
 /*-----------------------------------------------------------------
  * Macros.
  *---------------------------------------------------------------*/
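The public header also gains dm_vcalloc(), documented only as "a wrapper around vmalloc". A hedged userspace sketch of the calloc-style contract that the name and signature suggest — overflow-checked multiply, zeroed allocation — as an assumption about intent, not the kernel implementation (which would use vmalloc rather than malloc):

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* calloc-style contract: nmemb * elem_size bytes, zeroed, NULL on overflow. */
static void *vcalloc_sketch(unsigned long nmemb, unsigned long elem_size)
{
        if (elem_size && nmemb > ULONG_MAX / elem_size)
                return NULL;    /* multiplication would overflow */

        void *p = malloc(nmemb * elem_size);
        if (p)
                memset(p, 0, nmemb * elem_size);
        return p;
}

int main(void)
{
        unsigned long *table = vcalloc_sketch(1024, sizeof(*table));

        if (table) {
                printf("first entry: %lu\n", table[0]);  /* zero-initialised */
                free(table);
        }
        return 0;
}
```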
