author     Benjamin Marzinski <bmarzins@redhat.com>  2014-08-13 14:53:43 -0400
committer  Mike Snitzer <snitzer@redhat.com>         2014-10-05 20:03:35 -0400
commit     86f1152b117a404229fd6f08ec3faca779f37b92
tree       dceee3703ab97065c3e425b55d42cb4edb9a079b
parent     1f271972478d84dd9e4d6dd82f414d70ed9e78ce
dm: allow active and inactive tables to share dm_devs
Until this change, when loading a new DM table, DM core would re-open
all of the devices in the DM table. Now, DM core will avoid redundant
device opens (and closes when destroying the old table) if the old
table already has a device open using the same mode. This is achieved
by managing reference counts on the table_devices that DM core now
stores in the mapped_device structure (rather than in the dm_table
structure). So a mapped_device's active and inactive dm_tables' dm_dev
lists now just point to the dm_devs stored in the mapped_device's
table_devices list.
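In outline, the resulting ownership model looks like this (the two
structures restated from the dm.c and dm.h hunks below, with explanatory
comments added; not new code):

    /* dm.c: one refcounted entry per (dev_t, mode) pair, owned by the
     * mapped_device and protected by md->table_devices_lock. */
    struct table_device {
            struct list_head list;  /* linked into md->table_devices */
            atomic_t count;         /* one reference per table using the device */
            struct dm_dev dm_dev;   /* the dm_dev handed out to tables */
    };

    /* dm.h: a table's per-device bookkeeping now borrows a pointer to the
     * shared dm_dev instead of embedding its own copy. */
    struct dm_dev_internal {
            struct list_head list;  /* linked into dm_table->devices */
            atomic_t count;         /* dm_get_device()/dm_put_device() references */
            struct dm_dev *dm_dev;  /* points at a table_device's embedded dm_dev */
    };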
This improvement in DM core's device reference counting has the
side-effect of fixing a long-standing limitation of the multipath
target: a DM multipath table couldn't include any paths that were
unusable (failed). For example, if all paths had failed and you added a
new, working path to the table, you still couldn't use it, because the
table load would fail while the table contained failed paths. Now a
reload of a multipath table can include failed devices, and those
devices can be used as soon as they become active again.
The device list code in dm.c isn't a straight copy/paste from the code in
dm-table.c, but it's very close (aside from some variable renames). One
subtle difference is that find_table_device for the table_devices list
will only match devices with the same device number and mode. This is
because we don't want to upgrade a device's mode in the active table when
an inactive table is loaded.
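Concretely, that helper (shown in full in the dm.c hunk below) compares
both the device number and the mode, so a read-write request misses a
read-only entry and opens a second table_device instead of upgrading the
existing one:

    static struct table_device *find_table_device(struct list_head *l, dev_t dev,
                                                  fmode_t mode) {
            struct table_device *td;

            /* An exact mode match is required: reusing a looser match here
             * would upgrade the mode of a dm_dev the active table holds. */
            list_for_each_entry(td, l, list)
                    if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
                            return td;

            return NULL;    /* caller allocates and opens a new table_device */
    }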
Access to the mapped_device structure's table_devices list requires a
mutex (table_devices_lock), so that concurrent table creation and
destruction cannot corrupt the list.
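The locking pattern, condensed from dm_get_table_device() in the dm.c
hunk below (allocation and error paths elided for brevity):

    mutex_lock(&md->table_devices_lock);
    td = find_table_device(&md->table_devices, dev, mode);
    if (!td) {
            /* kmalloc() a table_device, open_table_device() it, and
             * list_add() it to md->table_devices */
    }
    atomic_inc(&td->count);         /* one reference per table */
    mutex_unlock(&md->table_devices_lock);

dm_put_table_device() takes the same mutex, drops the reference, and only
closes the device and unlinks it from the list when the count reaches
zero.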
Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--  drivers/md/dm-ioctl.c          |   2
-rw-r--r--  drivers/md/dm-table.c          | 102
-rw-r--r--  drivers/md/dm.c                | 126
-rw-r--r--  drivers/md/dm.h                |   5
-rw-r--r--  include/uapi/linux/dm-ioctl.h  |   4
5 files changed, 167 insertions, 72 deletions
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 51521429fb59..0be9381365d7 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1418,7 +1418,7 @@ static void retrieve_deps(struct dm_table *table,
 	deps->count = count;
 	count = 0;
 	list_for_each_entry (dd, dm_table_get_devices(table), list)
-		deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
+		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
 
 	param->data_size = param->data_start + needed;
 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f9c6cb8dbcf8..b2bd1ebf4562 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -210,15 +210,16 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 	return 0;
 }
 
-static void free_devices(struct list_head *devices)
+static void free_devices(struct list_head *devices, struct mapped_device *md)
 {
 	struct list_head *tmp, *next;
 
 	list_for_each_safe(tmp, next, devices) {
 		struct dm_dev_internal *dd =
 		    list_entry(tmp, struct dm_dev_internal, list);
-		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
-		       dd->dm_dev.name);
+		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
+		       dm_device_name(md), dd->dm_dev->name);
+		dm_put_table_device(md, dd->dm_dev);
 		kfree(dd);
 	}
 }
@@ -247,7 +248,7 @@ void dm_table_destroy(struct dm_table *t)
 	vfree(t->highs);
 
 	/* free the device list */
-	free_devices(&t->devices);
+	free_devices(&t->devices, t->md);
 
 	dm_free_md_mempools(t->mempools);
 
@@ -262,53 +263,13 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 	struct dm_dev_internal *dd;
 
 	list_for_each_entry (dd, l, list)
-		if (dd->dm_dev.bdev->bd_dev == dev)
+		if (dd->dm_dev->bdev->bd_dev == dev)
 			return dd;
 
 	return NULL;
 }
 
 /*
- * Open a device so we can use it as a map destination.
- */
-static int open_dev(struct dm_dev_internal *d, dev_t dev,
-		    struct mapped_device *md)
-{
-	static char *_claim_ptr = "I belong to device-mapper";
-	struct block_device *bdev;
-
-	int r;
-
-	BUG_ON(d->dm_dev.bdev);
-
-	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
-	if (IS_ERR(bdev))
-		return PTR_ERR(bdev);
-
-	r = bd_link_disk_holder(bdev, dm_disk(md));
-	if (r) {
-		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
-		return r;
-	}
-
-	d->dm_dev.bdev = bdev;
-	return 0;
-}
-
-/*
- * Close a device that we've been using.
- */
-static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
-{
-	if (!d->dm_dev.bdev)
-		return;
-
-	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
-	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
-	d->dm_dev.bdev = NULL;
-}
-
-/*
  * If possible, this checks an area of a destination device is invalid.
  */
 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
@@ -386,19 +347,17 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
 			struct mapped_device *md)
 {
 	int r;
-	struct dm_dev_internal dd_new, dd_old;
+	struct dm_dev *old_dev, *new_dev;
 
-	dd_new = dd_old = *dd;
+	old_dev = dd->dm_dev;
 
-	dd_new.dm_dev.mode |= new_mode;
-	dd_new.dm_dev.bdev = NULL;
-
-	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
+	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
+				dd->dm_dev->mode | new_mode, &new_dev);
 	if (r)
 		return r;
 
-	dd->dm_dev.mode |= new_mode;
-	close_dev(&dd_old, md);
+	dd->dm_dev = new_dev;
+	dm_put_table_device(md, old_dev);
 
 	return 0;
 }
@@ -440,27 +399,22 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 		if (!dd)
 			return -ENOMEM;
 
-		dd->dm_dev.mode = mode;
-		dd->dm_dev.bdev = NULL;
-
-		if ((r = open_dev(dd, dev, t->md))) {
+		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
 			kfree(dd);
 			return r;
 		}
 
-		format_dev_t(dd->dm_dev.name, dev);
-
 		atomic_set(&dd->count, 0);
 		list_add(&dd->list, &t->devices);
 
-	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
+	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
 		r = upgrade_mode(dd, mode, t->md);
 		if (r)
 			return r;
 	}
 	atomic_inc(&dd->count);
 
-	*result = &dd->dm_dev;
+	*result = dd->dm_dev;
 	return 0;
 }
 EXPORT_SYMBOL(dm_get_device);
@@ -505,11 +459,23 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
  */
 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
-	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
-						  dm_dev);
+	int found = 0;
+	struct list_head *devices = &ti->table->devices;
+	struct dm_dev_internal *dd;
 
+	list_for_each_entry(dd, devices, list) {
+		if (dd->dm_dev == d) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		DMWARN("%s: device %s not in table devices list",
+		       dm_device_name(ti->table->md), d->name);
+		return;
+	}
 	if (atomic_dec_and_test(&dd->count)) {
-		close_dev(dd, ti->table->md);
+		dm_put_table_device(ti->table->md, d);
 		list_del(&dd->list);
 		kfree(dd);
 	}
@@ -906,7 +872,7 @@ static int dm_table_set_type(struct dm_table *t)
 	/* Non-request-stackable devices can't be used for request-based dm */
 	devices = dm_table_get_devices(t);
 	list_for_each_entry(dd, devices, list) {
-		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
+		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev->bdev))) {
 			DMWARN("table load rejected: including"
 			       " non-request-stackable devices");
 			return -EINVAL;
@@ -1043,7 +1009,7 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
 	struct gendisk *prev_disk = NULL, *template_disk = NULL;
 
 	list_for_each_entry(dd, devices, list) {
-		template_disk = dd->dm_dev.bdev->bd_disk;
+		template_disk = dd->dm_dev->bdev->bd_disk;
 		if (!blk_get_integrity(template_disk))
 			goto no_integrity;
 		if (!match_all && !blk_integrity_is_initialized(template_disk))
@@ -1629,7 +1595,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 	int r = 0;
 
 	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
 		char b[BDEVNAME_SIZE];
 
 		if (likely(q))
@@ -1637,7 +1603,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 		else
 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
 				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev.bdev, b));
+				     bdevname(dd->dm_dev->bdev, b));
 	}
 
 	list_for_each_entry(cb, &t->target_callbacks, list)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 56a2c74c9a3f..58f3927fd7cc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -142,6 +142,9 @@ struct mapped_device {
 	 */
 	struct dm_table *map;
 
+	struct list_head table_devices;
+	struct mutex table_devices_lock;
+
 	unsigned long flags;
 
 	struct request_queue *queue;
@@ -212,6 +215,12 @@ struct dm_md_mempools {
 	struct bio_set *bs;
 };
 
+struct table_device {
+	struct list_head list;
+	atomic_t count;
+	struct dm_dev dm_dev;
+};
+
 #define RESERVED_BIO_BASED_IOS		16
 #define RESERVED_REQUEST_BASED_IOS	256
 #define RESERVED_MAX_IOS		1024
@@ -670,6 +679,120 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 }
 
 /*
+ * Open a table device so we can use it as a map destination.
+ */
+static int open_table_device(struct table_device *td, dev_t dev,
+			     struct mapped_device *md)
+{
+	static char *_claim_ptr = "I belong to device-mapper";
+	struct block_device *bdev;
+
+	int r;
+
+	BUG_ON(td->dm_dev.bdev);
+
+	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
+	if (IS_ERR(bdev))
+		return PTR_ERR(bdev);
+
+	r = bd_link_disk_holder(bdev, dm_disk(md));
+	if (r) {
+		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
+		return r;
+	}
+
+	td->dm_dev.bdev = bdev;
+	return 0;
+}
+
+/*
+ * Close a table device that we've been using.
+ */
+static void close_table_device(struct table_device *td, struct mapped_device *md)
+{
+	if (!td->dm_dev.bdev)
+		return;
+
+	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
+	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
+	td->dm_dev.bdev = NULL;
+}
+
+static struct table_device *find_table_device(struct list_head *l, dev_t dev,
+					      fmode_t mode) {
+	struct table_device *td;
+
+	list_for_each_entry(td, l, list)
+		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
+			return td;
+
+	return NULL;
+}
+
+int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+			struct dm_dev **result) {
+	int r;
+	struct table_device *td;
+
+	mutex_lock(&md->table_devices_lock);
+	td = find_table_device(&md->table_devices, dev, mode);
+	if (!td) {
+		td = kmalloc(sizeof(*td), GFP_KERNEL);
+		if (!td) {
+			mutex_unlock(&md->table_devices_lock);
+			return -ENOMEM;
+		}
+
+		td->dm_dev.mode = mode;
+		td->dm_dev.bdev = NULL;
+
+		if ((r = open_table_device(td, dev, md))) {
+			mutex_unlock(&md->table_devices_lock);
+			kfree(td);
+			return r;
+		}
+
+		format_dev_t(td->dm_dev.name, dev);
+
+		atomic_set(&td->count, 0);
+		list_add(&td->list, &md->table_devices);
+	}
+	atomic_inc(&td->count);
+	mutex_unlock(&md->table_devices_lock);
+
+	*result = &td->dm_dev;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dm_get_table_device);
+
+void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
+{
+	struct table_device *td = container_of(d, struct table_device, dm_dev);
+
+	mutex_lock(&md->table_devices_lock);
+	if (atomic_dec_and_test(&td->count)) {
+		close_table_device(td, md);
+		list_del(&td->list);
+		kfree(td);
+	}
+	mutex_unlock(&md->table_devices_lock);
+}
+EXPORT_SYMBOL(dm_put_table_device);
+
+static void free_table_devices(struct list_head *devices)
+{
+	struct list_head *tmp, *next;
+
+	list_for_each_safe(tmp, next, devices) {
+		struct table_device *td = list_entry(tmp, struct table_device, list);
+
+		DMWARN("dm_destroy: %s still exists with %d references",
+		       td->dm_dev.name, atomic_read(&td->count));
+		kfree(td);
+	}
+}
+
+/*
  * Get the geometry associated with a dm device
  */
 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
@@ -1944,12 +2067,14 @@ static struct mapped_device *alloc_dev(int minor)
 	md->type = DM_TYPE_NONE;
 	mutex_init(&md->suspend_lock);
 	mutex_init(&md->type_lock);
+	mutex_init(&md->table_devices_lock);
 	spin_lock_init(&md->deferred_lock);
 	atomic_set(&md->holders, 1);
 	atomic_set(&md->open_count, 0);
 	atomic_set(&md->event_nr, 0);
 	atomic_set(&md->uevent_seq, 0);
 	INIT_LIST_HEAD(&md->uevent_list);
+	INIT_LIST_HEAD(&md->table_devices);
 	spin_lock_init(&md->uevent_lock);
 
 	md->queue = blk_alloc_queue(GFP_KERNEL);
@@ -2035,6 +2160,7 @@ static void free_dev(struct mapped_device *md)
 	blk_integrity_unregister(md->disk);
 	del_gendisk(md->disk);
 	cleanup_srcu_struct(&md->io_barrier);
+	free_table_devices(&md->table_devices);
 	free_minor(minor);
 
 	spin_lock(&_minor_lock);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index e81d2152fa68..988c7fb7b145 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -44,7 +44,7 @@
 struct dm_dev_internal {
 	struct list_head list;
 	atomic_t count;
-	struct dm_dev dm_dev;
+	struct dm_dev *dm_dev;
 };
 
 struct dm_table;
@@ -188,6 +188,9 @@ int dm_cancel_deferred_remove(struct mapped_device *md);
 int dm_request_based(struct mapped_device *md);
 sector_t dm_get_size(struct mapped_device *md);
 struct request_queue *dm_get_md_queue(struct mapped_device *md);
+int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+			struct dm_dev **result);
+void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
 struct dm_stats *dm_get_stats(struct mapped_device *md);
 
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index c8a4302093a3..3315ab21f728 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	27
+#define DM_VERSION_MINOR	28
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2013-10-30)"
+#define DM_VERSION_EXTRA	"-ioctl (2014-09-17)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */