Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r--  drivers/md/dm-table.c | 53
1 file changed, 37 insertions(+), 16 deletions(-)

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 907b08ddb783..8f56a54cf0ce 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/mutex.h>
 #include <asm/atomic.h>
 
 #define MAX_DEPTH 16
@@ -22,6 +23,7 @@
 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
 
 struct dm_table {
+	struct mapped_device *md;
 	atomic_t holders;
 
 	/* btree table */
@@ -97,6 +99,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
+
+	lhs->no_cluster |= rhs->no_cluster;
 }
 
 /*
@@ -204,7 +208,8 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 	return 0;
 }
 
-int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
+int dm_table_create(struct dm_table **result, int mode,
+		    unsigned num_targets, struct mapped_device *md)
 {
 	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
 
@@ -227,6 +232,7 @@ int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
 	}
 
 	t->mode = mode;
+	t->md = md;
 	*result = t;
 	return 0;
 }
@@ -345,7 +351,7 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 /*
  * Open a device so we can use it as a map destination.
  */
-static int open_dev(struct dm_dev *d, dev_t dev)
+static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
 {
 	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
@@ -357,7 +363,7 @@ static int open_dev(struct dm_dev *d, dev_t dev)
 	bdev = open_by_devnum(dev, d->mode);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
-	r = bd_claim(bdev, _claim_ptr);
+	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
 	if (r)
 		blkdev_put(bdev);
 	else
@@ -368,12 +374,12 @@ static int open_dev(struct dm_dev *d, dev_t dev)
 /*
  * Close a device that we've been using.
  */
-static void close_dev(struct dm_dev *d)
+static void close_dev(struct dm_dev *d, struct mapped_device *md)
 {
 	if (!d->bdev)
 		return;
 
-	bd_release(d->bdev);
+	bd_release_from_disk(d->bdev, dm_disk(md));
 	blkdev_put(d->bdev);
 	d->bdev = NULL;
 }
@@ -394,7 +400,7 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
  * careful to leave things as they were if we fail to reopen the
  * device.
  */
-static int upgrade_mode(struct dm_dev *dd, int new_mode)
+static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
 {
 	int r;
 	struct dm_dev dd_copy;
@@ -404,9 +410,9 @@ static int upgrade_mode(struct dm_dev *dd, int new_mode)
 
 	dd->mode |= new_mode;
 	dd->bdev = NULL;
-	r = open_dev(dd, dev);
+	r = open_dev(dd, dev, md);
 	if (!r)
-		close_dev(&dd_copy);
+		close_dev(&dd_copy, md);
 	else
 		*dd = dd_copy;
 
@@ -448,7 +454,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		dd->mode = mode;
 		dd->bdev = NULL;
 
-		if ((r = open_dev(dd, dev))) {
+		if ((r = open_dev(dd, dev, t->md))) {
 			kfree(dd);
 			return r;
 		}
@@ -459,7 +465,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		list_add(&dd->list, &t->devices);
 
 	} else if (dd->mode != (mode | dd->mode)) {
-		r = upgrade_mode(dd, mode);
+		r = upgrade_mode(dd, mode, t->md);
 		if (r)
 			return r;
 	}
@@ -523,6 +529,8 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 		rs->seg_boundary_mask =
 			min_not_zero(rs->seg_boundary_mask,
 				     q->seg_boundary_mask);
+
+		rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	}
 
 	return r;
@@ -534,7 +542,7 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
 {
 	if (atomic_dec_and_test(&dd->count)) {
-		close_dev(dd);
+		close_dev(dd, ti->table->md);
 		list_del(&dd->list);
 		kfree(dd);
 	}
@@ -763,14 +771,14 @@ int dm_table_complete(struct dm_table *t)
 	return r;
 }
 
-static DECLARE_MUTEX(_event_lock);
+static DEFINE_MUTEX(_event_lock);
 void dm_table_event_callback(struct dm_table *t,
 			     void (*fn)(void *), void *context)
 {
-	down(&_event_lock);
+	mutex_lock(&_event_lock);
 	t->event_fn = fn;
 	t->event_context = context;
-	up(&_event_lock);
+	mutex_unlock(&_event_lock);
 }
 
 void dm_table_event(struct dm_table *t)
@@ -781,10 +789,10 @@ void dm_table_event(struct dm_table *t)
 	 */
 	BUG_ON(in_interrupt());
 
-	down(&_event_lock);
+	mutex_lock(&_event_lock);
 	if (t->event_fn)
 		t->event_fn(t->event_context);
-	up(&_event_lock);
+	mutex_unlock(&_event_lock);
 }
 
 sector_t dm_table_get_size(struct dm_table *t)
@@ -832,6 +840,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
+	if (t->limits.no_cluster)
+		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
+	else
+		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
+
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -943,12 +956,20 @@ int dm_table_flush_all(struct dm_table *t)
 	return ret;
 }
 
+struct mapped_device *dm_table_get_md(struct dm_table *t)
+{
+	dm_get(t->md);
+
+	return t->md;
+}
+
 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
 EXPORT_SYMBOL(dm_put_device);
 EXPORT_SYMBOL(dm_table_event);
 EXPORT_SYMBOL(dm_table_get_size);
 EXPORT_SYMBOL(dm_table_get_mode);
+EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
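
Not part of the patch: a minimal userspace sketch of the no_cluster stacking rule the hunks above introduce. The struct and function names below (toy_restrictions, toy_queue, stack_device, set_restrictions) are illustrative stand-ins, not the kernel's io_restrictions / struct request_queue API. The idea: any underlying device whose queue lacks QUEUE_FLAG_CLUSTER marks the table no_cluster, and dm_table_set_restrictions() then clears the flag on device-mapper's own queue.

/*
 * Toy model of the QUEUE_FLAG_CLUSTER propagation -- NOT kernel code.
 * Build with: cc -o cluster_demo cluster_demo.c
 */
#include <stdio.h>

#define TOY_QUEUE_FLAG_CLUSTER 0

struct toy_queue { unsigned long queue_flags; };
struct toy_restrictions { unsigned int no_cluster; };

/* dm_get_device() side: one non-clustering device taints the whole table */
static void stack_device(struct toy_restrictions *rs, const struct toy_queue *q)
{
	rs->no_cluster |= !(q->queue_flags & (1 << TOY_QUEUE_FLAG_CLUSTER));
}

/* dm_table_set_restrictions() side: push the result onto dm's own queue */
static void set_restrictions(const struct toy_restrictions *rs, struct toy_queue *q)
{
	if (rs->no_cluster)
		q->queue_flags &= ~(1 << TOY_QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << TOY_QUEUE_FLAG_CLUSTER);
}

int main(void)
{
	struct toy_queue clustering = { 1 << TOY_QUEUE_FLAG_CLUSTER };
	struct toy_queue non_clustering = { 0 };
	struct toy_queue dm_queue = { 1 << TOY_QUEUE_FLAG_CLUSTER };
	struct toy_restrictions rs = { 0 };

	stack_device(&rs, &clustering);
	stack_device(&rs, &non_clustering);	/* forces no_cluster on the table */
	set_restrictions(&rs, &dm_queue);

	printf("dm queue clustering: %s\n",
	       (dm_queue.queue_flags & (1 << TOY_QUEUE_FLAG_CLUSTER)) ? "on" : "off");
	return 0;
}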