Diffstat (limited to 'drivers/md/dm-table.c')
 drivers/md/dm-table.c | 59 +++++++++++++++++++++++---------------
 1 file changed, 39 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 9b1e2f5ca630..8f56a54cf0ce 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/mutex.h>
 #include <asm/atomic.h>
 
 #define MAX_DEPTH 16
@@ -22,6 +23,7 @@
 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
 
 struct dm_table {
+	struct mapped_device *md;
 	atomic_t holders;
 
 	/* btree table */
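The hunk above gives every table a back-pointer to the mapped_device that
owns it. As a reading aid only (not part of the patch), and assuming the
remaining members are unchanged, the struct now begins:

	struct dm_table {
		struct mapped_device *md;	/* owning device, set at create time */
		atomic_t holders;

		/* btree table */
		/* ... remaining members unchanged ... */
	};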
@@ -97,6 +99,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
+
+	lhs->no_cluster |= rhs->no_cluster;
 }
 
 /*
@@ -204,7 +208,8 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 	return 0;
 }
 
-int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
+int dm_table_create(struct dm_table **result, int mode,
+		    unsigned num_targets, struct mapped_device *md)
 {
 	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
 
@@ -227,6 +232,7 @@ int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
 	}
 
 	t->mode = mode;
+	t->md = md;
 	*result = t;
 	return 0;
 }
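With the signature change above, every caller of dm_table_create() must now
supply the owning mapped_device. A minimal sketch of an updated call site;
the surrounding variables (md, num_targets) are hypothetical, not from this
patch:

	struct dm_table *t;
	int r;

	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, num_targets, md);
	if (r)
		return r;
	/* t->md now lets table code reach its device, e.g. via dm_disk(t->md) */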
@@ -345,20 +351,19 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 /*
  * Open a device so we can use it as a map destination.
  */
-static int open_dev(struct dm_dev *d, dev_t dev)
+static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
 {
 	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
-	if (d->bdev)
-		BUG();
+	BUG_ON(d->bdev);
 
 	bdev = open_by_devnum(dev, d->mode);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
-	r = bd_claim(bdev, _claim_ptr);
+	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
 	if (r)
 		blkdev_put(bdev);
 	else
@@ -369,12 +374,12 @@ static int open_dev(struct dm_dev *d, dev_t dev)
 /*
  * Close a device that we've been using.
  */
-static void close_dev(struct dm_dev *d)
+static void close_dev(struct dm_dev *d, struct mapped_device *md)
 {
 	if (!d->bdev)
 		return;
 
-	bd_release(d->bdev);
+	bd_release_from_disk(d->bdev, dm_disk(md));
 	blkdev_put(d->bdev);
 	d->bdev = NULL;
 }
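Taken together, the two hunks above switch device claiming from the anonymous
bd_claim()/bd_release() pair to the by-disk variants, which also record the
holder relationship in sysfs on behalf of dm's gendisk. A condensed sketch of
the pairing, with error handling elided:

	bdev = open_by_devnum(dev, d->mode);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));	/* claim + sysfs link */
	/* ... bdev in use as a map destination ... */
	bd_release_from_disk(bdev, dm_disk(md));		/* drop link + claim */
	blkdev_put(bdev);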
@@ -395,7 +400,7 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
  * careful to leave things as they were if we fail to reopen the
  * device.
  */
-static int upgrade_mode(struct dm_dev *dd, int new_mode)
+static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
 {
 	int r;
 	struct dm_dev dd_copy;
@@ -405,9 +410,9 @@ static int upgrade_mode(struct dm_dev *dd, int new_mode)
 
 	dd->mode |= new_mode;
 	dd->bdev = NULL;
-	r = open_dev(dd, dev);
+	r = open_dev(dd, dev, md);
 	if (!r)
-		close_dev(&dd_copy);
+		close_dev(&dd_copy, md);
 	else
 		*dd = dd_copy;
 
@@ -427,8 +432,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 	struct dm_dev *dd;
 	unsigned int major, minor;
 
-	if (!t)
-		BUG();
+	BUG_ON(!t);
 
 	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
 		/* Extract the major/minor numbers */
@@ -450,7 +454,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		dd->mode = mode;
 		dd->bdev = NULL;
 
-		if ((r = open_dev(dd, dev))) {
+		if ((r = open_dev(dd, dev, t->md))) {
 			kfree(dd);
 			return r;
 		}
@@ -461,7 +465,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		list_add(&dd->list, &t->devices);
 
 	} else if (dd->mode != (mode | dd->mode)) {
-		r = upgrade_mode(dd, mode);
+		r = upgrade_mode(dd, mode, t->md);
 		if (r)
 			return r;
 	}
@@ -525,6 +529,8 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 		rs->seg_boundary_mask =
 			min_not_zero(rs->seg_boundary_mask,
 				     q->seg_boundary_mask);
+
+		rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	}
 
 	return r;
@@ -536,7 +542,7 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
 {
 	if (atomic_dec_and_test(&dd->count)) {
-		close_dev(dd);
+		close_dev(dd, ti->table->md);
 		list_del(&dd->list);
 		kfree(dd);
 	}
@@ -765,14 +771,14 @@ int dm_table_complete(struct dm_table *t)
 	return r;
 }
 
-static DECLARE_MUTEX(_event_lock);
+static DEFINE_MUTEX(_event_lock);
 void dm_table_event_callback(struct dm_table *t,
 			void (*fn)(void *), void *context)
 {
-	down(&_event_lock);
+	mutex_lock(&_event_lock);
 	t->event_fn = fn;
 	t->event_context = context;
-	up(&_event_lock);
+	mutex_unlock(&_event_lock);
 }
 
 void dm_table_event(struct dm_table *t)
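_event_lock was previously a semaphore used for mutual exclusion; the hunk
above moves it to the dedicated mutex type (hence the new <linux/mutex.h>
include), with mutex_lock()/mutex_unlock() replacing down()/up(). The pattern
in isolation, with a hypothetical lock name:

	static DEFINE_MUTEX(my_lock);

	mutex_lock(&my_lock);
	/* ... critical section; a mutex, like the semaphore, may sleep ... */
	mutex_unlock(&my_lock);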
@@ -783,10 +789,10 @@ void dm_table_event(struct dm_table *t)
 	 */
 	BUG_ON(in_interrupt());
 
-	down(&_event_lock);
+	mutex_lock(&_event_lock);
 	if (t->event_fn)
 		t->event_fn(t->event_context);
-	up(&_event_lock);
+	mutex_unlock(&_event_lock);
 }
 
 sector_t dm_table_get_size(struct dm_table *t)
@@ -834,6 +840,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
+	if (t->limits.no_cluster)
+		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
+	else
+		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
+
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
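The hunk above is the final leg of the new no_cluster restriction. Pulling
the three pieces of this patch together as a reading sketch (no new code):

	/* dm_get_device(): a device whose queue lacks QUEUE_FLAG_CLUSTER ... */
	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/* ... combine_restrictions_low(): is ORed up into the table ... */
	lhs->no_cluster |= rhs->no_cluster;

	/* ... dm_table_set_restrictions(): and clears the flag on dm's queue. */
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);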
@@ -945,12 +956,20 @@ int dm_table_flush_all(struct dm_table *t)
 	return ret;
 }
 
+struct mapped_device *dm_table_get_md(struct dm_table *t)
+{
+	dm_get(t->md);
+
+	return t->md;
+}
+
 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
 EXPORT_SYMBOL(dm_put_device);
 EXPORT_SYMBOL(dm_table_event);
 EXPORT_SYMBOL(dm_table_get_size);
 EXPORT_SYMBOL(dm_table_get_mode);
+EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
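dm_table_get_md() takes a reference on the md via dm_get() before returning
it, so the caller owns a reference and must drop it. A hypothetical caller:

	struct mapped_device *md = dm_table_get_md(t);

	/* ... e.g. pass dm_disk(md) to code that needs the gendisk ... */

	dm_put(md);	/* balances the dm_get() inside dm_table_get_md() */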