Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r--  drivers/md/dm-table.c  97
1 file changed, 62 insertions(+), 35 deletions(-)

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 61f441409234..a740a6950f59 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
 	struct list_head *tmp, *next;
 
 	list_for_each_safe(tmp, next, devices) {
-		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+		struct dm_dev_internal *dd =
+		    list_entry(tmp, struct dm_dev_internal, list);
 		kfree(dd);
 	}
 }
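
For context (not part of the diff): the hunks below read naturally once you know that struct dm_dev_internal wraps the published struct dm_dev (bdev, mode, name) together with the table-private bookkeeping fields the code keeps touching (list, count). A plausible layout, inferred from those field accesses rather than quoted from the kernel headers:

    /*
     * Sketch inferred from this patch's field accesses, not copied from
     * the tree: the wrapper keeps the list linkage and the refcount
     * private, while the embedded dm_dev is the part targets see.
     */
    struct dm_dev_internal {
        struct list_head list;  /* linked into the table's device list */
        atomic_t count;         /* users of this device entry */
        struct dm_dev dm_dev;   /* published part: bdev, mode, name */
    };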
@@ -327,12 +328,12 @@ static int lookup_device(const char *path, dev_t *dev)
 /*
  * See if we've already got a device in the list.
  */
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 
 	list_for_each_entry (dd, l, list)
-		if (dd->bdev->bd_dev == dev)
+		if (dd->dm_dev.bdev->bd_dev == dev)
 			return dd;
 
 	return NULL;
@@ -341,45 +342,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 /*
  * Open a device so we can use it as a map destination.
  */
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+		    struct mapped_device *md)
 {
 	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
-	BUG_ON(d->bdev);
+	BUG_ON(d->dm_dev.bdev);
 
-	bdev = open_by_devnum(dev, d->mode);
+	bdev = open_by_devnum(dev, d->dm_dev.mode);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
 	if (r)
 		blkdev_put(bdev);
 	else
-		d->bdev = bdev;
+		d->dm_dev.bdev = bdev;
 	return r;
 }
 
 /*
  * Close a device that we've been using.
  */
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 {
-	if (!d->bdev)
+	if (!d->dm_dev.bdev)
 		return;
 
-	bd_release_from_disk(d->bdev, dm_disk(md));
-	blkdev_put(d->bdev);
-	d->bdev = NULL;
+	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+	blkdev_put(d->dm_dev.bdev);
+	d->dm_dev.bdev = NULL;
 }
 
 /*
  * If possible, this checks an area of a destination device is valid.
  */
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int check_device_area(struct dm_dev_internal *dd, sector_t start,
+			     sector_t len)
 {
-	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+	sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
 
 	if (!dev_size)
 		return 1;
@@ -392,16 +395,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
  * careful to leave things as they were if we fail to reopen the
  * device.
  */
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
+			struct mapped_device *md)
 {
 	int r;
-	struct dm_dev dd_copy;
-	dev_t dev = dd->bdev->bd_dev;
+	struct dm_dev_internal dd_copy;
+	dev_t dev = dd->dm_dev.bdev->bd_dev;
 
 	dd_copy = *dd;
 
-	dd->mode |= new_mode;
-	dd->bdev = NULL;
+	dd->dm_dev.mode |= new_mode;
+	dd->dm_dev.bdev = NULL;
 	r = open_dev(dd, dev, md);
 	if (!r)
 		close_dev(&dd_copy, md);
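
upgrade_mode() above is the subtle hunk: it snapshots the whole wrapper, retries the open with the widened mode, and restores the snapshot on failure so the caller observes no change. A self-contained userspace sketch of that idiom, with hypothetical names (struct handle, do_open) standing in for the dm types:

    #include <stdio.h>

    struct handle {
        int mode;
        int fd;     /* -1 means "not open"; stands in for dm_dev.bdev */
    };

    static int do_open(struct handle *h)
    {
        /* pretend mode bit 4 is never grantable, to hit the error path */
        if (h->mode & 4)
            return -1;
        h->fd = 42;
        return 0;
    }

    static int upgrade(struct handle *h, int new_mode)
    {
        struct handle old = *h;     /* snapshot for rollback, like dd_copy */
        int r;

        h->mode |= new_mode;
        h->fd = -1;                 /* force a fresh open */
        r = do_open(h);
        if (r)
            *h = old;   /* reopen failed: leave things as they were */
        /* on success the old handle would be released here, as close_dev() is */
        return r;
    }

    int main(void)
    {
        struct handle h = { .mode = 1, .fd = 7 };

        printf("upgrade +2: r=%d mode=%d\n", upgrade(&h, 2), h.mode); /* r=0 mode=3 */
        printf("upgrade +4: r=%d mode=%d\n", upgrade(&h, 4), h.mode); /* r=-1 mode=3 */
        return 0;
    }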
@@ -421,7 +425,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 {
 	int r;
 	dev_t uninitialized_var(dev);
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	unsigned int major, minor;
 
 	BUG_ON(!t);
@@ -443,20 +447,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		if (!dd)
 			return -ENOMEM;
 
-		dd->mode = mode;
-		dd->bdev = NULL;
+		dd->dm_dev.mode = mode;
+		dd->dm_dev.bdev = NULL;
 
 		if ((r = open_dev(dd, dev, t->md))) {
 			kfree(dd);
 			return r;
 		}
 
-		format_dev_t(dd->name, dev);
+		format_dev_t(dd->dm_dev.name, dev);
 
 		atomic_set(&dd->count, 0);
 		list_add(&dd->list, &t->devices);
 
-	} else if (dd->mode != (mode | dd->mode)) {
+	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
 		r = upgrade_mode(dd, mode, t->md);
 		if (r)
 			return r;
@@ -465,11 +469,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 
 	if (!check_device_area(dd, start, len)) {
 		DMWARN("device %s too small for target", path);
-		dm_put_device(ti, dd);
+		dm_put_device(ti, &dd->dm_dev);
 		return -EINVAL;
 	}
 
-	*result = dd;
+	*result = &dd->dm_dev;
 
 	return 0;
 }
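
The net effect on callers: __table_get_device() now hands back &dd->dm_dev, so targets keep receiving a plain struct dm_dev * and never see the wrapper. A hypothetical target constructor showing the unchanged caller side (names and argument layout are illustrative, assuming the dm_get_device() signature of this kernel generation):

    /* Hypothetical target ctr; the public API is untouched by this patch. */
    static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
    {
        struct dm_dev *dev;
        int r;

        r = dm_get_device(ti, argv[0], 0, ti->len,
                          dm_table_get_mode(ti->table), &dev);
        if (r) {
            ti->error = "Device lookup failed";
            return r;
        }

        ti->private = dev;
        return 0;
    }

The matching destructor would call dm_put_device(ti, dev), which is where the container_of() conversion in the dm_put_device() hunk below comes in.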
@@ -478,6 +482,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct io_restrictions *rs = &ti->limits;
+	char b[BDEVNAME_SIZE];
+
+	if (unlikely(!q)) {
+		DMWARN("%s: Cannot set limits for nonexistent device %s",
+		       dm_device_name(ti->table->md), bdevname(bdev, b));
+		return;
+	}
 
 	/*
 	 * Combine the device limits low.
@@ -540,8 +551,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 /*
  * Decrement a devices use count and remove it if necessary.
  */
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
+	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+						  dm_dev);
+
 	if (atomic_dec_and_test(&dd->count)) {
 		close_dev(dd, ti->table->md);
 		list_del(&dd->list);
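
dm_put_device() now accepts the published struct dm_dev * and uses container_of() to climb back to the enclosing dm_dev_internal. A standalone userspace illustration of that pointer arithmetic (types hypothetical, macro equivalent to the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner { int mode; };                     /* plays struct dm_dev */
    struct outer { int count; struct inner in; };   /* plays dm_dev_internal */

    int main(void)
    {
        struct outer o = { .count = 1, .in = { .mode = 3 } };
        struct inner *ip = &o.in;   /* the pointer callers hand in */
        struct outer *op = container_of(ip, struct outer, in);

        printf("count=%d mode=%d\n", op->count, ip->mode); /* count=1 mode=3 */
        return 0;
    }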
@@ -937,13 +951,20 @@ int dm_table_resume_targets(struct dm_table *t)
 
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
 	int r = 0;
 
 	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->bdev);
-		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+		char b[BDEVNAME_SIZE];
+
+		if (likely(q))
+			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+		else
+			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
+				     dm_device_name(t->md),
+				     bdevname(dd->dm_dev.bdev, b));
 	}
 
 	return r;
@@ -951,13 +972,19 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 
 void dm_table_unplug_all(struct dm_table *t)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
 
 	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->bdev);
-
-		blk_unplug(q);
+		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+		char b[BDEVNAME_SIZE];
+
+		if (likely(q))
+			blk_unplug(q);
+		else
+			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
+				     dm_device_name(t->md),
+				     bdevname(dd->dm_dev.bdev, b));
 	}
 }
 