Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c | 88
1 file changed, 67 insertions(+), 21 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a64798ef481e..4d710b7a133b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -10,6 +10,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/moduleparam.h>
 #include <linux/blkpg.h>
 #include <linux/bio.h>
@@ -17,6 +18,7 @@
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
+#include <linux/hdreg.h>
 #include <linux/blktrace_api.h>
 
 static const char *_name = DM_NAME;
@@ -69,6 +71,7 @@ struct mapped_device {
 
 	request_queue_t *queue;
 	struct gendisk *disk;
+	char name[16];
 
 	void *interface_ptr;
 
@@ -101,6 +104,9 @@ struct mapped_device {
 	 */
 	struct super_block *frozen_sb;
 	struct block_device *suspended_bdev;
+
+	/* forced geometry settings */
+	struct hd_geometry geometry;
 };
 
 #define MIN_IOS 256
@@ -226,6 +232,13 @@ static int dm_blk_close(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+
+	return dm_get_geometry(md, geo);
+}
+
 static inline struct dm_io *alloc_io(struct mapped_device *md)
 {
 	return mempool_alloc(md->io_pool, GFP_NOIO);
@@ -312,6 +325,33 @@ struct dm_table *dm_get_table(struct mapped_device *md)
 	return t;
 }
 
+/*
+ * Get the geometry associated with a dm device
+ */
+int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
+{
+	*geo = md->geometry;
+
+	return 0;
+}
+
+/*
+ * Set the geometry of a device.
+ */
+int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
+{
+	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
+
+	if (geo->start > sz) {
+		DMWARN("Start sector is beyond the geometry limits.");
+		return -EINVAL;
+	}
+
+	md->geometry = *geo;
+
+	return 0;
+}
+
 /*-----------------------------------------------------------------
  * CRUD START:
  * A more elegant soln is in the works that uses the queue
@@ -704,14 +744,14 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 /*-----------------------------------------------------------------
  * An IDR is used to keep track of allocated minor numbers.
  *---------------------------------------------------------------*/
-static DECLARE_MUTEX(_minor_lock);
+static DEFINE_MUTEX(_minor_lock);
 static DEFINE_IDR(_minor_idr);
 
 static void free_minor(unsigned int minor)
 {
-	down(&_minor_lock);
+	mutex_lock(&_minor_lock);
 	idr_remove(&_minor_idr, minor);
-	up(&_minor_lock);
+	mutex_unlock(&_minor_lock);
 }
 
 /*
@@ -724,7 +764,7 @@ static int specific_minor(struct mapped_device *md, unsigned int minor)
 	if (minor >= (1 << MINORBITS))
 		return -EINVAL;
 
-	down(&_minor_lock);
+	mutex_lock(&_minor_lock);
 
 	if (idr_find(&_minor_idr, minor)) {
 		r = -EBUSY;
@@ -749,7 +789,7 @@ static int specific_minor(struct mapped_device *md, unsigned int minor)
 	}
 
 out:
-	up(&_minor_lock);
+	mutex_unlock(&_minor_lock);
 	return r;
 }
 
@@ -758,7 +798,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor)
 	int r;
 	unsigned int m;
 
-	down(&_minor_lock);
+	mutex_lock(&_minor_lock);
 
 	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
 	if (!r) {
@@ -780,7 +820,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor)
 	*minor = m;
 
 out:
-	up(&_minor_lock);
+	mutex_unlock(&_minor_lock);
 	return r;
 }
 
@@ -842,6 +882,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
 	md->disk->private_data = md;
 	sprintf(md->disk->disk_name, "dm-%d", minor);
 	add_disk(md->disk);
+	format_dev_t(md->name, MKDEV(_major, minor));
 
 	atomic_set(&md->pending, 0);
 	init_waitqueue_head(&md->wait);
@@ -904,6 +945,13 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
 	sector_t size;
 
 	size = dm_table_get_size(t);
+
+	/*
+	 * Wipe any geometry if the size of the table changed.
+	 */
+	if (size != get_capacity(md->disk))
+		memset(&md->geometry, 0, sizeof(md->geometry));
+
 	__set_size(md, size);
 	if (size == 0)
 		return 0;
@@ -967,13 +1015,13 @@ static struct mapped_device *dm_find_md(dev_t dev)
 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
 		return NULL;
 
-	down(&_minor_lock);
+	mutex_lock(&_minor_lock);
 
 	md = idr_find(&_minor_idr, minor);
 	if (!md || (dm_disk(md)->first_minor != minor))
 		md = NULL;
 
-	up(&_minor_lock);
+	mutex_unlock(&_minor_lock);
 
 	return md;
 }
@@ -988,15 +1036,9 @@ struct mapped_device *dm_get_md(dev_t dev)
 	return md;
 }
 
-void *dm_get_mdptr(dev_t dev)
+void *dm_get_mdptr(struct mapped_device *md)
 {
-	struct mapped_device *md;
-	void *mdptr = NULL;
-
-	md = dm_find_md(dev);
-	if (md)
-		mdptr = md->interface_ptr;
-	return mdptr;
+	return md->interface_ptr;
 }
 
 void dm_set_mdptr(struct mapped_device *md, void *ptr)
@@ -1011,18 +1053,18 @@ void dm_get(struct mapped_device *md)
 
 void dm_put(struct mapped_device *md)
 {
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map;
 
 	if (atomic_dec_and_test(&md->holders)) {
+		map = dm_get_table(md);
 		if (!dm_suspended(md)) {
 			dm_table_presuspend_targets(map);
 			dm_table_postsuspend_targets(map);
 		}
 		__unbind(md);
+		dm_table_put(map);
 		free_dev(md);
 	}
-
-	dm_table_put(map);
 }
 
 /*
@@ -1107,6 +1149,7 @@ int dm_suspend(struct mapped_device *md, int do_lockfs)
 {
 	struct dm_table *map = NULL;
 	DECLARE_WAITQUEUE(wait, current);
+	struct bio *def;
 	int r = -EINVAL;
 
 	down(&md->suspend_lock);
@@ -1166,9 +1209,11 @@ int dm_suspend(struct mapped_device *md, int do_lockfs)
 	/* were we interrupted ? */
 	r = -EINTR;
 	if (atomic_read(&md->pending)) {
+		clear_bit(DMF_BLOCK_IO, &md->flags);
+		def = bio_list_get(&md->deferred);
+		__flush_deferred_io(md, def);
 		up_write(&md->io_lock);
 		unlock_fs(md);
-		clear_bit(DMF_BLOCK_IO, &md->flags);
 		goto out;
 	}
 	up_write(&md->io_lock);
@@ -1262,6 +1307,7 @@ int dm_suspended(struct mapped_device *md)
 static struct block_device_operations dm_blk_dops = {
 	.open = dm_blk_open,
 	.release = dm_blk_close,
+	.getgeo = dm_blk_getgeo,
 	.owner = THIS_MODULE
 };
 