Diffstat (limited to 'drivers/md/dm.c')

 -rw-r--r--  drivers/md/dm.c | 101
 1 file changed, 79 insertions(+), 22 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 421c9f02d8ca..51ba1db4b3e7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -32,6 +32,7 @@ static unsigned int _major = 0;
 
 static DEFINE_SPINLOCK(_minor_lock);
 /*
+ * For bio-based dm.
  * One of these is allocated per bio.
  */
 struct dm_io {
@@ -43,6 +44,7 @@ struct dm_io {
 };
 
 /*
+ * For bio-based dm.
  * One of these is allocated per target within a bio. Hopefully
  * this will be simplified out one day.
  */
@@ -54,6 +56,27 @@ struct dm_target_io {
 
 DEFINE_TRACE(block_bio_complete);
 
+/*
+ * For request-based dm.
+ * One of these is allocated per request.
+ */
+struct dm_rq_target_io {
+	struct mapped_device *md;
+	struct dm_target *ti;
+	struct request *orig, clone;
+	int error;
+	union map_info info;
+};
+
+/*
+ * For request-based dm.
+ * One of these is allocated per bio.
+ */
+struct dm_rq_clone_bio_info {
+	struct bio *orig;
+	struct request *rq;
+};
+
 union map_info *dm_get_mapinfo(struct bio *bio)
 {
 	if (bio && bio->bi_private)
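The two new structures are bookkeeping for the request-based path the rest of this series builds on: dm_rq_target_io describes the clone of one original request, and dm_rq_clone_bio_info ties each cloned bio back to the original bio and the request it was cloned for. Nothing in this patch allocates them yet. As a rough sketch only (the helper name and the choice of bio_clone() are assumptions, not part of this diff), a clone bio could carry a dm_rq_clone_bio_info in bi_private so its completion handler can recover that mapping:

static struct bio *example_clone_one_bio(struct bio *orig, struct request *rq,
					 gfp_t gfp)
{
	struct dm_rq_clone_bio_info *info;
	struct bio *clone;

	/* _rq_bio_info_cache is the slab cache created in local_init() below */
	info = kmem_cache_alloc(_rq_bio_info_cache, gfp);
	if (!info)
		return NULL;

	clone = bio_clone(orig, gfp);		/* assumed clone helper */
	if (!clone) {
		kmem_cache_free(_rq_bio_info_cache, info);
		return NULL;
	}

	info->orig = orig;
	info->rq = rq;
	clone->bi_private = info;	/* completion can find orig/rq again */
	return clone;
}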
@@ -144,11 +167,16 @@ struct mapped_device {
 
 	/* forced geometry settings */
 	struct hd_geometry geometry;
+
+	/* sysfs handle */
+	struct kobject kobj;
 };
 
 #define MIN_IOS 256
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_tio_cache;
+static struct kmem_cache *_rq_tio_cache;
+static struct kmem_cache *_rq_bio_info_cache;
 
 static int __init local_init(void)
 {
@@ -164,9 +192,17 @@ static int __init local_init(void)
 	if (!_tio_cache)
 		goto out_free_io_cache;
 
+	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
+	if (!_rq_tio_cache)
+		goto out_free_tio_cache;
+
+	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
+	if (!_rq_bio_info_cache)
+		goto out_free_rq_tio_cache;
+
 	r = dm_uevent_init();
 	if (r)
-		goto out_free_tio_cache;
+		goto out_free_rq_bio_info_cache;
 
 	_major = major;
 	r = register_blkdev(_major, _name);
@@ -180,6 +216,10 @@ static int __init local_init(void)
 
 out_uevent_exit:
 	dm_uevent_exit();
+out_free_rq_bio_info_cache:
+	kmem_cache_destroy(_rq_bio_info_cache);
+out_free_rq_tio_cache:
+	kmem_cache_destroy(_rq_tio_cache);
 out_free_tio_cache:
 	kmem_cache_destroy(_tio_cache);
 out_free_io_cache:
@@ -190,6 +230,8 @@ out_free_io_cache:
 
 static void local_exit(void)
 {
+	kmem_cache_destroy(_rq_bio_info_cache);
+	kmem_cache_destroy(_rq_tio_cache);
 	kmem_cache_destroy(_tio_cache);
 	kmem_cache_destroy(_io_cache);
 	unregister_blkdev(_major, _name);
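local_init()/local_exit() now create and destroy the two request-based slab caches alongside the existing bio-based ones, extending the same goto-unwind error path, but nothing allocates from them yet. A plausible follow-up (mirroring how alloc_dev() already backs md->io_pool and md->tio_pool with _io_cache/_tio_cache) would be to build per-device mempools on top of them; a minimal sketch, with the helper name assumed:

static int example_create_rq_pools(struct mapped_device *md)
{
	/* reserve MIN_IOS objects so forward progress is guaranteed
	 * under memory pressure, as the bio-based pools already do */
	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!md->tio_pool)
		return -ENOMEM;

	return 0;
}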
@@ -796,7 +838,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
 		return -EIO;
-
+	if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
+		dm_table_put(ci.map);
+		bio_endio(bio, -EOPNOTSUPP);
+		return 0;
+	}
 	ci.md = md;
 	ci.bio = bio;
 	ci.io = alloc_io(md);
@@ -880,15 +926,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 	struct mapped_device *md = q->queuedata;
 	int cpu;
 
-	/*
-	 * There is no use in forwarding any barrier request since we can't
-	 * guarantee it is (or can be) handled by the targets correctly.
-	 */
-	if (unlikely(bio_barrier(bio))) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	down_read(&md->io_lock);
 
 	cpu = part_stat_lock();
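Taken together with the __split_bio() hunk above, the blanket -EOPNOTSUPP rejection of barrier bios moves out of dm_request() and into __split_bio(), where the live table is in hand, so the decision is delegated to dm_table_barrier_ok(). That helper lives in dm-table.c and is not part of this file's diff; conceptually it answers "can every target in this table handle a barrier?", roughly along these lines (the per-target feature flag shown is an assumption):

int example_table_barrier_ok(struct dm_table *t)
{
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/* assumed capability flag on the target type */
		if (!(ti->type->features & DM_TARGET_SUPPORTS_BARRIERS))
			return 0;
	}

	return 1;
}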
@@ -943,8 +980,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 	struct mapped_device *md = congested_data;
 	struct dm_table *map;
 
-	atomic_inc(&md->pending);
-
 	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
 		map = dm_get_table(md);
 		if (map) {
@@ -953,10 +988,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 		}
 	}
 
-	if (!atomic_dec_return(&md->pending))
-		/* nudge anyone waiting on suspend queue */
-		wake_up(&md->wait);
-
 	return r;
 }
 
@@ -1216,10 +1247,12 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
 
 	if (md->suspended_bdev)
 		__set_size(md, size);
-	if (size == 0)
+
+	if (!size) {
+		dm_table_destroy(t);
 		return 0;
+	}
 
-	dm_table_get(t);
 	dm_table_event_callback(t, event_callback, md);
 
 	write_lock(&md->map_lock);
@@ -1241,7 +1274,7 @@ static void __unbind(struct mapped_device *md)
 	write_lock(&md->map_lock);
 	md->map = NULL;
 	write_unlock(&md->map_lock);
-	dm_table_put(map);
+	dm_table_destroy(map);
 }
 
 /*
@@ -1255,6 +1288,8 @@ int dm_create(int minor, struct mapped_device **result)
 	if (!md)
 		return -ENXIO;
 
+	dm_sysfs_init(md);
+
 	*result = md;
 	return 0;
 }
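dm_sysfs_init() and its counterpart dm_sysfs_exit() (used in dm_put() below) are implemented in dm-sysfs.c, which is not part of this diff. A minimal sketch of what they are expected to do, assuming a dm_ktype kobj_type that supplies the attribute callbacks, is to register the kobject embedded in struct mapped_device under the gendisk's device:

/* dm_ktype is assumed to be defined in dm-sysfs.c */
static struct kobj_type dm_ktype;

int dm_sysfs_init(struct mapped_device *md)
{
	return kobject_init_and_add(dm_kobject(md), &dm_ktype,
				    &disk_to_dev(dm_disk(md))->kobj,
				    "%s", "dm");
}

void dm_sysfs_exit(struct mapped_device *md)
{
	kobject_put(dm_kobject(md));
}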
@@ -1330,8 +1365,9 @@ void dm_put(struct mapped_device *md)
 			dm_table_presuspend_targets(map);
 			dm_table_postsuspend_targets(map);
 		}
-		__unbind(md);
+		dm_sysfs_exit(md);
 		dm_table_put(map);
+		__unbind(md);
 		free_dev(md);
 	}
 }
@@ -1669,6 +1705,27 @@ struct gendisk *dm_disk(struct mapped_device *md)
 	return md->disk;
 }
 
+struct kobject *dm_kobject(struct mapped_device *md)
+{
+	return &md->kobj;
+}
+
+/*
+ * struct mapped_device should not be exported outside of dm.c
+ * so use this check to verify that kobj is part of md structure
+ */
+struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
+{
+	struct mapped_device *md;
+
+	md = container_of(kobj, struct mapped_device, kobj);
+	if (&md->kobj != kobj)
+		return NULL;
+
+	dm_get(md);
+	return md;
+}
+
 int dm_suspended(struct mapped_device *md)
 {
 	return test_bit(DMF_SUSPENDED, &md->flags);
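dm_kobject() hands the embedded kobject to dm-sysfs.c, and dm_get_from_kobject() walks back from that kobject to its mapped_device with container_of(), taking a reference before returning so the device cannot disappear while a sysfs handler uses it. A hedged sketch of how an attribute ->show() callback might use it (the attribute plumbing and names are assumptions; only the two helpers above are added by this patch):

static ssize_t example_name_show(struct kobject *kobj, char *buf)
{
	struct mapped_device *md = dm_get_from_kobject(kobj);

	if (!md)
		return -EINVAL;

	sprintf(buf, "%s\n", dm_device_name(md));

	dm_put(md);	/* drop the reference taken above */
	return strlen(buf);
}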