Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r--  drivers/md/dm-table.c  99
1 file changed, 96 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 9924ea23032d..f9fc07d7a4b9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -54,6 +54,8 @@ struct dm_table {
 	sector_t *highs;
 	struct dm_target *targets;
 
+	unsigned discards_supported:1;
+
 	/*
 	 * Indicates the rw permissions for the new logical
 	 * device. This should be a combination of FMODE_READ
@@ -203,6 +205,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 
 	INIT_LIST_HEAD(&t->devices);
 	atomic_set(&t->holders, 0);
+	t->discards_supported = 1;
 
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
@@ -245,7 +248,7 @@ void dm_table_destroy(struct dm_table *t)
 		msleep(1);
 	smp_mb();
 
-	/* free the indexes (see dm_table_complete) */
+	/* free the indexes */
 	if (t->depth >= 2)
 		vfree(t->index[t->depth - 2]);
 
@@ -770,6 +773,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 
 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
 
+	if (!tgt->num_discard_requests)
+		t->discards_supported = 0;
+
 	return 0;
 
  bad:
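
[Whether a table keeps discards enabled now hinges on every target advertising at least one discard request. A minimal sketch of the opt-in from a target's constructor; the target name is hypothetical, only the num_discard_requests field comes from this patch:

	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		/*
		 * Without this, dm_table_add_target() above clears
		 * t->discards_supported for the whole table.
		 */
		ti->num_discard_requests = 1;

		return 0;
	}
]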
@@ -778,7 +784,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	return r;
 }
 
-int dm_table_set_type(struct dm_table *t)
+static int dm_table_set_type(struct dm_table *t)
 {
 	unsigned i;
 	unsigned bio_based = 0, request_based = 0;
@@ -900,7 +906,7 @@ static int setup_indexes(struct dm_table *t)
 /*
  * Builds the btree to index the map.
  */
-int dm_table_complete(struct dm_table *t)
+static int dm_table_build_index(struct dm_table *t)
 {
 	int r = 0;
 	unsigned int leaf_nodes;
@@ -919,6 +925,55 @@ int dm_table_complete(struct dm_table *t)
 	return r;
 }
 
+/*
+ * Register the mapped device for blk_integrity support if
+ * the underlying devices support it.
+ */
+static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+{
+	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_dev_internal *dd;
+
+	list_for_each_entry(dd, devices, list)
+		if (bdev_get_integrity(dd->dm_dev.bdev))
+			return blk_integrity_register(dm_disk(md), NULL);
+
+	return 0;
+}
+
+/*
+ * Prepares the table for use by building the indices,
+ * setting the type, and allocating mempools.
+ */
+int dm_table_complete(struct dm_table *t)
+{
+	int r;
+
+	r = dm_table_set_type(t);
+	if (r) {
+		DMERR("unable to set table type");
+		return r;
+	}
+
+	r = dm_table_build_index(t);
+	if (r) {
+		DMERR("unable to build btrees");
+		return r;
+	}
+
+	r = dm_table_prealloc_integrity(t, t->md);
+	if (r) {
+		DMERR("could not register integrity profile.");
+		return r;
+	}
+
+	r = dm_table_alloc_md_mempools(t);
+	if (r)
+		DMERR("unable to allocate mempools");
+
+	return r;
+}
+
 static DEFINE_MUTEX(_event_lock);
 void dm_table_event_callback(struct dm_table *t,
 			     void (*fn)(void *), void *context)
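
[With type, index, integrity, and mempool setup folded into one entry point, a table loader calls dm_table_complete() once after adding all targets. A rough sketch of the sequence, with error handling elided and the target line hypothetical:

	struct dm_table *t;
	int r;

	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	/* one call per target line supplied by userspace */
	r = dm_table_add_target(t, "linear", 0, len, "/dev/sdb 0");
	/* type, btree index, integrity and mempools in one step */
	r = dm_table_complete(t);
]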
@@ -1086,6 +1141,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
 
+	if (!dm_table_supports_discards(t))
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
 	dm_table_set_integrity(t);
 
 	/*
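
[With QUEUE_FLAG_DISCARD now reflecting the table's capabilities, generic block code can reject discards on non-supporting dm devices up front; blkdev_issue_discard(), for instance, bails out early when the flag is clear, roughly:

	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
]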
@@ -1232,6 +1292,39 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
 	return t->md;
 }
 
+static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && blk_queue_discard(q);
+}
+
+bool dm_table_supports_discards(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	if (!t->discards_supported)
+		return 0;
+
+	/*
+	 * Ensure that at least one underlying device supports discards.
+	 * t->devices includes internal dm devices such as mirror logs
+	 * so we need to use iterate_devices here, which targets
+	 * supporting discard must provide.
+	 */
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
+			return 1;
+	}
+
+	return 0;
+}
+
 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
 EXPORT_SYMBOL(dm_put_device);
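
[The iterate_devices hook that dm_table_supports_discards() depends on is trivial for single-device targets; dm-linear's version (drivers/md/dm-linear.c of the same period) simply applies the callout to its one backing device:

	static int linear_iterate_devices(struct dm_target *ti,
					  iterate_devices_callout_fn fn, void *data)
	{
		struct linear_c *lc = ti->private;

		return fn(ti, lc->dev, lc->start, ti->len, data);
	}
]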