Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	75
1 file changed, 71 insertions, 4 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0cf68b478878..52b39f335bb3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -37,6 +37,8 @@ static const char *_name = DM_NAME;
 static unsigned int major = 0;
 static unsigned int _major = 0;
 
+static DEFINE_IDR(_minor_idr);
+
 static DEFINE_SPINLOCK(_minor_lock);
 /*
  * For bio-based dm.
@@ -109,6 +111,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 #define DMF_FREEING 3
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_MERGE_IS_OPTIONAL 6
 
 /*
  * Work processed by per-device workqueue.
@@ -313,6 +316,12 @@ static void __exit dm_exit(void)
 
 	while (i--)
 		_exits[i]();
+
+	/*
+	 * Should be empty by this point.
+	 */
+	idr_remove_all(&_minor_idr);
+	idr_destroy(&_minor_idr);
 }
 
 /*
@@ -1171,7 +1180,8 @@ static int __clone_and_map_discard(struct clone_info *ci)
 
 	/*
 	 * Even though the device advertised discard support,
-	 * reconfiguration might have changed that since the
+	 * that does not mean every target supports it, and
+	 * reconfiguration might also have changed that since the
 	 * check was performed.
 	 */
 	if (!ti->num_discard_requests)
@@ -1705,8 +1715,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 /*-----------------------------------------------------------------
  * An IDR is used to keep track of allocated minor numbers.
  *---------------------------------------------------------------*/
-static DEFINE_IDR(_minor_idr);
-
 static void free_minor(int minor)
 {
 	spin_lock(&_minor_lock);
@@ -1800,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md)
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
 
 /*
@@ -1986,6 +1993,59 @@ static void __set_size(struct mapped_device *md, sector_t size)
 }
 
 /*
+ * Return 1 if the queue has a compulsory merge_bvec_fn function.
+ *
+ * If this function returns 0, then the device is either a non-dm
+ * device without a merge_bvec_fn, or it is a dm device that is
+ * able to split any bios it receives that are too big.
+ */
+int dm_queue_merge_is_compulsory(struct request_queue *q)
+{
+	struct mapped_device *dev_md;
+
+	if (!q->merge_bvec_fn)
+		return 0;
+
+	if (q->make_request_fn == dm_request) {
+		dev_md = q->queuedata;
+		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
+			return 0;
+	}
+
+	return 1;
+}
+
+static int dm_device_merge_is_compulsory(struct dm_target *ti,
+					 struct dm_dev *dev, sector_t start,
+					 sector_t len, void *data)
+{
+	struct block_device *bdev = dev->bdev;
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	return dm_queue_merge_is_compulsory(q);
+}
+
+/*
+ * Return 1 if it is acceptable to ignore merge_bvec_fn based
+ * on the properties of the underlying devices.
+ */
+static int dm_table_merge_is_optional(struct dm_table *table)
+{
+	unsigned i = 0;
+	struct dm_target *ti;
+
+	while (i < dm_table_get_num_targets(table)) {
+		ti = dm_table_get_target(table, i++);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
+			return 0;
+	}
+
+	return 1;
+}
+
+/*
  * Returns old map, which caller must destroy.
  */
 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -1995,6 +2055,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	struct request_queue *q = md->queue;
 	sector_t size;
 	unsigned long flags;
+	int merge_is_optional;
 
 	size = dm_table_get_size(t);
 
@@ -2020,10 +2081,16 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 
 	__bind_mempools(md, t);
 
+	merge_is_optional = dm_table_merge_is_optional(t);
+
 	write_lock_irqsave(&md->map_lock, flags);
 	old_map = md->map;
 	md->map = t;
 	dm_table_set_restrictions(t, q, limits);
+	if (merge_is_optional)
+		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
+	else
+		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
 	write_unlock_irqrestore(&md->map_lock, flags);
 
 	return old_map;
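
The exported dm_queue_merge_is_compulsory() added above is intended to be consulted by table-building code when dm is stacked on other block devices. The snippet below is only an illustrative sketch, not part of this patch: the callout name example_set_device_limits() is hypothetical, and the PAGE_SIZE cap is an assumption about how a consumer (for example in drivers/md/dm-table.c) might use the helper through the standard iterate_devices callout signature.

/*
 * Illustrative sketch (not from this patch): only restrict I/O size
 * when the underlying queue's merge_bvec_fn cannot safely be ignored.
 */
static int example_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	/*
	 * A compulsory merge_bvec_fn on a target without its own merge
	 * method forces PAGE_SIZE-sized I/O so no bio ever needs the
	 * underlying merge callback; an optional one is simply ignored.
	 */
	if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
		blk_limits_max_hw_sectors(limits,
					  (unsigned int) (PAGE_SIZE >> 9));

	return 0;
}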