path: root/drivers/md
author		Mike Snitzer <snitzer@redhat.com>	2010-08-11 23:14:02 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2010-08-11 23:14:02 -0400
commit		4a0b4ddf261fc89c050fe0a10ec57a61251d7ac0 (patch)
tree		23854bb6d42e7cedec9100fcdd63c307ee5d1332 /drivers/md
parent		a5664dad7e1a278d2915c2bf79cf42250e12d7db (diff)
dm: do not initialise full request queue when bio based
Change bio-based mapped devices no longer to have a fully initialized
request_queue (request_fn, elevator, etc).  This means bio-based DM
devices no longer register elevator sysfs attributes ('iosched/' tree
or 'scheduler' other than "none").

In contrast, a request-based DM device will continue to have a full
request_queue and will register elevator sysfs attributes.  Therefore
a user can determine a DM device's type by checking if elevator sysfs
attributes exist.

First allocate a minimalist request_queue structure for a DM device
(needed for both bio and request-based DM).

Initialization of a full request_queue is deferred until it is known
that the DM device is request-based, at the end of the table load
sequence.

Factor DM device's request_queue initialization:
- common to both request-based and bio-based into dm_init_md_queue().
- specific to request-based into dm_init_request_based_queue().

The md->type_lock mutex is used to protect md->queue, in addition to
md->type, during table_load().

A DM device's first table_load will establish the immutable md->type.
But md->queue initialization, based on md->type, may fail at that time
(because blk_init_allocated_queue cannot allocate memory).  Therefore
any subsequent table_load must (re)try dm_setup_md_queue independently
of establishing md->type.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
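The sysfs distinction above is checkable from userspace. A minimal sketch (not part of this patch; the device name "dm-0", the helper name, and the test against the 'iosched/' directory are illustrative assumptions based on the behaviour this commit message describes):

#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical userspace helper: a request-based DM device has a fully
 * initialised request_queue, so elv_register_queue() will have created
 * the queue/iosched sysfs directory; after this commit a bio-based
 * device never registers it.
 */
static int dm_is_request_based(const char *devname)
{
	char path[128];

	snprintf(path, sizeof(path), "/sys/block/%s/queue/iosched", devname);
	return access(path, F_OK) == 0;	/* present => request-based */
}

int main(void)
{
	printf("dm-0 is %s-based\n",
	       dm_is_request_based("dm-0") ? "request" : "bio");
	return 0;
}

Equivalently, per the commit message, reading 'scheduler' and seeing anything other than "none" would indicate a request-based device.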
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-ioctl.c	11
-rw-r--r--	drivers/md/dm.c	92
-rw-r--r--	drivers/md/dm.h	2
3 files changed, 79 insertions, 26 deletions
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4702f380cb45..ed8585954a3a 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1189,7 +1189,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		goto out;
 	}
 
-	/* Protect md->type against concurrent table loads. */
+	/* Protect md->type and md->queue against concurrent table loads. */
 	dm_lock_md_type(md);
 	if (dm_get_md_type(md) == DM_TYPE_NONE)
 		/* Initial table load: acquire type of table. */
@@ -1201,6 +1201,15 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		r = -EINVAL;
 		goto out;
 	}
+
+	/* setup md->queue to reflect md's type (may block) */
+	r = dm_setup_md_queue(md);
+	if (r) {
+		DMWARN("unable to set up device queue for new table.");
+		dm_table_destroy(t);
+		dm_unlock_md_type(md);
+		goto out;
+	}
 	dm_unlock_md_type(md);
 
 	/* stage inactive table */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 345e94c10c65..5ae0a05b4811 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -126,7 +126,7 @@ struct mapped_device {
 
 	struct request_queue *queue;
 	unsigned type;
-	/* Protect type against concurrent access. */
+	/* Protect queue and type against concurrent access. */
 	struct mutex type_lock;
 
 	struct gendisk *disk;
@@ -1856,6 +1856,28 @@ static const struct block_device_operations dm_blk_dops;
 static void dm_wq_work(struct work_struct *work);
 static void dm_rq_barrier_work(struct work_struct *work);
 
+static void dm_init_md_queue(struct mapped_device *md)
+{
+	/*
+	 * Request-based dm devices cannot be stacked on top of bio-based dm
+	 * devices.  The type of this dm device has not been decided yet.
+	 * The type is decided at the first table loading time.
+	 * To prevent problematic device stacking, clear the queue flag
+	 * for request stacking support until then.
+	 *
+	 * This queue is new, so no concurrency on the queue_flags.
+	 */
+	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info.congested_fn = dm_any_congested;
+	md->queue->backing_dev_info.congested_data = md;
+	blk_queue_make_request(md->queue, dm_request);
+	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
+	md->queue->unplug_fn = dm_unplug_all;
+	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+}
+
 /*
  * Allocate and initialise a blank device with a given minor.
  */
@@ -1895,33 +1917,11 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_LIST_HEAD(&md->uevent_list);
 	spin_lock_init(&md->uevent_lock);
 
-	md->queue = blk_init_queue(dm_request_fn, NULL);
+	md->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!md->queue)
 		goto bad_queue;
 
-	/*
-	 * Request-based dm devices cannot be stacked on top of bio-based dm
-	 * devices.  The type of this dm device has not been decided yet,
-	 * although we initialized the queue using blk_init_queue().
-	 * The type is decided at the first table loading time.
-	 * To prevent problematic device stacking, clear the queue flag
-	 * for request stacking support until then.
-	 *
-	 * This queue is new, so no concurrency on the queue_flags.
-	 */
-	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-	md->saved_make_request_fn = md->queue->make_request_fn;
-	md->queue->queuedata = md;
-	md->queue->backing_dev_info.congested_fn = dm_any_congested;
-	md->queue->backing_dev_info.congested_data = md;
-	blk_queue_make_request(md->queue, dm_request);
-	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
-	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-	blk_queue_softirq_done(md->queue, dm_softirq_done);
-	blk_queue_prep_rq(md->queue, dm_prep_fn);
-	blk_queue_lld_busy(md->queue, dm_lld_busy);
-	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+	dm_init_md_queue(md);
 
 	md->disk = alloc_disk(1);
 	if (!md->disk)
@@ -2160,6 +2160,48 @@ unsigned dm_get_md_type(struct mapped_device *md)
 	return md->type;
 }
 
+/*
+ * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ */
+static int dm_init_request_based_queue(struct mapped_device *md)
+{
+	struct request_queue *q = NULL;
+
+	if (md->queue->elevator)
+		return 1;
+
+	/* Fully initialize the queue */
+	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
+	if (!q)
+		return 0;
+
+	md->queue = q;
+	md->saved_make_request_fn = md->queue->make_request_fn;
+	dm_init_md_queue(md);
+	blk_queue_softirq_done(md->queue, dm_softirq_done);
+	blk_queue_prep_rq(md->queue, dm_prep_fn);
+	blk_queue_lld_busy(md->queue, dm_lld_busy);
+	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+
+	elv_register_queue(md->queue);
+
+	return 1;
+}
+
+/*
+ * Setup the DM device's queue based on md's type
+ */
+int dm_setup_md_queue(struct mapped_device *md)
+{
+	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
+	    !dm_init_request_based_queue(md)) {
+		DMWARN("Cannot initialize queue for request-based mapped device");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct mapped_device *dm_find_md(dev_t dev)
 {
 	struct mapped_device *md;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1db782530ce6..450fbd98c48c 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -71,6 +71,8 @@ void dm_unlock_md_type(struct mapped_device *md);
 void dm_set_md_type(struct mapped_device *md, unsigned type);
 unsigned dm_get_md_type(struct mapped_device *md);
 
+int dm_setup_md_queue(struct mapped_device *md);
+
 /*
  * To check the return value from dm_table_find_target().
  */