diff options
author | Kiyoshi Ueda <k-ueda@ct.jp.nec.com> | 2009-06-22 05:12:36 -0400 |
---|---|---|
committer | Alasdair G Kergon <agk@redhat.com> | 2009-06-22 05:12:36 -0400 |
commit | 5d67aa2366ccb8257d103d0b43df855605c3c086 (patch) | |
tree | 1d03f2e89ef47773a757f05b17741b8cbe47e882 | |
parent | e6ee8c0b767540f59e20da3ced282601db8aa502 (diff) |
dm: do not set QUEUE_ORDERED_DRAIN if request based
Request-based dm doesn't have barrier support yet.
So we need to set QUEUE_ORDERED_DRAIN only for bio-based dm.
Since the device type is decided at the first table loading time,
setting the flag is deferred until then.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Acked-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
-rw-r--r-- | drivers/md/dm-table.c | 5 | ||||
-rw-r--r-- | drivers/md/dm.c | 11 | ||||
-rw-r--r-- | drivers/md/dm.h | 1 |
3 files changed, 16 insertions, 1 deletion
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index aaeb82ed2852..4899ebe767c8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -830,6 +830,11 @@ unsigned dm_table_get_type(struct dm_table *t) | |||
830 | return t->type; | 830 | return t->type; |
831 | } | 831 | } |
832 | 832 | ||
833 | bool dm_table_bio_based(struct dm_table *t) | ||
834 | { | ||
835 | return dm_table_get_type(t) == DM_TYPE_BIO_BASED; | ||
836 | } | ||
837 | |||
833 | bool dm_table_request_based(struct dm_table *t) | 838 | bool dm_table_request_based(struct dm_table *t) |
834 | { | 839 | { |
835 | return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; | 840 | return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 5a843c1f4d64..00c768860818 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1768,7 +1768,6 @@ static struct mapped_device *alloc_dev(int minor) | |||
1768 | md->queue->backing_dev_info.congested_fn = dm_any_congested; | 1768 | md->queue->backing_dev_info.congested_fn = dm_any_congested; |
1769 | md->queue->backing_dev_info.congested_data = md; | 1769 | md->queue->backing_dev_info.congested_data = md; |
1770 | blk_queue_make_request(md->queue, dm_request); | 1770 | blk_queue_make_request(md->queue, dm_request); |
1771 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); | ||
1772 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); | 1771 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); |
1773 | md->queue->unplug_fn = dm_unplug_all; | 1772 | md->queue->unplug_fn = dm_unplug_all; |
1774 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); | 1773 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); |
@@ -2201,6 +2200,16 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) | |||
2201 | goto out; | 2200 | goto out; |
2202 | } | 2201 | } |
2203 | 2202 | ||
2203 | /* | ||
2204 | * It is enough that blk_queue_ordered() is called only once when | ||
2205 | * the first bio-based table is bound. | ||
2206 | * | ||
2207 | * This setting should be moved to alloc_dev() when request-based dm | ||
2208 | * supports barrier. | ||
2209 | */ | ||
2210 | if (!md->map && dm_table_bio_based(table)) | ||
2211 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); | ||
2212 | |||
2204 | __unbind(md); | 2213 | __unbind(md); |
2205 | r = __bind(md, table, &limits); | 2214 | r = __bind(md, table, &limits); |
2206 | 2215 | ||
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a7663eba17e2..23278ae80f08 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -61,6 +61,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits); | |||
61 | int dm_table_any_busy_target(struct dm_table *t); | 61 | int dm_table_any_busy_target(struct dm_table *t); |
62 | int dm_table_set_type(struct dm_table *t); | 62 | int dm_table_set_type(struct dm_table *t); |
63 | unsigned dm_table_get_type(struct dm_table *t); | 63 | unsigned dm_table_get_type(struct dm_table *t); |
64 | bool dm_table_bio_based(struct dm_table *t); | ||
64 | bool dm_table_request_based(struct dm_table *t); | 65 | bool dm_table_request_based(struct dm_table *t); |
65 | int dm_table_alloc_md_mempools(struct dm_table *t); | 66 | int dm_table_alloc_md_mempools(struct dm_table *t); |
66 | void dm_table_free_md_mempools(struct dm_table *t); | 67 | void dm_table_free_md_mempools(struct dm_table *t); |