author    Mike Snitzer <snitzer@redhat.com>  2009-07-23 15:30:40 -0400
committer Alasdair G Kergon <agk@redhat.com>  2009-07-23 15:30:40 -0400
commit    a732c207d19e899845ae47139708af898daaf9fd (patch)
tree      aed98221e373868b4fe0fbba9c4fcf5ac8ede128 /drivers/md
parent    69885683d22d8c05910fd808c01fdce1322739b4 (diff)
dm: remove queue next_ordered workaround for barriers
This patch removes DM's bio-based vs request-based conditional setting of next_ordered. For bio-based DM the next_ordered check is no longer a concern (as that check is now in the __make_request path). For request-based DM the default of QUEUE_ORDERED_NONE is now appropriate.

bio-based DM was changed to work around the previously misplaced next_ordered check with this commit: 99360b4c18f7675b50d283301d46d755affe75fd

request-based DM does not yet support barriers but reacted to the above bio-based DM change with this commit: 5d67aa2366ccb8257d103d0b43df855605c3c086

The above changes are no longer needed given Neil Brown's recent fix to put the next_ordered check in the __make_request path: db64f680ba4b5c56c4be59f0698000df89ff0281

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Cc: NeilBrown <neilb@suse.de>
Acked-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Acked-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
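For context, a rough C sketch of the logic described above, assuming 2.6.31-era block-layer names (blk_queue_ordered(), QUEUE_ORDERED_NONE/QUEUE_ORDERED_DRAIN, bio_barrier(), bio_endio()); this is illustrative only, simplified from the commit description, and not the literal code of commit db64f680ba4b5c56c4be59f0698000df89ff0281:

/*
 * Illustrative sketch only (not the literal kernel code).
 *
 * The removed DM workaround: when binding the first bio-based table,
 * register minimal barrier ordering so the block layer's next_ordered
 * check would not reject barrier bios sent to the DM queue.
 */
if (!md->map && dm_table_bio_based(table))
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);

/*
 * The check the workaround was papering over, now performed in the
 * __make_request path: a barrier bio aimed at a queue that advertises
 * no ordering support is completed with -EOPNOTSUPP there, so DM no
 * longer needs to touch next_ordered at table-bind time.
 */
if (bio_barrier(bio) && q->next_ordered == QUEUE_ORDERED_NONE) {
	bio_endio(bio, -EOPNOTSUPP);
	return;
}

With the check handled generically at bio submission, both the bio-based special case in dm_swap_table() and the dm_table_bio_based() helper it relied on can be deleted, as the diff below shows.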
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-table.c   5
-rw-r--r--  drivers/md/dm.c        10
-rw-r--r--  drivers/md/dm.h         1
3 files changed, 0 insertions, 16 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2cba557d9e61..5d16d06613aa 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -830,11 +830,6 @@ unsigned dm_table_get_type(struct dm_table *t)
 	return t->type;
 }
 
-bool dm_table_bio_based(struct dm_table *t)
-{
-	return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
-}
-
 bool dm_table_request_based(struct dm_table *t)
 {
 	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9acd54a5cffb..8a311ea0d441 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2203,16 +2203,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 		goto out;
 	}
 
-	/*
-	 * It is enought that blk_queue_ordered() is called only once when
-	 * the first bio-based table is bound.
-	 *
-	 * This setting should be moved to alloc_dev() when request-based dm
-	 * supports barrier.
-	 */
-	if (!md->map && dm_table_bio_based(table))
-		blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
-
 	__unbind(md);
 	r = __bind(md, table, &limits);
 
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 23278ae80f08..a7663eba17e2 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -61,7 +61,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
 int dm_table_set_type(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
-bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);