author	Mike Snitzer <snitzer@redhat.com>	2009-06-22 05:12:32 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-06-22 05:12:32 -0400
commit	1197764e403d97231eb6da2b1e16f511a7fd3101 (patch)
tree	9c38793927501989c4c74cae0887309bd17484d8
parent	5ab97588fb266187b88d1ad893251c94388f18ba (diff)
dm table: establish queue limits by copying table limits
Copy the table's queue_limits to the DM device's request_queue. This properly initializes the queue's topology limits and also avoids having to track the evolution of 'struct queue_limits' in dm_table_set_restrictions().

Also fixes a bug that was introduced in dm_table_set_restrictions() via commit ae03bf639a5027d27270123f5f6e3ee6a412781d. In addition to establishing 'bounce_pfn' in the queue's limits, blk_queue_bounce_limit() also performs an allocation to set up the ISA DMA pool. This allocation resulted in "sleeping function called from invalid context" when called from dm_table_set_restrictions().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
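A minimal sketch of the failure mode described above, assuming the caller happens to be in atomic context; the lock and wrapper function below are hypothetical and only illustrate why the per-field path could sleep while a plain struct assignment cannot:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration only */

/* Sketch: contrast the two ways of applying 'struct queue_limits' when
 * the caller is in atomic context. */
static void example_apply_limits(struct request_queue *q,
				 struct queue_limits *lim)
{
	spin_lock(&example_lock);	/* sleeping is now illegal */

	/* Old approach: blk_queue_bounce_limit() may allocate the ISA DMA
	 * bounce pool; that allocation can sleep, hence the
	 * "sleeping function called from invalid context" warning. */
	blk_queue_bounce_limit(q, lim->bounce_pfn);

	/* New approach: a plain structure copy -- no allocation, never sleeps. */
	q->limits = *lim;

	spin_unlock(&example_lock);
}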
-rw-r--r--	drivers/md/dm-table.c	12
1 file changed, 2 insertions, 10 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 41ec2bf9fbe9..267817edc844 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -956,17 +956,9 @@ no_integrity:
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 {
 	/*
-	 * Make sure we obey the optimistic sub devices
-	 * restrictions.
+	 * Copy table's limits to the DM device's request_queue
 	 */
-	blk_queue_max_sectors(q, t->limits.max_sectors);
-	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
-	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
-	blk_queue_logical_block_size(q, t->limits.logical_block_size);
-	blk_queue_max_segment_size(q, t->limits.max_segment_size);
-	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
-	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
-	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
+	q->limits = t->limits;
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);