| author | Martin K. Petersen <martin.petersen@oracle.com> | 2009-06-16 02:23:52 -0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2009-06-16 02:23:52 -0400 |
| commit | e475bba2fdee9c3dbfe25f026f8fb8de69508ad2 (patch) | |
| tree | cb2a050b58fbd8cd7a4d82349164622fedc2c6d0 /block | |
| parent | 6923715ae39ed39ac2fc1993e5061668f4f71ad0 (diff) | |
block: Introduce helper to reset queue limits to default values
DM reuses the request queue when swapping in a new device table.
Introduce blk_set_default_limits() which can be used to reset the
queue_limits prior to stacking devices.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Alasdair G Kergon <agk@redhat.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
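Not part of the commit itself, just a hedged sketch of the intended use: a stacking driver that reuses an existing request_queue can reset a scratch queue_limits with blk_set_default_limits() and then fold in each member device's limits with blk_stack_limits() (added in the same development cycle) before publishing the result on the queue. The example_rebuild_limits() helper, its parameters, and the direct q->limits assignment are illustrative assumptions, not the actual device-mapper table-load path.

#include <linux/blkdev.h>

/*
 * Illustrative only: rebuild the limits of a reused queue from a set of
 * member block devices.  Real DM does this during table load with its
 * own data structures; the names here are invented for the sketch.
 */
static void example_rebuild_limits(struct request_queue *q,
				   struct block_device **members,
				   unsigned int count)
{
	struct queue_limits limits;
	unsigned int i;

	/*
	 * Start from pristine defaults instead of whatever the previous
	 * table left behind in the reused queue.
	 */
	blk_set_default_limits(&limits);

	/*
	 * Fold in the limits of every underlying device; the return value
	 * (an alignment warning) is ignored for brevity.
	 */
	for (i = 0; i < count; i++) {
		struct request_queue *bq = bdev_get_queue(members[i]);

		blk_stack_limits(&limits, &bq->limits, 0);
	}

	/* Publish the combined limits on the existing queue. */
	q->limits = limits;
}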
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-settings.c | 33
1 file changed, 27 insertions, 6 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 138610b98956..7541ea4bf9fe 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -96,6 +96,31 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
 EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 
 /**
+ * blk_set_default_limits - reset limits to default values
+ * @lim:  the queue_limits structure to reset
+ *
+ * Description:
+ *   Returns a queue_limit struct to its default state.  Can be used by
+ *   stacking drivers like DM that stage table swaps and reuse an
+ *   existing device queue.
+ */
+void blk_set_default_limits(struct queue_limits *lim)
+{
+	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
+	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+	lim->max_segment_size = MAX_SEGMENT_SIZE;
+	lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
+	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
+	lim->bounce_pfn = BLK_BOUNCE_ANY;
+	lim->alignment_offset = 0;
+	lim->io_opt = 0;
+	lim->misaligned = 0;
+	lim->no_cluster = 0;
+}
+EXPORT_SYMBOL(blk_set_default_limits);
+
+/**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q: the request queue for the device to be affected
  * @mfn: the alternate make_request function
@@ -123,14 +148,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	 * set defaults
 	 */
 	q->nr_requests = BLKDEV_MAX_RQ;
-	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
-	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 
 	q->make_request_fn = mfn;
-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-	blk_queue_logical_block_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
 	q->nr_batching = BLK_BATCH_REQ;
@@ -143,6 +162,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
 
+	blk_set_default_limits(&q->limits);
+
 	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */