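This is a coding-style cleanup of block/blk-settings.c with no change in behavior beyond the added log levels: printk() calls gain KERN_INFO, over-long lines are wrapped, blank lines before EXPORT_SYMBOL() are removed, the combined blk_max_low_pfn/blk_max_pfn declaration is split so each variable sits next to its export, spacing around pointer declarations and after commas is normalized, and the local bounce_pfn in blk_queue_bounce_limit() is renamed to b_pfn.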
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--    block/blk-settings.c | 61
1 file changed, 27 insertions, 34 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 4df09a1b8f43..c8d0c5724098 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -10,8 +10,10 @@
 
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
         q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
         q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
         q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  * blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
         /*
          * set defaults
@@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
         blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
         blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
         q->make_request_fn = mfn;
-        q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+        q->backing_dev_info.ra_pages =
+                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
         q->backing_dev_info.state = 0;
         q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
         blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
          */
         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
  **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-        unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+        unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
         int dma = 0;
 
         q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
         /* Assume anything <= 4GB can be handled by IOMMU.
            Actually some IOMMUs can handle everything, but I don't
            know of a way to test this here. */
-        if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+        if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                 dma = 1;
         q->bounce_pfn = max_low_pfn;
 #else
-        if (bounce_pfn < blk_max_low_pfn)
+        if (b_pfn < blk_max_low_pfn)
                 dma = 1;
-        q->bounce_pfn = bounce_pfn;
+        q->bounce_pfn = b_pfn;
 #endif
         if (dma) {
                 init_emergency_isa_pool();
                 q->bounce_gfp = GFP_NOIO | GFP_DMA;
-                q->bounce_pfn = bounce_pfn;
+                q->bounce_pfn = b_pfn;
         }
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
@@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
         if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-                printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+                printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+                                                        max_sectors);
         }
 
         if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
                 q->max_hw_sectors = max_sectors;
         }
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
         if (!max_segments) {
                 max_segments = 1;
-                printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+                printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+                                                        max_segments);
         }
 
         q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
         if (!max_segments) {
                 max_segments = 1;
-                printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+                printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+                                                        max_segments);
         }
 
         q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
         if (max_size < PAGE_CACHE_SIZE) {
                 max_size = PAGE_CACHE_SIZE;
-                printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+                printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+                                                        max_size);
         }
 
         q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
         q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
         /* zero is "infinity" */
-        t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-        t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 
-        t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-        t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-        t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-        t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+        t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+        t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+        t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+        t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
         if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
                 clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf,
 
         return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
         if (mask < PAGE_CACHE_SIZE - 1) {
                 mask = PAGE_CACHE_SIZE - 1;
-                printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+                printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
+                                                        mask);
         }
 
         q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
         q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
         if (mask > q->dma_alignment)
                 q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 int __init blk_settings_init(void)
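
For context, the setters touched by this patch are normally called back-to-back from a driver's queue setup path. The following is a minimal sketch against the 2.6.25-era API shown above; the mydrv_* names, the lock, and the specific limit values are hypothetical illustrations, not part of this patch:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(mydrv_lock);

/* Hypothetical request handler; a real driver would pull requests off
 * the queue with elv_next_request() and complete them here. */
static void mydrv_request_fn(struct request_queue *q)
{
}

/* Illustrative queue setup: each blk_queue_* helper records a limit
 * that the block layer enforces when building and merging requests.
 * Arguments below the allowed minimums are raised automatically, now
 * with the KERN_INFO messages added by this patch. */
static struct request_queue *mydrv_alloc_queue(void)
{
        struct request_queue *q;

        q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
        if (!q)
                return NULL;

        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);     /* bounce highmem pages */
        blk_queue_max_sectors(q, 128);                  /* at most 64KB per request */
        blk_queue_max_phys_segments(q, 32);
        blk_queue_max_hw_segments(q, 32);
        blk_queue_max_segment_size(q, 65536);
        blk_queue_hardsect_size(q, 512);                /* 512-byte hardware sectors */

        return q;
}

Stacking drivers such as MD and DM instead call blk_queue_stack_limits(), which, as its body above shows, takes the stricter of the top and bottom queues' limits rather than fixed values.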