author    David Howells <dhowells@redhat.com>    2006-08-29 14:06:00 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>    2006-09-30 14:32:11 -0400
commit    831058dec3735665fe91bd0d37b6a8cf56b91abd (patch)
tree      5ef7d38d9c43b24d5a31eb8f0e23c91e20e799e7 /mm
parent    b398f6bff93a247d2a7099e92905374966e4558f (diff)
[PATCH] BLOCK: Separate the bounce buffering code from the highmem code [try #6]
Move the bounce buffer code from mm/highmem.c to mm/bounce.c so that it can
be more easily disabled when the block layer is disabled.

!!!NOTE!!! There may be a bug in this code: Should init_emergency_pool() be
contingent on CONFIG_HIGHMEM?

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'mm')
-rw-r--r--    mm/Makefile   |   3
-rw-r--r--    mm/bounce.c   | 302
-rw-r--r--    mm/highmem.c  | 281
3 files changed, 305 insertions(+), 281 deletions(-)
diff --git a/mm/Makefile b/mm/Makefile
index 6200c6d6afd2..4f2166a833b9 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -12,6 +12,9 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 			readahead.o swap.o truncate.o vmscan.o \
 			prio_tree.o util.o mmzone.o vmstat.o $(mmu-y)
 
+ifeq ($(CONFIG_MMU),y)
+obj-y		+= bounce.o
+endif
 obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o thrash.o
 obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
 obj-$(CONFIG_NUMA)	+= mempolicy.o
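
[Editor's aside: the CONFIG_MMU guard above means no-MMU kernels never build bounce.o at all. Callers still compile on such configurations because include/linux/blkdev.h provides no-op inline stubs. The sketch below shows roughly what those 2.6-era stubs look like; it is reproduced from memory of the contemporaneous header and is not part of this patch.]

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
        return 0;       /* no bouncing possible or needed without an MMU */
}
static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */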
diff --git a/mm/bounce.c b/mm/bounce.c
new file mode 100644
index 000000000000..e4b62d2a4024
--- /dev/null
+++ b/mm/bounce.c
@@ -0,0 +1,302 @@
+/* bounce buffer handling for block devices
+ *
+ * - Split from highmem.c
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/swap.h>
+#include <linux/bio.h>
+#include <linux/pagemap.h>
+#include <linux/mempool.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/hash.h>
+#include <linux/highmem.h>
+#include <linux/blktrace_api.h>
+#include <asm/tlbflush.h>
+
+#define POOL_SIZE	64
+#define ISA_POOL_SIZE	16
+
+static mempool_t *page_pool, *isa_page_pool;
+
+#ifdef CONFIG_HIGHMEM
+static __init int init_emergency_pool(void)
+{
+	struct sysinfo i;
+	si_meminfo(&i);
+	si_swapinfo(&i);
+
+	if (!i.totalhigh)
+		return 0;
+
+	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
+	BUG_ON(!page_pool);
+	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
+
+	return 0;
+}
+
+__initcall(init_emergency_pool);
+
+/*
+ * highmem version, map in to vec
+ */
+static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
+{
+	unsigned long flags;
+	unsigned char *vto;
+
+	local_irq_save(flags);
+	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
+	kunmap_atomic(vto, KM_BOUNCE_READ);
+	local_irq_restore(flags);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+#define bounce_copy_vec(to, vfrom)	\
+	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
+
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * allocate pages in the DMA region for the ISA pool
+ */
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
+{
+	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
+}
+
+/*
+ * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
+ * as the max address, so check if the pool has already been created.
+ */
+int init_emergency_isa_pool(void)
+{
+	if (isa_page_pool)
+		return 0;
+
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+				       mempool_free_pages, (void *) 0);
+	BUG_ON(!isa_page_pool);
+
+	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
+	return 0;
+}
+
+/*
+ * Simple bounce buffer support for highmem pages. Depending on the
+ * queue gfp mask set, *to may or may not be a highmem page. kmap it
+ * always, it will do the Right Thing
+ */
+static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
+{
+	unsigned char *vfrom;
+	struct bio_vec *tovec, *fromvec;
+	int i;
+
+	__bio_for_each_segment(tovec, to, i, 0) {
+		fromvec = from->bi_io_vec + i;
+
+		/*
+		 * not bounced
+		 */
+		if (tovec->bv_page == fromvec->bv_page)
+			continue;
+
+		/*
+		 * fromvec->bv_offset and fromvec->bv_len might have been
+		 * modified by the block layer, so use the original copy,
+		 * bounce_copy_vec already uses tovec->bv_len
+		 */
+		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+
+		flush_dcache_page(tovec->bv_page);
+		bounce_copy_vec(tovec, vfrom);
+	}
+}
+
+static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
+{
+	struct bio *bio_orig = bio->bi_private;
+	struct bio_vec *bvec, *org_vec;
+	int i;
+
+	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
+
+	/*
+	 * free up bounce indirect pages used
+	 */
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		org_vec = bio_orig->bi_io_vec + i;
+		if (bvec->bv_page == org_vec->bv_page)
+			continue;
+
+		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
+		mempool_free(bvec->bv_page, pool);
+	}
+
+	bio_endio(bio_orig, bio_orig->bi_size, err);
+	bio_put(bio);
+}
+
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bounce_end_io(bio, page_pool, err);
+	return 0;
+}
+
+static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bounce_end_io(bio, isa_page_pool, err);
+	return 0;
+}
+
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
+{
+	struct bio *bio_orig = bio->bi_private;
+
+	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+		copy_to_high_bio_irq(bio_orig, bio);
+
+	bounce_end_io(bio, pool, err);
+}
+
+static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	__bounce_end_io_read(bio, page_pool, err);
+	return 0;
+}
+
+static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	__bounce_end_io_read(bio, isa_page_pool, err);
+	return 0;
+}
+
+static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+			       mempool_t *pool)
+{
+	struct page *page;
+	struct bio *bio = NULL;
+	int i, rw = bio_data_dir(*bio_orig);
+	struct bio_vec *to, *from;
+
+	bio_for_each_segment(from, *bio_orig, i) {
+		page = from->bv_page;
+
+		/*
+		 * is destination page below bounce pfn?
+		 */
+		if (page_to_pfn(page) < q->bounce_pfn)
+			continue;
+
+		/*
+		 * irk, bounce it
+		 */
+		if (!bio)
+			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
+
+		to = bio->bi_io_vec + i;
+
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+		to->bv_len = from->bv_len;
+		to->bv_offset = from->bv_offset;
+		inc_zone_page_state(to->bv_page, NR_BOUNCE);
+
+		if (rw == WRITE) {
+			char *vto, *vfrom;
+
+			flush_dcache_page(from->bv_page);
+			vto = page_address(to->bv_page) + to->bv_offset;
+			vfrom = kmap(from->bv_page) + from->bv_offset;
+			memcpy(vto, vfrom, to->bv_len);
+			kunmap(from->bv_page);
+		}
+	}
+
+	/*
+	 * no pages bounced
+	 */
+	if (!bio)
+		return;
+
+	/*
+	 * at least one page was bounced, fill in possible non-highmem
+	 * pages
+	 */
+	__bio_for_each_segment(from, *bio_orig, i, 0) {
+		to = bio_iovec_idx(bio, i);
+		if (!to->bv_page) {
+			to->bv_page = from->bv_page;
+			to->bv_len = from->bv_len;
+			to->bv_offset = from->bv_offset;
+		}
+	}
+
+	bio->bi_bdev = (*bio_orig)->bi_bdev;
+	bio->bi_flags |= (1 << BIO_BOUNCED);
+	bio->bi_sector = (*bio_orig)->bi_sector;
+	bio->bi_rw = (*bio_orig)->bi_rw;
+
+	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
+	bio->bi_idx = (*bio_orig)->bi_idx;
+	bio->bi_size = (*bio_orig)->bi_size;
+
+	if (pool == page_pool) {
+		bio->bi_end_io = bounce_end_io_write;
+		if (rw == READ)
+			bio->bi_end_io = bounce_end_io_read;
+	} else {
+		bio->bi_end_io = bounce_end_io_write_isa;
+		if (rw == READ)
+			bio->bi_end_io = bounce_end_io_read_isa;
+	}
+
+	bio->bi_private = *bio_orig;
+	*bio_orig = bio;
+}
+
+void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+{
+	mempool_t *pool;
+
+	/*
+	 * for non-isa bounce case, just check if the bounce pfn is equal
+	 * to or bigger than the highest pfn in the system -- in that case,
+	 * don't waste time iterating over bio segments
+	 */
+	if (!(q->bounce_gfp & GFP_DMA)) {
+		if (q->bounce_pfn >= blk_max_pfn)
+			return;
+		pool = page_pool;
+	} else {
+		BUG_ON(!isa_page_pool);
+		pool = isa_page_pool;
+	}
+
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
+	/*
+	 * slow path
+	 */
+	__blk_queue_bounce(q, bio_orig, pool);
+}
+
+EXPORT_SYMBOL(blk_queue_bounce);
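
[Editor's aside: blk_queue_bounce() is not called by drivers directly; in the 2.6 tree the block layer invokes it from __make_request() for every submitted bio, and it only does work when the queue's bounce limit requires it. The fragment below is an illustrative sketch, not part of this patch, of how a driver of that era would declare its addressing limit via blk_queue_bounce_limit(); example_request_fn, example_lock, and example_init are hypothetical names.]

#include <linux/init.h>
#include <linux/module.h>
#include <linux/blkdev.h>

static DEFINE_SPINLOCK(example_lock);		/* hypothetical queue lock */

static void example_request_fn(request_queue_t *q)
{
        /* dequeue and service requests here (omitted in this sketch) */
}

static int __init example_init(void)
{
        request_queue_t *q = blk_init_queue(example_request_fn, &example_lock);

        if (!q)
                return -ENOMEM;

        /*
         * Device can only DMA below the 16MB ISA boundary.  This sets
         * q->bounce_pfn, adds GFP_DMA to q->bounce_gfp, and creates
         * isa_page_pool via init_emergency_isa_pool() -- exactly the
         * state that blk_queue_bounce() above tests for.
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);

        return 0;
}
module_init(example_init);

With the limit set, any bio containing pages at or above q->bounce_pfn is rewritten by __blk_queue_bounce() to point at bounce-pool pages before the driver's request function ever sees it.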
diff --git a/mm/highmem.c b/mm/highmem.c
index ee5519b176ee..0206e7e5018c 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -29,13 +29,6 @@
 #include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
-static mempool_t *page_pool, *isa_page_pool;
-
-static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
-{
-	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
-}
-
 /*
  * Virtual_count is not a pure "count".
  * 0 means that it is not mapped, and has not been mapped
@@ -217,282 +210,8 @@ void fastcall kunmap_high(struct page *page)
 }
 
 EXPORT_SYMBOL(kunmap_high);
-
-#define POOL_SIZE	64
-
-static __init int init_emergency_pool(void)
-{
-	struct sysinfo i;
-	si_meminfo(&i);
-	si_swapinfo(&i);
-
-	if (!i.totalhigh)
-		return 0;
-
-	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
-	BUG_ON(!page_pool);
-	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
-
-	return 0;
-}
-
-__initcall(init_emergency_pool);
-
-/*
- * highmem version, map in to vec
- */
-static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
-{
-	unsigned long flags;
-	unsigned char *vto;
-
-	local_irq_save(flags);
-	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
-	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-	kunmap_atomic(vto, KM_BOUNCE_READ);
-	local_irq_restore(flags);
-}
-
-#else /* CONFIG_HIGHMEM */
-
-#define bounce_copy_vec(to, vfrom)	\
-	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
-
 #endif
 
-#define ISA_POOL_SIZE	16
-
-/*
- * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
- * as the max address, so check if the pool has already been created.
- */
-int init_emergency_isa_pool(void)
-{
-	if (isa_page_pool)
-		return 0;
-
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
-				       mempool_free_pages, (void *) 0);
-	BUG_ON(!isa_page_pool);
-
-	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
-	return 0;
-}
-
-/*
- * Simple bounce buffer support for highmem pages. Depending on the
- * queue gfp mask set, *to may or may not be a highmem page. kmap it
- * always, it will do the Right Thing
- */
-static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
-{
-	unsigned char *vfrom;
-	struct bio_vec *tovec, *fromvec;
-	int i;
-
-	__bio_for_each_segment(tovec, to, i, 0) {
-		fromvec = from->bi_io_vec + i;
-
-		/*
-		 * not bounced
-		 */
-		if (tovec->bv_page == fromvec->bv_page)
-			continue;
-
-		/*
-		 * fromvec->bv_offset and fromvec->bv_len might have been
-		 * modified by the block layer, so use the original copy,
-		 * bounce_copy_vec already uses tovec->bv_len
-		 */
-		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
-
-		flush_dcache_page(tovec->bv_page);
-		bounce_copy_vec(tovec, vfrom);
-	}
-}
-
-static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
-{
-	struct bio *bio_orig = bio->bi_private;
-	struct bio_vec *bvec, *org_vec;
-	int i;
-
-	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
-		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
-
-	/*
-	 * free up bounce indirect pages used
-	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
-		org_vec = bio_orig->bi_io_vec + i;
-		if (bvec->bv_page == org_vec->bv_page)
-			continue;
-
-		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
-		mempool_free(bvec->bv_page, pool);
-	}
-
-	bio_endio(bio_orig, bio_orig->bi_size, err);
-	bio_put(bio);
-}
-
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
-{
-	if (bio->bi_size)
-		return 1;
-
-	bounce_end_io(bio, page_pool, err);
-	return 0;
-}
-
-static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
-{
-	if (bio->bi_size)
-		return 1;
-
-	bounce_end_io(bio, isa_page_pool, err);
-	return 0;
-}
-
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
-{
-	struct bio *bio_orig = bio->bi_private;
-
-	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
-		copy_to_high_bio_irq(bio_orig, bio);
-
-	bounce_end_io(bio, pool, err);
-}
-
-static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
-{
-	if (bio->bi_size)
-		return 1;
-
-	__bounce_end_io_read(bio, page_pool, err);
-	return 0;
-}
-
-static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
-{
-	if (bio->bi_size)
-		return 1;
-
-	__bounce_end_io_read(bio, isa_page_pool, err);
-	return 0;
-}
-
-static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-			       mempool_t *pool)
-{
-	struct page *page;
-	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig);
-	struct bio_vec *to, *from;
-
-	bio_for_each_segment(from, *bio_orig, i) {
-		page = from->bv_page;
-
-		/*
-		 * is destination page below bounce pfn?
-		 */
-		if (page_to_pfn(page) < q->bounce_pfn)
-			continue;
-
-		/*
-		 * irk, bounce it
-		 */
-		if (!bio)
-			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
-
-		to = bio->bi_io_vec + i;
-
-		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
-		to->bv_len = from->bv_len;
-		to->bv_offset = from->bv_offset;
-		inc_zone_page_state(to->bv_page, NR_BOUNCE);
-
-		if (rw == WRITE) {
-			char *vto, *vfrom;
-
-			flush_dcache_page(from->bv_page);
-			vto = page_address(to->bv_page) + to->bv_offset;
-			vfrom = kmap(from->bv_page) + from->bv_offset;
-			memcpy(vto, vfrom, to->bv_len);
-			kunmap(from->bv_page);
-		}
-	}
-
-	/*
-	 * no pages bounced
-	 */
-	if (!bio)
-		return;
-
-	/*
-	 * at least one page was bounced, fill in possible non-highmem
-	 * pages
-	 */
-	__bio_for_each_segment(from, *bio_orig, i, 0) {
-		to = bio_iovec_idx(bio, i);
-		if (!to->bv_page) {
-			to->bv_page = from->bv_page;
-			to->bv_len = from->bv_len;
-			to->bv_offset = from->bv_offset;
-		}
-	}
-
-	bio->bi_bdev = (*bio_orig)->bi_bdev;
-	bio->bi_flags |= (1 << BIO_BOUNCED);
-	bio->bi_sector = (*bio_orig)->bi_sector;
-	bio->bi_rw = (*bio_orig)->bi_rw;
-
-	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
-	bio->bi_idx = (*bio_orig)->bi_idx;
-	bio->bi_size = (*bio_orig)->bi_size;
-
-	if (pool == page_pool) {
-		bio->bi_end_io = bounce_end_io_write;
-		if (rw == READ)
-			bio->bi_end_io = bounce_end_io_read;
-	} else {
-		bio->bi_end_io = bounce_end_io_write_isa;
-		if (rw == READ)
-			bio->bi_end_io = bounce_end_io_read_isa;
-	}
-
-	bio->bi_private = *bio_orig;
-	*bio_orig = bio;
-}
-
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
-{
-	mempool_t *pool;
-
-	/*
-	 * for non-isa bounce case, just check if the bounce pfn is equal
-	 * to or bigger than the highest pfn in the system -- in that case,
-	 * don't waste time iterating over bio segments
-	 */
-	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (q->bounce_pfn >= blk_max_pfn)
-			return;
-		pool = page_pool;
-	} else {
-		BUG_ON(!isa_page_pool);
-		pool = isa_page_pool;
-	}
-
-	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
-
-	/*
-	 * slow path
-	 */
-	__blk_queue_bounce(q, bio_orig, pool);
-}
-
-EXPORT_SYMBOL(blk_queue_bounce);
-
 #if defined(HASHED_PAGE_VIRTUAL)
 
 #define PA_HASH_ORDER	7