author     Matthew Dobson <colpatch@us.ibm.com>      2006-03-26 04:37:45 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-03-26 11:56:59 -0500
commit     a19b27ce3847c3a5d4ea6b6c91b6f7154759af23
tree       794dc69869408bee9154b3e9d9852327e5219f4c
parent     6e0678f394c7bd21bfa5d252b071a09e10e7a749
[PATCH] mempool: use common mempool page allocator
Convert two mempool users that currently use their own mempool-backed page
allocators to use the generic mempool page allocator. Also included are 2
trivial whitespace fixes.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   drivers/md/dm-crypt.c   17
-rw-r--r--   mm/highmem.c            23
2 files changed, 8 insertions, 32 deletions
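For context, the "common mempool page allocator" that both call sites are converted to is the mempool_alloc_pages()/mempool_free_pages() pair plus the mempool_create_page_pool() wrapper. A minimal sketch of those helpers, assuming the mempool API of this kernel generation (simplified, not a verbatim copy of mm/mempool.c or include/linux/mempool.h):

/* Sketch of the generic page-backed mempool helpers this patch adopts.
 * The page order travels through the opaque pool_data pointer, which is
 * why the converted call sites pass 0 (order-0, i.e. single pages). */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}

static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
{
	return mempool_create(min_nr, mempool_alloc_pages,
			      mempool_free_pages, (void *)(long)order);
}

Note that the ISA bounce pool in mm/highmem.c cannot use mempool_create_page_pool() directly because it must OR GFP_DMA into the allocation mask, so it keeps a thin wrapper (mempool_alloc_pages_isa) around mempool_alloc_pages() and passes the order explicitly as (void *) 0.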
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e7a650f9ca07..d88b8eda3903 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -94,20 +94,6 @@ struct crypt_config {
 static kmem_cache_t *_crypt_io_pool;
 
 /*
- * Mempool alloc and free functions for the page
- */
-static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
-static void mempool_free_page(void *page, void *data)
-{
-	__free_page(page);
-}
-
-
-/*
  * Different IV generation algorithms:
  *
  * plain: the initial vector is the 32-bit low-endian version of the sector
@@ -637,8 +623,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad3;
 	}
 
-	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
-				       mempool_free_page, NULL);
+	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = PFX "Cannot allocate page mempool";
 		goto bad4;
diff --git a/mm/highmem.c b/mm/highmem.c
index d0ea1eec6a9a..55885f64af40 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -31,14 +31,9 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
 {
-	return alloc_page(gfp_mask | GFP_DMA);
-}
-
-static void page_pool_free(void *page, void *data)
-{
-	__free_page(page);
+	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
 }
 
 /*
@@ -51,11 +46,6 @@ static void page_pool_free(void *page, void *data)
  */
 #ifdef CONFIG_HIGHMEM
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -229,7 +219,7 @@ static __init int init_emergency_pool(void)
 	if (!i.totalhigh)
 		return 0;
 
-	page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
 	if (!page_pool)
 		BUG();
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
@@ -272,7 +262,8 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+				       mempool_free_pages, (void *) 0);
 	if (!isa_page_pool)
 		BUG();
 
@@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 	bio_put(bio);
 }
 
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
 {
 	if (bio->bi_size)
 		return 1;
@@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 }
 
 static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-			mempool_t *pool)
+			       mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;