aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--mm/highmem.c23
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mempool.c42
3 files changed, 51 insertions, 16 deletions
diff --git a/mm/highmem.c b/mm/highmem.c
index d0ea1eec6a9a..55885f64af40 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -31,14 +31,9 @@
31 31
32static mempool_t *page_pool, *isa_page_pool; 32static mempool_t *page_pool, *isa_page_pool;
33 33
34static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data) 34static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
35{ 35{
36 return alloc_page(gfp_mask | GFP_DMA); 36 return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
37}
38
39static void page_pool_free(void *page, void *data)
40{
41 __free_page(page);
42} 37}
43 38
44/* 39/*
@@ -51,11 +46,6 @@ static void page_pool_free(void *page, void *data)
51 */ 46 */
52#ifdef CONFIG_HIGHMEM 47#ifdef CONFIG_HIGHMEM
53 48
54static void *page_pool_alloc(gfp_t gfp_mask, void *data)
55{
56 return alloc_page(gfp_mask);
57}
58
59static int pkmap_count[LAST_PKMAP]; 49static int pkmap_count[LAST_PKMAP];
60static unsigned int last_pkmap_nr; 50static unsigned int last_pkmap_nr;
61static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); 51static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -229,7 +219,7 @@ static __init int init_emergency_pool(void)
229 if (!i.totalhigh) 219 if (!i.totalhigh)
230 return 0; 220 return 0;
231 221
232 page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); 222 page_pool = mempool_create_page_pool(POOL_SIZE, 0);
233 if (!page_pool) 223 if (!page_pool)
234 BUG(); 224 BUG();
235 printk("highmem bounce pool size: %d pages\n", POOL_SIZE); 225 printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
@@ -272,7 +262,8 @@ int init_emergency_isa_pool(void)
272 if (isa_page_pool) 262 if (isa_page_pool)
273 return 0; 263 return 0;
274 264
275 isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL); 265 isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
266 mempool_free_pages, (void *) 0);
276 if (!isa_page_pool) 267 if (!isa_page_pool)
277 BUG(); 268 BUG();
278 269
@@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
337 bio_put(bio); 328 bio_put(bio);
338} 329}
339 330
340static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err) 331static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
341{ 332{
342 if (bio->bi_size) 333 if (bio->bi_size)
343 return 1; 334 return 1;
@@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
384} 375}
385 376
386static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, 377static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
387 mempool_t *pool) 378 mempool_t *pool)
388{ 379{
389 struct page *page; 380 struct page *page;
390 struct bio *bio = NULL; 381 struct bio *bio = NULL;
diff --git a/mm/memory.c b/mm/memory.c
index d90ff9d04957..8d8f52569f32 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1071,6 +1071,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1071 } 1071 }
1072 if (pages) { 1072 if (pages) {
1073 pages[i] = page; 1073 pages[i] = page;
1074
1075 flush_anon_page(page, start);
1074 flush_dcache_page(page); 1076 flush_dcache_page(page);
1075 } 1077 }
1076 if (vmas) 1078 if (vmas)
diff --git a/mm/mempool.c b/mm/mempool.c
index 9ef13dd68ab7..fe6e05289cc5 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -289,3 +289,45 @@ void mempool_free_slab(void *element, void *pool_data)
289 kmem_cache_free(mem, element); 289 kmem_cache_free(mem, element);
290} 290}
291EXPORT_SYMBOL(mempool_free_slab); 291EXPORT_SYMBOL(mempool_free_slab);
292
293/*
294 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
295 * specified by pool_data
296 */
297void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
298{
299 size_t size = (size_t)(long)pool_data;
300 return kmalloc(size, gfp_mask);
301}
302EXPORT_SYMBOL(mempool_kmalloc);
303
304void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
305{
306 size_t size = (size_t) pool_data;
307 return kzalloc(size, gfp_mask);
308}
309EXPORT_SYMBOL(mempool_kzalloc);
310
311void mempool_kfree(void *element, void *pool_data)
312{
313 kfree(element);
314}
315EXPORT_SYMBOL(mempool_kfree);
316
317/*
318 * A simple mempool-backed page allocator that allocates pages
319 * of the order specified by pool_data.
320 */
321void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
322{
323 int order = (int)(long)pool_data;
324 return alloc_pages(gfp_mask, order);
325}
326EXPORT_SYMBOL(mempool_alloc_pages);
327
328void mempool_free_pages(void *element, void *pool_data)
329{
330 int order = (int)(long)pool_data;
331 __free_pages(element, order);
332}
333EXPORT_SYMBOL(mempool_free_pages);