aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorBalbir Singh <balbir@linux.vnet.ibm.com>2008-02-07 03:14:05 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-07 11:42:19 -0500
commit35c754d79f4da80d5e8972f6403dd26f7962fd88 (patch)
tree5e497fd0ac832b5c832044d0267170d7144e1a0e /mm
parent044d66c1d2b1c5aa50b4d6d68c21c6c93dd678da (diff)
memory controller BUG_ON()
Move mem_cgroup_cache_charge() above radix_tree_preload(). radix_tree_preload() disables preemption, so even though the gfp_mask passed in contains __GFP_WAIT, we cannot really do __GFP_WAIT allocations, and thus we hit a BUG_ON() in kmem_cache_alloc(). This patch moves mem_cgroup_cache_charge() to above radix_tree_preload() for cache charging. Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/filemap.c13
-rw-r--r--mm/swap_state.c13
2 files changed, 13 insertions, 13 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 63040d5e0ae2..35867ab72640 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -460,14 +460,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
460int add_to_page_cache(struct page *page, struct address_space *mapping, 460int add_to_page_cache(struct page *page, struct address_space *mapping,
461 pgoff_t offset, gfp_t gfp_mask) 461 pgoff_t offset, gfp_t gfp_mask)
462{ 462{
463 int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); 463 int error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
464 if (error)
465 goto out;
464 466
467 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
465 if (error == 0) { 468 if (error == 0) {
466
467 error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
468 if (error)
469 goto out;
470
471 write_lock_irq(&mapping->tree_lock); 469 write_lock_irq(&mapping->tree_lock);
472 error = radix_tree_insert(&mapping->page_tree, offset, page); 470 error = radix_tree_insert(&mapping->page_tree, offset, page);
473 if (!error) { 471 if (!error) {
@@ -482,7 +480,8 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
482 480
483 write_unlock_irq(&mapping->tree_lock); 481 write_unlock_irq(&mapping->tree_lock);
484 radix_tree_preload_end(); 482 radix_tree_preload_end();
485 } 483 } else
484 mem_cgroup_uncharge_page(page);
486out: 485out:
487 return error; 486 return error;
488} 487}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 581b609e748d..6ce0669acedc 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -75,13 +75,13 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
75 BUG_ON(!PageLocked(page)); 75 BUG_ON(!PageLocked(page));
76 BUG_ON(PageSwapCache(page)); 76 BUG_ON(PageSwapCache(page));
77 BUG_ON(PagePrivate(page)); 77 BUG_ON(PagePrivate(page));
78 error = radix_tree_preload(gfp_mask);
79 if (!error) {
80 78
81 error = mem_cgroup_cache_charge(page, current->mm, gfp_mask); 79 error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
82 if (error) 80 if (error)
83 goto out; 81 goto out;
84 82
83 error = radix_tree_preload(gfp_mask);
84 if (!error) {
85 write_lock_irq(&swapper_space.tree_lock); 85 write_lock_irq(&swapper_space.tree_lock);
86 error = radix_tree_insert(&swapper_space.page_tree, 86 error = radix_tree_insert(&swapper_space.page_tree,
87 entry.val, page); 87 entry.val, page);
@@ -97,7 +97,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
97 } 97 }
98 write_unlock_irq(&swapper_space.tree_lock); 98 write_unlock_irq(&swapper_space.tree_lock);
99 radix_tree_preload_end(); 99 radix_tree_preload_end();
100 } 100 } else
101 mem_cgroup_uncharge_page(page);
101out: 102out:
102 return error; 103 return error;
103} 104}