author		Nick Piggin <npiggin@suse.de>	2006-03-22 03:08:34 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:54:01 -0500
commit		84097518d1ecd2330f9488e4c2d09953a3340e74
tree		50981fe0584c456a1a86e6d7f611eec223b5f536
parent		0f8053a509ceba4a077a50ea7b77039b5559b428
[PATCH] mm: nommu use compound pages
Now that compound page handling is properly fixed in the VM, move nommu
over to using compound pages rather than rolling their own refcounting.

nommu vm page refcounting is broken anyway, but there is no need to have
divergent code in the core VM now, nor when it gets fixed.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: David Howells <dhowells@redhat.com> (Needs testing, please)
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
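For context: a compound page ties the constituent pages of a high-order
allocation together so the block is refcounted as a single unit, with the
tail pages pointing back at the head. A minimal sketch of that setup,
loosely modeled on prep_compound_page() from kernels of this era (the exact
struct page field usage varied between versions, so treat this as an
approximation, not the verbatim kernel code):

	/*
	 * Sketch only: approximates how prep_compound_page() marked a
	 * high-order block as compound around 2.6.16. The field usage
	 * here is an assumption.
	 */
	static void prep_compound_page(struct page *page, unsigned long order)
	{
		int i;
		int nr_pages = 1 << order;

		page[1].index = order;	/* stash the order on the first tail page */
		for (i = 0; i < nr_pages; i++) {
			struct page *p = page + i;

			SetPageCompound(p);	/* mark every constituent page */
			set_page_private(p, (unsigned long)page); /* tail -> head link */
		}
	}

Because the head page owns the reference count for the whole block, nommu
no longer needs to hand-maintain a count on every tail page.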
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	7
1 file changed, 0 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7aa0181287e1..e197818a7cf6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -422,11 +422,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	mutex_debug_check_no_locks_freed(page_address(page),
 					 PAGE_SIZE<<order);
 
-#ifndef CONFIG_MMU
-	for (i = 1 ; i < (1 << order) ; ++i)
-		__put_page(page + i);
-#endif
-
 	for (i = 0 ; i < (1 << order) ; ++i)
 		reserved += free_pages_check(page + i);
 	if (reserved)
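The removed loop existed because nommu bumped a reference on each tail page
of a high-order allocation, so the free path had to drop those references
one by one before the block could be released. With this patch the nommu
side (in mm/nommu.c, outside this diffstat) allocates compound pages
instead; presumably something along these lines, with __GFP_COMP as the key
flag (a sketch under that assumption, not the verbatim nommu code):

	/*
	 * Sketch: how a nommu private mapping might allocate its backing
	 * store as a compound page. The surrounding code is assumed.
	 */
	struct page *pages;
	void *base;

	pages = alloc_pages(GFP_KERNEL | __GFP_COMP, order);
	if (!pages)
		return -ENOMEM;
	base = page_address(pages);
	/* No per-tail-page refcounting needed: the compound head carries
	 * the single reference for the whole block. */

Since tail pages never acquire their own elevated counts, __free_pages_ok()
can run the same free_pages_check() loop on MMU and nommu builds alike.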
@@ -746,7 +741,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
-#ifdef CONFIG_MMU
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -766,7 +760,6 @@ void split_page(struct page *page, unsigned int order)
 		set_page_count(page + i, 1);
 	}
 }
-#endif
 
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
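With the #ifdef CONFIG_MMU/#endif pair gone, split_page() is now compiled
on nommu builds as well. Reconstructed from the fragments visible in this
hunk together with its comment, the function is roughly the following (only
the loop and the comment are taken from the diff; the BUG_ON sanity checks
are an assumption):

	/*
	 * split_page takes a non-compound higher-order page, and splits it into
	 * n (1<<order) sub-pages: page[0..n]
	 */
	void split_page(struct page *page, unsigned int order)
	{
		int i;

		BUG_ON(PageCompound(page));	/* assumed guard: not a compound page */
		BUG_ON(!page_count(page));	/* assumed guard: caller holds a ref */
		/* Give each tail sub-page its own reference so each can later
		 * be freed independently as an order-0 page. */
		for (i = 1; i < (1 << order); i++)
			set_page_count(page + i, 1);
	}

Exposing it on nommu fits the patch's direction: split_page() only touches
non-compound pages, so it does not interfere with the compound-page
refcounting that nommu now relies on.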