author    Nick Piggin <npiggin@suse.de>  2006-03-22 03:08:34 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-22 10:54:01 -0500
commit    84097518d1ecd2330f9488e4c2d09953a3340e74 (patch)
tree      50981fe0584c456a1a86e6d7f611eec223b5f536
parent    0f8053a509ceba4a077a50ea7b77039b5559b428 (diff)
[PATCH] mm: nommu use compound pages
Now that compound page handling is properly fixed in the VM, move nommu
over to using compound pages rather than rolling their own refcounting.

nommu vm page refcounting is broken anyway, but there is no need to have
divergent code in the core VM now, nor when it gets fixed.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: David Howells <dhowells@redhat.com> (Needs testing, please).
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
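For readers unfamiliar with compound pages: the scheme ties a high-order allocation's constituent pages together so that a get/put on any of them lands on the head page. The user-space sketch below is an illustrative model only (all names are invented, assuming the 2.6.16-era convention visible in the slab hunks below, where a compound page's private field points at the head); it is not the kernel implementation.

#include <assert.h>

/* Toy stand-in for struct page: just enough state to model the idea. */
struct toy_page {
        int count;              /* refcount, meaningful on the head page */
        int compound;           /* models the PG_compound flag */
        struct toy_page *head;  /* models page_private() on compound pages */
};

/* Any page of a compound block resolves to its head page. */
static struct toy_page *toy_head(struct toy_page *p)
{
        return p->compound ? p->head : p;
}

static void toy_get_page(struct toy_page *p) { toy_head(p)->count++; }
static void toy_put_page(struct toy_page *p) { toy_head(p)->count--; }

int main(void)
{
        struct toy_page blk[4] = { { .count = 1 } };

        /* mark all four pages as one compound unit headed by blk[0] */
        for (int i = 0; i < 4; i++) {
                blk[i].compound = 1;
                blk[i].head = &blk[0];
        }

        toy_get_page(&blk[3]);          /* pin via a tail page... */
        assert(blk[0].count == 2);      /* ...and the head absorbs it */
        toy_put_page(&blk[3]);
        assert(blk[0].count == 1);
        return 0;
}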
-rw-r--r--  fs/ramfs/file-nommu.c  |  3
-rw-r--r--  include/linux/mm.h     |  4
-rw-r--r--  mm/internal.h          | 12
-rw-r--r--  mm/nommu.c             |  4
-rw-r--r--  mm/page_alloc.c        |  7
-rw-r--r--  mm/slab.c              |  9
6 files changed, 11 insertions(+), 28 deletions(-)
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 3f810acd0bfa..b1ca234068f6 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -87,8 +87,7 @@ static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
        xpages = 1UL << order;
        npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-       for (loop = 0; loop < npages; loop++)
-               set_page_count(pages + loop, 1);
+       split_page(pages, order);
 
        /* trim off any pages we don't actually require */
        for (loop = npages; loop < xpages; loop++)
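The hunk above swaps ramfs's hand-rolled refcount loop for split_page(), which the core VM now provides on nommu configurations too. A minimal sketch of why the split matters here: the trailing pages beyond npages are freed one by one, so each must carry its own reference first. The names and values are illustrative, not kernel API.

#include <assert.h>

#define ORDER   2
#define XPAGES  (1u << ORDER)   /* pages actually allocated: 4 */
#define NPAGES  3u              /* pages the mapping really needs */

/* one refcount per page; page 0 starts with the allocation's reference */
static int count[XPAGES] = { 1 };

/* model of split_page(): give every trailing page its own reference */
static void split_model(void)
{
        for (unsigned i = 1; i < XPAGES; i++)
                count[i] = 1;
}

int main(void)
{
        split_model();

        /* the "trim off any pages we don't actually require" loop can
         * now drop each surplus page independently */
        for (unsigned i = NPAGES; i < XPAGES; i++)
                count[i]--;             /* models __free_page(pages + i) */

        for (unsigned i = 0; i < NPAGES; i++)
                assert(count[i] == 1);  /* retained pages stay referenced */
        assert(count[XPAGES - 1] == 0); /* surplus page went back */
        return 0;
}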
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9b3cdfc8046d..3d84b7a35e0d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -327,11 +327,7 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
-#ifdef CONFIG_MMU
 void split_page(struct page *page, unsigned int order);
-#else
-static inline void split_page(struct page *page, unsigned int order) {}
-#endif
 
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
diff --git a/mm/internal.h b/mm/internal.h
index e3042db2a2d6..7bb339779818 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -15,19 +15,7 @@
 
 static inline void set_page_refs(struct page *page, int order)
 {
-#ifdef CONFIG_MMU
        set_page_count(page, 1);
-#else
-       int i;
-
-       /*
-        * We need to reference all the pages for this order, otherwise if
-        * anyone accesses one of the pages with (get/put) it will be freed.
-        * - eg: access_process_vm()
-        */
-       for (i = 0; i < (1 << order); i++)
-               set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
 }
 
 static inline void __put_page(struct page *page)
diff --git a/mm/nommu.c b/mm/nommu.c
index 4951f4786f28..db45efac17cc 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -159,7 +159,7 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
        /*
         * kmalloc doesn't like __GFP_HIGHMEM for some reason
         */
-       return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+       return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
 
 struct page * vmalloc_to_page(void *addr)
@@ -623,7 +623,7 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
         * - note that this may not return a page-aligned address if the object
         *   we're allocating is smaller than a page
         */
-       base = kmalloc(len, GFP_KERNEL);
+       base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
        if (!base)
                goto enomem;
 
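Both nommu.c call sites now pass __GFP_COMP so the slab's backing pages come back as one compound unit; the per-sub-page refcounting removed from mm/internal.h becomes unnecessary because a get_page() on any constituent page (e.g. from access_process_vm()) is redirected to the head. The flag arithmetic in __vmalloc is easy to misread, so here is a tiny self-contained check of the masking order, using stand-in flag values rather than the real kernel constants:

#include <assert.h>

#define FAKE___GFP_HIGHMEM      0x02u   /* stand-in, not the real value */
#define FAKE___GFP_COMP         0x4000u /* stand-in, not the real value */

int main(void)
{
        unsigned gfp_mask = FAKE___GFP_HIGHMEM | 0x10u; /* caller flags */

        /* mirrors (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM: OR in the
         * compound request first, then strip highmem from the result */
        unsigned fixed = (gfp_mask | FAKE___GFP_COMP) & ~FAKE___GFP_HIGHMEM;

        assert(fixed & FAKE___GFP_COMP);
        assert(!(fixed & FAKE___GFP_HIGHMEM));
        assert(fixed & 0x10u);  /* unrelated caller flags survive */
        return 0;
}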
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7aa0181287e1..e197818a7cf6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -422,11 +422,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        mutex_debug_check_no_locks_freed(page_address(page),
                                         PAGE_SIZE<<order);
 
-#ifndef CONFIG_MMU
-       for (i = 1 ; i < (1 << order) ; ++i)
-               __put_page(page + i);
-#endif
-
        for (i = 0 ; i < (1 << order) ; ++i)
                reserved += free_pages_check(page + i);
        if (reserved)
@@ -746,7 +741,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
                clear_highpage(page + i);
 }
 
-#ifdef CONFIG_MMU
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -766,7 +760,6 @@ void split_page(struct page *page, unsigned int order)
                set_page_count(page + i, 1);
        }
 }
-#endif
 
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
diff --git a/mm/slab.c b/mm/slab.c
index f477acfb732f..ff0ab772f49d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -590,6 +590,8 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
+       if (unlikely(PageCompound(page)))
+               page = (struct page *)page_private(page);
        return (struct kmem_cache *)page->lru.next;
 }
 
@@ -600,6 +602,8 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
+       if (unlikely(PageCompound(page)))
+               page = (struct page *)page_private(page);
        return (struct slab *)page->lru.prev;
 }
 
@@ -2412,8 +2416,11 @@ static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
        struct page *page;
 
        /* Nasty!!!!!! I hope this is OK. */
-       i = 1 << cachep->gfporder;
        page = virt_to_page(objp);
+
+       i = 1;
+       if (likely(!PageCompound(page)))
+               i <<= cachep->gfporder;
        do {
                page_set_cache(page, cachep);
                page_set_slab(page, slabp);
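The slab hunks complete the picture: once kmalloc's backing pages can be compound, a multi-page slab stamps its cache and slab back-pointers only on the head page (set_slab_attr loops just once when PageCompound), and the getters first hop from a tail page to the head via page_private(). A hedged user-space model of that lookup, with every name invented for illustration:

#include <assert.h>
#include <stddef.h>

struct toy_page {
        int compound;                   /* models PageCompound() */
        struct toy_page *private;       /* models page_private(): head ptr */
        void *cache;                    /* models the page->lru.next reuse */
        void *slab;                     /* models the page->lru.prev reuse */
};

/* tail pages defer to the head page, which holds the metadata */
static struct toy_page *resolve(struct toy_page *page)
{
        return page->compound ? page->private : page;
}

static void *toy_page_get_cache(struct toy_page *p) { return resolve(p)->cache; }
static void *toy_page_get_slab(struct toy_page *p)  { return resolve(p)->slab; }

int main(void)
{
        struct toy_page pages[4];
        int cachep, slabp;              /* dummies standing in for the structs */

        for (int i = 0; i < 4; i++) {
                pages[i].compound = 1;
                pages[i].private = &pages[0];
                pages[i].cache = NULL;
                pages[i].slab = NULL;
        }
        /* like set_slab_attr() with i == 1: only the head is stamped */
        pages[0].cache = &cachep;
        pages[0].slab = &slabp;

        /* a lookup through any tail page still finds the metadata */
        assert(toy_page_get_cache(&pages[3]) == &cachep);
        assert(toy_page_get_slab(&pages[2]) == &slabp);
        return 0;
}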