Diffstat (limited to 'mm')
 -rw-r--r--  mm/internal.h    | 12
 -rw-r--r--  mm/nommu.c       |  4
 -rw-r--r--  mm/page_alloc.c  |  7
 -rw-r--r--  mm/slab.c        |  9
 4 files changed, 10 insertions, 22 deletions
diff --git a/mm/internal.h b/mm/internal.h
index e3042db2a2d6..7bb339779818 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -15,19 +15,7 @@
 
 static inline void set_page_refs(struct page *page, int order)
 {
-#ifdef CONFIG_MMU
         set_page_count(page, 1);
-#else
-        int i;
-
-        /*
-         * We need to reference all the pages for this order, otherwise if
-         * anyone accesses one of the pages with (get/put) it will be freed.
-         * - eg: access_process_vm()
-         */
-        for (i = 0; i < (1 << order); i++)
-                set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
 }
 
 static inline void __put_page(struct page *page)
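
The deleted #else branch is the nommu-specific scheme this patch retires: every sub-page of a high-order allocation carried its own reference so that get_page()/put_page() on any of them stayed safe. Compound pages make that unnecessary, since each tail page records its head and references land there. A minimal user-space model of the idea (toy types and names, not the kernel's struct page):

#include <assert.h>
#include <stdio.h>

/* Toy model of struct page: a refcount plus a compound-head link.
 * Illustration only; the real struct page looks nothing like this. */
struct toy_page {
        int count;
        struct toy_page *head;  /* stands in for page_private() */
};

/* Compound scheme: only the head carries a reference; every sub-page
 * records the head so a get/put through a tail can redirect to it.
 * The deleted #else branch above instead set count = 1 on all
 * 1 << order sub-pages individually. */
static void set_refs_compound(struct toy_page *page, int order)
{
        for (int i = 0; i < (1 << order); i++) {
                page[i].count = (i == 0);
                page[i].head = &page[0];
        }
}

int main(void)
{
        struct toy_page block[4];       /* an order-2 "allocation" */

        set_refs_compound(block, 2);
        block[3].head->count++;         /* a "get" through a tail page */
        assert(block[0].count == 2);
        printf("head refcount: %d\n", block[0].count);
        return 0;
}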
diff --git a/mm/nommu.c b/mm/nommu.c
index 4951f4786f28..db45efac17cc 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -159,7 +159,7 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
         /*
          * kmalloc doesn't like __GFP_HIGHMEM for some reason
          */
-        return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+        return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
 
 struct page * vmalloc_to_page(void *addr)
@@ -623,7 +623,7 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
          * - note that this may not return a page-aligned address if the object
          *   we're allocating is smaller than a page
          */
-        base = kmalloc(len, GFP_KERNEL);
+        base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
         if (!base)
                 goto enomem;
 
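
Both nommu call sites make the same change: OR __GFP_COMP into the allocation mask so that any high-order block kmalloc hands back is set up as a compound page. The mask manipulation itself is ordinary bit arithmetic; a stand-alone sketch with made-up flag values (the real ones live in include/linux/gfp.h):

#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define GFP_KERNEL      0x10u
#define __GFP_HIGHMEM   0x02u
#define __GFP_COMP      0x4000u

int main(void)
{
        unsigned int gfp_mask = GFP_KERNEL | __GFP_HIGHMEM;

        /* Set __GFP_COMP, clear __GFP_HIGHMEM -- as in __vmalloc() above. */
        unsigned int masked = (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM;

        printf("before: %#x  after: %#x\n", gfp_mask, masked);
        return 0;
}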
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7aa0181287e1..e197818a7cf6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -422,11 +422,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
         mutex_debug_check_no_locks_freed(page_address(page),
                                          PAGE_SIZE<<order);
 
-#ifndef CONFIG_MMU
-        for (i = 1 ; i < (1 << order) ; ++i)
-                __put_page(page + i);
-#endif
-
         for (i = 0 ; i < (1 << order) ; ++i)
                 reserved += free_pages_check(page + i);
         if (reserved)
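
The removed #ifndef CONFIG_MMU loop dropped the extra reference each tail page used to hold. With compound pages there is nothing to drop: the head's final put tears the whole block down in one step. A toy illustration of that single-teardown shape (hypothetical names, not the kernel's actual free path):

#include <stdio.h>

struct toy_page {
        int count;
        void (*dtor)(struct toy_page *, unsigned int);  /* stands in for the compound destructor */
};

/* When the head's last reference drops, one destructor call frees the
 * whole block -- no per-tail __put_page() loop as removed above. */
static void free_compound(struct toy_page *head, unsigned int order)
{
        printf("freeing %d pages starting at %p\n", 1 << order, (void *)head);
}

int main(void)
{
        struct toy_page head = { .count = 1, .dtor = free_compound };

        if (--head.count == 0)
                head.dtor(&head, 3);    /* order-3 compound block */
        return 0;
}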
@@ -746,7 +741,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
                 clear_highpage(page + i);
 }
 
-#ifdef CONFIG_MMU
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -766,7 +760,6 @@ void split_page(struct page *page, unsigned int order)
                 set_page_count(page + i, 1);
         }
 }
-#endif
 
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
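
split_page() is now built on nommu too, which is what the removed #ifdef/#endif pair was guarding. Conceptually it turns one order-n reference into 1 << order independent single-page references; a rough user-space analogue:

#include <stdio.h>

struct toy_page { int count; };

/* Rough analogue of split_page(): page[0] already holds the reference
 * from the original allocation; hand each remaining sub-page its own
 * so the pieces can be freed independently. */
static void toy_split(struct toy_page *page, unsigned int order)
{
        for (int i = 1; i < (1 << order); i++)
                page[i].count = 1;
}

int main(void)
{
        struct toy_page block[8] = { { 1 } };   /* order-3 block, head referenced */

        toy_split(block, 3);
        for (int i = 0; i < 8; i++)
                printf("page[%d].count = %d\n", i, block[i].count);
        return 0;
}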
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -590,6 +590,8 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
+        if (unlikely(PageCompound(page)))
+                page = (struct page *)page_private(page);
         return (struct kmem_cache *)page->lru.next;
 }
 
@@ -600,6 +602,8 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
+        if (unlikely(PageCompound(page)))
+                page = (struct page *)page_private(page);
         return (struct slab *)page->lru.prev;
 }
 
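
page_get_cache() and page_get_slab() gain the same two-line preamble: if the page is part of a compound allocation, redirect to the head page recorded in page_private() before reading the metadata out of page->lru. A self-contained sketch of the redirect (toy fields standing in for the real page flags and private data):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
        bool compound;          /* stands in for PageCompound() */
        struct toy_page *head;  /* stands in for page_private() */
        const char *cache;      /* stands in for page->lru.next */
};

/* Analogue of the new page_get_cache(): a tail page defers to its head. */
static const char *toy_get_cache(struct toy_page *page)
{
        if (page->compound)
                page = page->head;
        return page->cache;
}

int main(void)
{
        struct toy_page head = { .compound = true, .cache = "some-cache" };
        struct toy_page tail = { .compound = true };

        head.head = &head;      /* the head's private also points at itself */
        tail.head = &head;

        assert(toy_get_cache(&tail) == head.cache);
        printf("cache via tail: %s\n", toy_get_cache(&tail));
        return 0;
}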
@@ -2412,8 +2416,11 @@ static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
         struct page *page;
 
         /* Nasty!!!!!! I hope this is OK. */
-        i = 1 << cachep->gfporder;
         page = virt_to_page(objp);
+
+        i = 1;
+        if (likely(!PageCompound(page)))
+                i <<= cachep->gfporder;
         do {
                 page_set_cache(page, cachep);
                 page_set_slab(page, slabp);
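
The net effect on set_slab_attr(): a compound slab only needs its head page tagged, since the lookup helpers above redirect tails to the head, so the do/while body runs once rather than 1 << gfporder times. The loop-count logic in isolation:

#include <stdbool.h>
#include <stdio.h>

/* How many pages set_slab_attr() touches after this patch. */
static int pages_to_tag(bool compound, unsigned int gfporder)
{
        int i = 1;

        if (!compound)
                i <<= gfporder; /* non-compound: tag every sub-page */
        return i;
}

int main(void)
{
        printf("order-3 non-compound: %d pages\n", pages_to_tag(false, 3));
        printf("order-3 compound:     %d page\n", pages_to_tag(true, 3));
        return 0;
}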