 arch/arm/mm/consistent.c      |  4 ++--
 arch/frv/mm/dma-alloc.c       |  4 +---
 arch/mips/mm/init.c           |  5 +++--
 arch/ppc/kernel/dma-mapping.c |  4 ++--
 arch/sh/mm/consistent.c       |  3 +--
 arch/xtensa/mm/pgtable.c      | 10 +++-------
 include/linux/mm.h            |  6 ++++++
 mm/memory.c                   |  4 +---
 mm/page_alloc.c               | 22 ++++++++++++++++++++++
 9 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index c2ee18d2075e..8a1bfcd50087 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -223,6 +223,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		pte = consistent_pte[idx] + off;
 		c->vm_pages = page;
 
+		split_page(page, order);
+
 		/*
 		 * Set the "dma handle"
 		 */
@@ -231,7 +233,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		do {
 			BUG_ON(!pte_none(*pte));
 
-			set_page_count(page, 1);
 			/*
 			 * x86 does not mark the pages reserved...
 			 */
@@ -250,7 +251,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		/*
 		 * Free the otherwise unused pages.
 		 */
 		while (page < end) {
-			set_page_count(page, 1);
 			__free_page(page);
 			page++;
 		}
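The ARM conversion above (like the ppc and sh ones below) follows one shape: allocate a non-compound higher-order block, call the new split_page() so every sub-page carries its own reference count, then free the tail pages beyond what is actually needed. A minimal sketch of that pattern, assuming only alloc_pages()/__free_page() and the split_page() added by this patch; the function name alloc_exact and its size rounding are illustrative, not part of the patch:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only: allocate 'size' bytes, keeping exactly the pages
 * needed and freeing the rest of the 2^order block. */
static void *alloc_exact(size_t size, gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page, *p, *end;

	page = alloc_pages(gfp, order);		/* non-compound: no __GFP_COMP */
	if (!page)
		return NULL;

	split_page(page, order);		/* each sub-page now has count 1 */

	end = page + (1 << order);
	for (p = page + (PAGE_ALIGN(size) >> PAGE_SHIFT); p < end; p++)
		__free_page(p);			/* release the unused tail pages */

	return page_address(page);
}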
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index 342823aad758..636b2f8b5d98 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -115,9 +115,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 	 */
 	if (order > 0) {
 		struct page *rpage = virt_to_page(page);
-
-		for (i = 1; i < (1 << order); i++)
-			set_page_count(rpage + i, 1);
+		split_page(rpage, order);
 	}
 
 	err = 0;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 0ff9a348b843..a140da9732db 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -54,7 +54,8 @@ unsigned long empty_zero_page, zero_page_mask;
  */
 unsigned long setup_zero_pages(void)
 {
-	unsigned long order, size;
+	unsigned int order;
+	unsigned long size;
 	struct page *page;
 
 	if (cpu_has_vce)
@@ -67,9 +68,9 @@ unsigned long setup_zero_pages(void)
67 panic("Oh boy, that early out of memory?"); 68 panic("Oh boy, that early out of memory?");
68 69
69 page = virt_to_page(empty_zero_page); 70 page = virt_to_page(empty_zero_page);
71 split_page(page, order);
70 while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) { 72 while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
71 SetPageReserved(page); 73 SetPageReserved(page);
72 set_page_count(page, 1);
73 page++; 74 page++;
74 } 75 }
75 76
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c
index 685fd0defe23..61465ec88bc7 100644
--- a/arch/ppc/kernel/dma-mapping.c
+++ b/arch/ppc/kernel/dma-mapping.c
@@ -223,6 +223,8 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
 		struct page *end = page + (1 << order);
 
+		split_page(page, order);
+
 		/*
 		 * Set the "dma handle"
 		 */
@@ -231,7 +233,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		do {
 			BUG_ON(!pte_none(*pte));
 
-			set_page_count(page, 1);
 			SetPageReserved(page);
 			set_pte_at(&init_mm, vaddr,
 				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
@@ -244,7 +245,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		/*
 		 * Free the otherwise unused pages.
 		 */
 		while (page < end) {
-			set_page_count(page, 1);
 			__free_page(page);
 			page++;
 		}
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index df3a9e452cc5..ee73e30263af 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -23,6 +23,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
+	split_page(page, order);
 
 	ret = page_address(page);
 	*handle = virt_to_phys(ret);
@@ -37,8 +38,6 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
 	end = page + (1 << order);
 
 	while (++page < end) {
-		set_page_count(page, 1);
-
 		/* Free any unused pages */
 		if (page >= free) {
 			__free_page(page);
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
index cbc56aedf13e..7d28914d11cb 100644
--- a/arch/xtensa/mm/pgtable.c
+++ b/arch/xtensa/mm/pgtable.c
@@ -21,13 +21,9 @@ pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
 
 	if (likely(p)) {
-		struct page *page;
+		split_page(virt_to_page(p), COLOR_ORDER);
 
 		for (i = 0; i < COLOR_SIZE; i++) {
-			page = virt_to_page(p);
-
-			set_page_count(page, 1);
-
 			if (ADDR_COLOR(p) == color)
 				pte = p;
 			else
@@ -55,9 +51,9 @@ struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
 
 	if (likely(p)) {
-		for (i = 0; i < PAGE_ORDER; i++) {
-			set_page_count(p, 1);
+		split_page(p, COLOR_ORDER);
 
+		for (i = 0; i < PAGE_ORDER; i++) {
 			if (PADDR_COLOR(page_address(p)) == color)
 				page = p;
 			else
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9bbddf228cd9..e67980654c49 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -328,6 +328,12 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
+#ifdef CONFIG_MMU
+void split_page(struct page *page, unsigned int order);
+#else
+static inline void split_page(struct page *page, unsigned int order) {}
+#endif
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
diff --git a/mm/memory.c b/mm/memory.c
index 85e80a57db29..6af555c1c42a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1221,9 +1221,7 @@ out:
  * The page has to be a nice clean _individual_ kernel allocation.
  * If you allocate a compound page, you need to have marked it as
  * such (__GFP_COMP), or manually just split the page up yourself
- * (which is mainly an issue of doing "set_page_count(page, 1)" for
- * each sub-page, and then freeing them one by one when you free
- * them rather than freeing it as a compound page).
+ * (see split_page()).
  *
  * NOTE! Traditionally this was done with "remap_pfn_range()" which
  * took an arbitrary page protection parameter. This doesn't allow
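The rewritten comment sits in the documentation of vm_insert_page(), which requires every inserted page to be an individually refcounted allocation. A hedged sketch of the case it describes: a driver mmap handler mapping a buffer that was allocated without __GFP_COMP and then split with split_page() at allocation time. The handler name my_mmap and the private_data stash are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Hypothetical: first struct page of a block that was
	 * alloc_pages()'d and then split_page()'d when allocated. */
	struct page *pages = file->private_data;
	unsigned long npages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long i;
	int err;

	for (i = 0; i < npages; i++) {
		err = vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
				     pages + i);
		if (err)
			return err;
	}
	return 0;
}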
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 102919851353..fc65e87368b3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -752,6 +752,28 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
+#ifdef CONFIG_MMU
+/*
+ * split_page takes a non-compound higher-order page, and splits it into
+ * n (1<<order) sub-pages: page[0..n]
+ * Each sub-page must be freed individually.
+ *
+ * Note: this is probably too low level an operation for use in drivers.
+ * Please consult with lkml before using this in your driver.
+ */
+void split_page(struct page *page, unsigned int order)
+{
+	int i;
+
+	BUG_ON(PageCompound(page));
+	BUG_ON(!page_count(page));
+	for (i = 1; i < (1 << order); i++) {
+		BUG_ON(page_count(page + i));
+		set_page_count(page + i, 1);
+	}
+}
+#endif
+
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
  * we cheat by calling it from here, in the order > 0 path. Saves a branch
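Read together with the BUG_ON()s above, the helper's contract is: the caller passes a non-compound page it still holds a reference on, the tail pages have a count of zero on entry (as they do straight out of the allocator), and afterwards each sub-page is freed on its own rather than as one order-n block. A small illustrative sketch; split_page_demo is not part of the patch:

#include <linux/gfp.h>
#include <linux/mm.h>

static void split_page_demo(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 4 contiguous pages */

	if (!page)
		return;

	split_page(page, 2);	/* page[1..3] go from count 0 to count 1 */

	/* Sub-pages may now be released independently, in any order. */
	__free_page(page + 3);
	__free_page(page + 1);
	__free_page(page + 2);
	__free_page(page);
}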