author     Mel Gorman <mel@csn.ul.ie>                            2007-07-17 07:03:05 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-17 13:22:59 -0400
commit     769848c03895b63e5662eb7e4ec8c4866f7d0183 (patch)
tree       8911c7c312c8b8b172795fa2874c8162e1d3d15a /mm
parent     a32ea1e1f925399e0d81ca3f7394a44a6dafa12c (diff)
Add __GFP_MOVABLE for callers to flag allocations from high memory that may be migrated
It is often known at allocation time whether a page may be migrated or not.  This patch adds a flag called __GFP_MOVABLE and a new mask called GFP_HIGHUSER_MOVABLE.  Allocations using __GFP_MOVABLE can either be migrated using the page migration mechanism or reclaimed by syncing with backing storage and discarding.

An API function very similar to alloc_zeroed_user_highpage() is added for __GFP_MOVABLE allocations, called alloc_zeroed_user_highpage_movable().  The flags used by alloc_zeroed_user_highpage() are not changed because that would change the semantics of an existing API.  After this patch is applied there are no in-kernel users of alloc_zeroed_user_highpage(), so it should probably be marked deprecated once this patch is merged.

Note that this patch includes a minor cleanup to the use of __GFP_ZERO in shmem.c, keeping all flag modifications to inode->mapping inside the shmem_dir_alloc() helper function.  This clean-up suggestion is courtesy of Hugh Dickins.

Additional credit goes to Christoph Lameter and Linus Torvalds for shaping the concept.  Credit to Hugh Dickins for catching issues with shmem swap vector and ramfs allocations.

[akpm@linux-foundation.org: build fix]
[hugh@veritas.com: __GFP_ZERO cleanup]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
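The header side of this change lives in include/linux/gfp.h and include/linux/highmem.h, outside the mm/-limited diffstat shown below, so the callers changed here depend on definitions not visible in this view.  A minimal sketch of their shape; the bit value, the exact flag composition, and the __alloc_zeroed_user_highpage() arch hook are illustrative assumptions, not quoted from this diff:

	/* include/linux/gfp.h (sketch): a modifier marking the allocation
	 * as movable; the bit value here is illustrative only. */
	#define __GFP_MOVABLE	((__force gfp_t)0x100000u)

	/* Highmem userspace allocations whose pages may later be migrated,
	 * or reclaimed by syncing with backing storage and discarding. */
	#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)

	/* include/linux/highmem.h (sketch): the movable variant of the
	 * existing API, passing the extra flag through an assumed per-arch
	 * __alloc_zeroed_user_highpage() hook. */
	static inline struct page *
	alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					   unsigned long vaddr)
	{
		return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
	}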
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c      9
-rw-r--r--  mm/mempolicy.c   5
-rw-r--r--  mm/migrate.c     3
-rw-r--r--  mm/shmem.c       7
-rw-r--r--  mm/swap_state.c  3
5 files changed, 17 insertions, 10 deletions
diff --git a/mm/memory.c b/mm/memory.c
index b3d73bb1f680..9c6ff7fffdc8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1715,11 +1715,11 @@ gotten:
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	if (old_page == ZERO_PAGE(address)) {
-		new_page = alloc_zeroed_user_highpage(vma, address);
+		new_page = alloc_zeroed_user_highpage_movable(vma, address);
 		if (!new_page)
 			goto oom;
 	} else {
-		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 		if (!new_page)
 			goto oom;
 		cow_user_page(new_page, old_page, address, vma);
@@ -2237,7 +2237,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		if (unlikely(anon_vma_prepare(vma)))
 			goto oom;
-		page = alloc_zeroed_user_highpage(vma, address);
+		page = alloc_zeroed_user_highpage_movable(vma, address);
 		if (!page)
 			goto oom;
 
@@ -2340,7 +2340,8 @@ retry:
 
 		if (unlikely(anon_vma_prepare(vma)))
 			goto oom;
-		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+						vma, address);
 		if (!page)
 			goto oom;
 		copy_user_highpage(page, new_page, address, vma);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 188f8d9c4aed..4c0f99996811 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -594,7 +594,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
-	return alloc_pages_node(node, GFP_HIGHUSER, 0);
+	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*
@@ -710,7 +710,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
 {
 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
 
-	return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
+	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
+			page_address_in_vma(page, vma));
 }
 #else
 
diff --git a/mm/migrate.c b/mm/migrate.c
index a91ca00abebe..34d8ada053e4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -761,7 +761,8 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
 	*result = &pm->status;
 
-	return alloc_pages_node(pm->node, GFP_HIGHUSER | GFP_THISNODE, 0);
+	return alloc_pages_node(pm->node,
+				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
 /*
diff --git a/mm/shmem.c b/mm/shmem.c
index 0493e4d0bcaa..e49181d9d893 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -93,8 +93,11 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 	 * The above definition of ENTRIES_PER_PAGE, and the use of
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
+	 *
+	 * __GFP_MOVABLE is masked out as swap vectors cannot move
 	 */
-	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+				PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
 static inline void shmem_dir_free(struct page *page)
@@ -372,7 +375,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 	}
 
 	spin_unlock(&info->lock);
-	page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
+	page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
 	if (page)
 		set_page_private(page, 0);
 	spin_lock(&info->lock);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 925d5c50f18d..67daecb6031a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -334,7 +334,8 @@ struct page *read_swap_cache_async(swp_entry_t entry,
 		 * Get a new page to read into from swap.
 		 */
 		if (!new_page) {
-			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+			new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+							vma, addr);
 			if (!new_page)
 				break;		/* Out of memory */
 		}