author    Martin Hicks <mort@sgi.com>            2005-06-21 20:14:42 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-21 21:46:14 -0400
commit    0c35bbadc59f5ed105c34471143eceb4c0dd9c95 (patch)
tree      d82de388e8c0a38fa4b1d27ad372e84c9f01e013
parent    753ee728964e5afb80c17659cc6c3a6fd0a42fe0 (diff)
[PATCH] VM: add __GFP_NORECLAIM
When using early zone reclaim, it was noticed that allocating new pages that should be spread across the whole system caused eviction of local pages.

This adds a new GFP flag to prevent early reclaim from happening during certain allocation attempts.  The example implemented here is for page cache pages: we want page cache pages to be spread across the whole system, and we don't want page cache pages to evict other pages to get local memory.

Signed-off-by: Martin Hicks <mort@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
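For illustration only, not part of the patch: a minimal sketch of how an allocation site can opt out of early zone reclaim with the new flag. The helper alloc_spread_page() is hypothetical; alloc_pages() and mapping_gfp_mask() are the existing kernel interfaces used in the pagemap.h hunk below.

#include <linux/gfp.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper, shown only to illustrate __GFP_NORECLAIM; it mirrors
 * what page_cache_alloc() does after this patch (see the pagemap.h hunk).
 */
static struct page *alloc_spread_page(struct address_space *mapping)
{
	/*
	 * Page cache pages should be spread across the whole system, so ask
	 * the allocator not to trigger early zone reclaim (which would evict
	 * local pages just to satisfy this allocation from the local node).
	 */
	return alloc_pages(mapping_gfp_mask(mapping) | __GFP_NORECLAIM, 0);
}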
-rw-r--r--  include/linux/gfp.h     | 3
-rw-r--r--  include/linux/pagemap.h | 4
-rw-r--r--  mm/page_alloc.c         | 2
3 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index af7407e8cfc5..208535fd4832 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -39,6 +39,7 @@ struct vm_area_struct;
 #define __GFP_COMP	0x4000u	/* Add compound page metadata */
 #define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
+#define __GFP_NORECLAIM  0x20000u /* No realy zone reclaim during allocation */
 
 #define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
@@ -47,7 +48,7 @@ struct vm_area_struct;
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
 			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
-			__GFP_NOMEMALLOC)
+			__GFP_NOMEMALLOC|__GFP_NORECLAIM)
 
 #define GFP_ATOMIC	(__GFP_HIGH)
 #define GFP_NOIO	(__GFP_WAIT)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0422031161ba..d9a25647a295 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -52,12 +52,12 @@ void release_pages(struct page **pages, int nr, int cold);
 
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x), 0);
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0);
 }
 
 static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0);
 }
 
 typedef int filler_t(void *, struct page *);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3c0f69ded6b5..a9da20bc2ed0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -729,6 +729,8 @@ should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
 {
 	if (!z->reclaim_pages)
 		return 0;
+	if (gfp_mask & __GFP_NORECLAIM)
+		return 0;
 	return 1;
 }
 