-rw-r--r--  include/linux/pagemap.h |  5
-rw-r--r--  mm/filemap.c            | 23
2 files changed, 28 insertions, 0 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ee700c6eb442..839f0b3c23aa 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -51,6 +51,10 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
 
+#ifdef CONFIG_NUMA
+extern struct page *page_cache_alloc(struct address_space *x);
+extern struct page *page_cache_alloc_cold(struct address_space *x);
+#else
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x), 0);
@@ -60,6 +64,7 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
 }
+#endif
 
 typedef int filler_t(void *, struct page *);
 
diff --git a/mm/filemap.c b/mm/filemap.c
index e8f58f7dd7a5..d4ff48ec269e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,6 +29,7 @@
 #include <linux/blkdev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/cpuset.h>
 #include "filemap.h"
 #include "internal.h"
 
@@ -427,6 +428,28 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	return ret;
 }
 
+#ifdef CONFIG_NUMA
+struct page *page_cache_alloc(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x), 0);
+}
+EXPORT_SYMBOL(page_cache_alloc);
+
+struct page *page_cache_alloc_cold(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+}
+EXPORT_SYMBOL(page_cache_alloc_cold);
+#endif
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages.  By using a hash table of
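
For context, page_cache_alloc() and page_cache_alloc_cold() are the helpers the page cache read and readahead paths use to allocate new cache pages, so with this patch those allocations follow cpuset memory spreading on NUMA kernels. The sketch below is illustrative only and not part of the patch: a hypothetical caller (example_readahead_one() is a made-up name) allocates a cold page through the new out-of-line helper and inserts it with add_to_page_cache_lru(), roughly as the in-tree readahead code of this era does.

#include <linux/pagemap.h>
#include <linux/errno.h>

/* Hypothetical example, not part of the patch above. */
static int example_readahead_one(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	int err;

	/*
	 * Cold allocation hint: the page is being read ahead and is not
	 * expected to be touched immediately.  With CONFIG_NUMA and
	 * memory_spread_page set in the calling task's cpuset, this now
	 * allocates from cpuset_mem_spread_node() instead of the local node.
	 */
	page = page_cache_alloc_cold(mapping);
	if (!page)
		return -ENOMEM;

	/* Add to the radix tree and the LRU; the caller drops the page on failure. */
	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
	if (err)
		page_cache_release(page);

	return err;
}

Nothing changes for such callers: the spreading decision is taken inside page_cache_alloc()/page_cache_alloc_cold(), keyed off the calling task's cpuset flags, which is why the non-NUMA inline versions in pagemap.h can stay as they were.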