author     Paul Jackson <pj@sgi.com>               2006-03-24 06:16:04 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-03-24 10:33:22 -0500
commit     44110fe385af23ca5eee8a6ad4ff55d50339097a
tree       50ed2bfe054b8e35968d8e4a5fbe95c8b3db843b
parent     825a46af5ac171f9f41f794a0a00165588ba1589
[PATCH] cpuset memory spread page cache implementation and hooks
Change the page cache allocation calls to support cpuset memory spreading.
See the previous patch, cpuset_mem_spread, for an explanation of cpuset memory
spreading.
On systems without cpusets configured in the kernel, this patch changes nothing.
On systems with cpusets configured in the kernel, but with the "memory_spread"
cpuset option not enabled for the current task's cpuset, this adds a call to a
cpuset routine and a failed bit test of the per-task state flag PF_SPREAD_PAGE.
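For reference, in the companion cpuset_mem_spread patch that test is just a
flag check, roughly:

	static inline int cpuset_do_page_mem_spread(void)
	{
		return current->flags & PF_SPREAD_PAGE;
	}

so the cost in the non-spreading case is one call and one branch.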
On tasks in cpusets with "memory_spread" enabled, this adds a call to a cpuset
routine that computes which of the task's mems_allowed nodes should be
preferred for this allocation.
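That routine rotates a per-task rotor through the allowed nodes; a sketch of
the logic as it appears in the companion patch (give or take exact names):

	int cpuset_mem_spread_node(void)
	{
		int node;

		/* Advance the rotor through this task's mems_allowed. */
		node = next_node(current->cpuset_mem_spread_rotor,
				 current->mems_allowed);
		if (node == MAX_NUMNODES)
			node = first_node(current->mems_allowed);
		current->cpuset_mem_spread_rotor = node;
		return node;
	}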
If memory spreading applies to a particular allocation, then any other NUMA
mempolicy does not apply.
Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/linux/pagemap.h |  5 +
-rw-r--r--  mm/filemap.c            | 23 +
2 files changed, 28 insertions(+), 0 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ee700c6eb442..839f0b3c23aa 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -51,6 +51,10 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
 
+#ifdef CONFIG_NUMA
+extern struct page *page_cache_alloc(struct address_space *x);
+extern struct page *page_cache_alloc_cold(struct address_space *x);
+#else
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x), 0);
@@ -60,6 +64,7 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
 }
+#endif
 
 typedef int filler_t(void *, struct page *);
 
diff --git a/mm/filemap.c b/mm/filemap.c
index e8f58f7dd7a5..d4ff48ec269e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,6 +29,7 @@
 #include <linux/blkdev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/cpuset.h>
 #include "filemap.h"
 #include "internal.h"
 
@@ -427,6 +428,28 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	return ret;
 }
 
+#ifdef CONFIG_NUMA
+struct page *page_cache_alloc(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x), 0);
+}
+EXPORT_SYMBOL(page_cache_alloc);
+
+struct page *page_cache_alloc_cold(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+}
+EXPORT_SYMBOL(page_cache_alloc_cold);
+#endif
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
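Callers need no changes: any page cache path that allocates through these
helpers picks up spreading transparently. An illustrative caller, hypothetical
and not part of this diff:

	/* Hypothetical sketch of a page cache read path using the hooked
	 * allocator; with PF_SPREAD_PAGE set, the page may land on a node
	 * chosen by the spread rotor rather than the local node. */
	static int example_page_cache_read(struct file *file, unsigned long offset)
	{
		struct address_space *mapping = file->f_mapping;
		struct page *page = page_cache_alloc_cold(mapping);

		if (!page)
			return -ENOMEM;
		if (add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL)) {
			page_cache_release(page);
			return -ENOMEM;
		}
		/* ... read the page contents, then drop the reference ... */
		page_cache_release(page);
		return 0;
	}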