diff options
author | Mel Gorman <mgorman@suse.de> | 2013-02-22 19:34:46 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 20:50:18 -0500 |
commit | 4468b8f1e2d32ce79ef4bcb8e00d7e88627f1c3a (patch) | |
tree | a73555b462198c396da08acab928a788f3733b71 /mm | |
parent | 6acc8b02517d7063b25490b26215834bd2f363c8 (diff) |
mm: uninline page_xchg_last_nid()
Andrew Morton pointed out that page_xchg_last_nid() and
reset_page_last_nid() were "getting nuttily large" and asked that it be
investigated.
reset_page_last_nid() is on the page free path and it would be
unfortunate to make that path more expensive than it needs to be. Due
to the internal use of page_xchg_last_nid() it is already too expensive
but fortunately, it should also be impossible for the page->flags to be
updated in parallel when we call reset_page_last_nid(). Instead of
uninlining the function, it uses a simpler implementation that assumes no
parallel updates and should now be sufficiently short for inlining.
page_xchg_last_nid() is called in paths that are already quite expensive
(splitting huge page, fault handling, migration) and it is reasonable to
uninline. There was not really a good place to place the function but
mm/mmzone.c was the closest fit IMO.
This patch saved 128 bytes of text in the vmlinux file for the kernel
configuration I used for testing automatic NUMA balancing.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mmzone.c | 20 |
1 files changed, 19 insertions, 1 deletions
diff --git a/mm/mmzone.c b/mm/mmzone.c index 4596d81b89b1..bce796e8487f 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/mm/mmzone.c | 2 | * linux/mm/mmzone.c |
3 | * | 3 | * |
4 | * management codes for pgdats and zones. | 4 | * management codes for pgdats, zones and page flags |
5 | */ | 5 | */ |
6 | 6 | ||
7 | 7 | ||
@@ -96,3 +96,21 @@ void lruvec_init(struct lruvec *lruvec) | |||
96 | for_each_lru(lru) | 96 | for_each_lru(lru) |
97 | INIT_LIST_HEAD(&lruvec->lists[lru]); | 97 | INIT_LIST_HEAD(&lruvec->lists[lru]); |
98 | } | 98 | } |
99 | |||
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically replace the last-NID bits encoded in page->flags with @nid
 * and return the NID that was stored there before.  A cmpxchg retry loop
 * handles concurrent updates to page->flags by other writers.
 */
int page_xchg_last_nid(struct page *page, int nid)
{
	unsigned long snapshot, updated;
	int prev_nid;

	do {
		/* Take a snapshot of the flags word and the NID it encodes. */
		snapshot = page->flags;
		prev_nid = page_last_nid(page);

		/* Clear the old NID field, then splice in the new value. */
		updated = snapshot & ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
		updated |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, snapshot, updated) != snapshot));

	return prev_nid;
}
#endif