aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2013-02-22 19:34:46 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:18 -0500
commit4468b8f1e2d32ce79ef4bcb8e00d7e88627f1c3a (patch)
treea73555b462198c396da08acab928a788f3733b71
parent6acc8b02517d7063b25490b26215834bd2f363c8 (diff)
mm: uninline page_xchg_last_nid()
Andrew Morton pointed out that page_xchg_last_nid() and reset_page_last_nid() were "getting nuttily large" and asked that it be investigated. reset_page_last_nid() is on the page free path and it would be unfortunate to make that path more expensive than it needs to be. Due to the internal use of page_xchg_last_nid() it is already too expensive but fortunately, it should also be impossible for the page->flags to be updated in parallel when we call reset_page_last_nid(). Instead of uninlining the function, it uses a simpler implementation that assumes no parallel updates and should now be sufficiently short for inlining. page_xchg_last_nid() is called in paths that are already quite expensive (splitting huge page, fault handling, migration) and it is reasonable to uninline. There was not really a good place to place the function but mm/mmzone.c was the closest fit IMO. This patch saved 128 bytes of text in the vmlinux file for the kernel configuration I used for testing automatic NUMA balancing. Signed-off-by: Mel Gorman <mgorman@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/mm.h21
-rw-r--r--mm/mmzone.c20
2 files changed, 24 insertions, 17 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 437da0ce78c7..8a5bbe3b9e56 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -677,25 +677,14 @@ static inline int page_last_nid(struct page *page)
677 return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK; 677 return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
678} 678}
679 679
680static inline int page_xchg_last_nid(struct page *page, int nid) 680extern int page_xchg_last_nid(struct page *page, int nid);
681{
682 unsigned long old_flags, flags;
683 int last_nid;
684
685 do {
686 old_flags = flags = page->flags;
687 last_nid = page_last_nid(page);
688
689 flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
690 flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
691 } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
692
693 return last_nid;
694}
695 681
696static inline void reset_page_last_nid(struct page *page) 682static inline void reset_page_last_nid(struct page *page)
697{ 683{
698 page_xchg_last_nid(page, (1 << LAST_NID_SHIFT) - 1); 684 int nid = (1 << LAST_NID_SHIFT) - 1;
685
686 page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
687 page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
699} 688}
700#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */ 689#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
701#else 690#else
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 4596d81b89b1..bce796e8487f 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/mm/mmzone.c 2 * linux/mm/mmzone.c
3 * 3 *
4 * management codes for pgdats and zones. 4 * management codes for pgdats, zones and page flags
5 */ 5 */
6 6
7 7
@@ -96,3 +96,21 @@ void lruvec_init(struct lruvec *lruvec)
96 for_each_lru(lru) 96 for_each_lru(lru)
97 INIT_LIST_HEAD(&lruvec->lists[lru]); 97 INIT_LIST_HEAD(&lruvec->lists[lru]);
98} 98}
99
100#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS)
101int page_xchg_last_nid(struct page *page, int nid)
102{
103 unsigned long old_flags, flags;
104 int last_nid;
105
106 do {
107 old_flags = flags = page->flags;
108 last_nid = page_last_nid(page);
109
110 flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
111 flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
112 } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
113
114 return last_nid;
115}
116#endif