author     Peter Zijlstra <a.p.zijlstra@chello.nl>          2013-02-22 19:34:32 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-02-23 20:50:17 -0500
commit     75980e97daccfc6babbac7e180ff118537955f5d (patch)
tree       b5721bae11eab21a8ee7c2ba4c7f153a59766dd6 /include/linux/mm.h
parent     bbeae5b05ef6e40bf54db05ceb8635824153b9e2 (diff)
mm: fold page->_last_nid into page->flags where possible
page->_last_nid fits into page->flags on 64-bit.  The unlikely 32-bit
NUMA configuration with NUMA Balancing will still need an extra page
field.  As Peter notes "Completely dropping 32bit support for
CONFIG_NUMA_BALANCING would simplify things, but it would also remove
the warning if we grow enough 64bit only page-flags to push the
last-cpu out."

[mgorman@suse.de: minor modifications]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Simon Jeons <simon.jeons@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
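The core of the patch is an atomic read-modify-write on the flags word:
read flags, clear the old LAST_NID bits, OR in the new value, and
cmpxchg() the result back, retrying if another CPU changed flags in
between.  The following is a minimal user-space sketch of that same
pattern, not kernel code: the FAKE_* width and shift values are invented
for illustration (the real ones are derived from the SECTIONS/NODES/ZONES
widths at build time), struct fake_page stands in for struct page, and
C11 atomics stand in for the kernel's cmpxchg().

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative values only; the real LAST_NID_WIDTH/LAST_NID_PGSHIFT
 * come from the page-flags layout and depend on the kernel config. */
#define FAKE_LAST_NID_WIDTH	8
#define FAKE_LAST_NID_PGSHIFT	24
#define FAKE_LAST_NID_MASK	((1UL << FAKE_LAST_NID_WIDTH) - 1)

struct fake_page {
	_Atomic unsigned long flags;	/* stand-in for page->flags */
};

/* Extract the packed last-NID field, as page_last_nid() does. */
static int fake_page_last_nid(struct fake_page *page)
{
	return (atomic_load(&page->flags) >> FAKE_LAST_NID_PGSHIFT) &
		FAKE_LAST_NID_MASK;
}

/* Atomically replace the packed field while preserving every other
 * flag bit; the compare-and-swap loop retries on a racing update,
 * mirroring the cmpxchg() loop in the patch.  Returns the old value. */
static int fake_page_xchg_last_nid(struct fake_page *page, int nid)
{
	unsigned long old_flags, new_flags;
	int last_nid;

	do {
		old_flags = atomic_load(&page->flags);
		last_nid = (old_flags >> FAKE_LAST_NID_PGSHIFT) &
			FAKE_LAST_NID_MASK;
		new_flags = old_flags &
			~(FAKE_LAST_NID_MASK << FAKE_LAST_NID_PGSHIFT);
		new_flags |= ((unsigned long)nid & FAKE_LAST_NID_MASK) <<
			FAKE_LAST_NID_PGSHIFT;
	} while (!atomic_compare_exchange_weak(&page->flags, &old_flags,
					       new_flags));

	return last_nid;
}

int main(void)
{
	struct fake_page page = { .flags = 0x5 };	/* unrelated bits set */

	fake_page_xchg_last_nid(&page, 3);
	printf("last_nid=%d flags=%#lx\n", fake_page_last_nid(&page),
	       atomic_load(&page.flags));
	return 0;
}

Built with e.g. cc -std=c11, this prints last_nid=3 flags=0x3000005,
showing the field updated while the unrelated low bits survive.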
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h | 33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c2d7d5993b14..473abbda942e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -581,10 +581,11 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
  * sets it, so none of the operations on it need to be atomic.
  */
 
-/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NID] | ... | FLAGS | */
 #define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
+#define LAST_NID_PGOFF		(ZONES_PGOFF - LAST_NID_WIDTH)
 
 /*
  * Define the bit shifts to access each section.  For non-existent
@@ -594,6 +595,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_NID_PGSHIFT	(LAST_NID_PGOFF * (LAST_NID_WIDTH != 0))
 
 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -615,6 +617,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_NID_MASK		((1UL << LAST_NID_WIDTH) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
@@ -654,6 +657,7 @@ static inline int page_to_nid(const struct page *page)
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
+#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
 static inline int page_xchg_last_nid(struct page *page, int nid)
 {
 	return xchg(&page->_last_nid, nid);
@@ -668,6 +672,33 @@ static inline void reset_page_last_nid(struct page *page)
 	page->_last_nid = -1;
 }
 #else
+static inline int page_last_nid(struct page *page)
+{
+	return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
+}
+
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+	unsigned long old_flags, flags;
+	int last_nid;
+
+	do {
+		old_flags = flags = page->flags;
+		last_nid = page_last_nid(page);
+
+		flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
+		flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
+
+	return last_nid;
+}
+
+static inline void reset_page_last_nid(struct page *page)
+{
+	page_xchg_last_nid(page, (1 << LAST_NID_SHIFT) - 1);
+}
+#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
+#else
 static inline int page_xchg_last_nid(struct page *page, int nid)
 {
 	return page_to_nid(page);