author    Grygorii Strashko <grygorii.strashko@ti.com>    2014-01-21 18:50:16 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-21 19:19:46 -0500
commit    b115423357e0cda6d8f45d0c81df537d7b004020 (patch)
tree      f3acecb4396cd1967ae57d71269a84862d9a10ad /mm/memblock.c
parent    87029ee9390b2297dae699d5fb135b77992116e5 (diff)
mm/memblock: switch to use NUMA_NO_NODE instead of MAX_NUMNODES
It's recommended to use NUMA_NO_NODE everywhere to select "process any node" behavior, or to indicate that no node id was specified.

Hence, update the __next_free_mem_range*() APIs to accept both NUMA_NO_NODE and MAX_NUMNODES, but emit a one-time warning on MAX_NUMNODES, and correct the corresponding kernel-doc to describe the new behavior. Also update other memblock/nobootmem APIs where MAX_NUMNODES is used directly.

The change was suggested by Tejun Heo.

Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tony Lindgren <tony@atomide.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
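[Editor's illustration, not part of the patch: a minimal sketch of what the switch means for a hypothetical early-boot caller that wants memory from any node. The helper below is invented for the example; memblock_find_in_range_node(), memblock_reserve(), MEMBLOCK_ALLOC_ACCESSIBLE, MAX_NUMNODES and NUMA_NO_NODE are the symbols actually touched by this series. A second usage sketch for the updated free-range iterators follows the diff below.]

#include <linux/memblock.h>
#include <linux/numa.h>		/* NUMA_NO_NODE */

/* Hypothetical early-boot helper: reserve a buffer from any NUMA node. */
static phys_addr_t __init example_reserve_anywhere(phys_addr_t size,
						   phys_addr_t align)
{
	phys_addr_t base;

	/*
	 * Before this patch, callers passed MAX_NUMNODES to mean "any node":
	 *
	 *	base = memblock_find_in_range_node(size, align, 0,
	 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
	 *					   MAX_NUMNODES);
	 *
	 * That still works, but the __next_free_mem_range*() iterators now
	 * print a one-time deprecation warning.  The preferred form is:
	 */
	base = memblock_find_in_range_node(size, align, 0,
					   MEMBLOCK_ALLOC_ACCESSIBLE,
					   NUMA_NO_NODE);
	if (base)
		memblock_reserve(base, size);

	return base;
}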
Diffstat (limited to 'mm/memblock.c')
 mm/memblock.c | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index a95d6dc066d5..03f1dc7b663c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -94,7 +94,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
  * @size: size of free area to find
  * @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
  * Utility called from memblock_find_in_range_node(), find free area bottom-up.
  *
@@ -126,7 +126,7 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
  * @size: size of free area to find
  * @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
  * Utility called from memblock_find_in_range_node(), find free area top-down.
  *
@@ -161,7 +161,7 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  * @align: alignment of free area to find
  * @start: start of candidate range
  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
@@ -242,7 +242,7 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					phys_addr_t align)
 {
 	return memblock_find_in_range_node(size, align, start, end,
-					   MAX_NUMNODES);
+					   NUMA_NO_NODE);
 }
 
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
@@ -754,7 +754,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
 /**
  * __next_free_mem_range - next function for for_each_free_mem_range()
  * @idx: pointer to u64 loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @out_nid: ptr to int for nid of the range, can be %NULL
@@ -782,6 +782,11 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
 	struct memblock_type *rsv = &memblock.reserved;
 	int mi = *idx & 0xffffffff;
 	int ri = *idx >> 32;
+	bool check_node = (nid != NUMA_NO_NODE) && (nid != MAX_NUMNODES);
+
+	if (nid == MAX_NUMNODES)
+		pr_warn_once("%s: Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n",
+			     __func__);
 
 	for ( ; mi < mem->cnt; mi++) {
 		struct memblock_region *m = &mem->regions[mi];
@@ -789,7 +794,7 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
 		phys_addr_t m_end = m->base + m->size;
 
 		/* only memory regions are associated with nodes, check it */
-		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+		if (check_node && nid != memblock_get_region_node(m))
 			continue;
 
 		/* scan areas before each reservation for intersection */
@@ -830,7 +835,7 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
 /**
  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  * @idx: pointer to u64 loop variable
- * @nid: nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: nid: node selector, %NUMA_NO_NODE for all nodes
  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @out_nid: ptr to int for nid of the range, can be %NULL
@@ -850,6 +855,11 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
 	struct memblock_type *rsv = &memblock.reserved;
 	int mi = *idx & 0xffffffff;
 	int ri = *idx >> 32;
+	bool check_node = (nid != NUMA_NO_NODE) && (nid != MAX_NUMNODES);
+
+	if (nid == MAX_NUMNODES)
+		pr_warn_once("%s: Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n",
+			     __func__);
 
 	if (*idx == (u64)ULLONG_MAX) {
 		mi = mem->cnt - 1;
@@ -862,7 +872,7 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
 		phys_addr_t m_end = m->base + m->size;
 
 		/* only memory regions are associated with nodes, check it */
-		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+		if (check_node && nid != memblock_get_region_node(m))
 			continue;
 
 		/* skip hotpluggable memory regions if needed */
@@ -989,7 +999,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
 
 phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
+	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
 }
 
 phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
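[Editor's illustration, not part of the patch: a follow-up usage sketch for the iterators whose kernel-doc is updated above. The walk function and its printout are hypothetical; for_each_free_mem_range() and NUMA_NO_NODE are real, and passing MAX_NUMNODES still iterates everything but now hits the pr_warn_once() added in __next_free_mem_range().]

#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/printk.h>

/* Hypothetical boot-time walk over free memory on all nodes. */
static void __init example_walk_free_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	/* NUMA_NO_NODE selects every node; &start/&end receive each range. */
	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
		pr_info("free range: %pa..%pa\n", &start, &end);
}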