author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2012-12-11 19:00:29 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2012-12-11 20:22:22 -0500
commit		e5adfffc857788c8b7eca0e98cf1e26f1964b292
tree		8d1ac37aa649a060055162b59cb06e8ca63f7a68 /mm/page_alloc.c
parent		19965460e31c73a934d2c19c152f876a75bdff3e
mm: use IS_ENABLED(CONFIG_NUMA) instead of NUMA_BUILD
We don't need the custom NUMA_BUILD macro anymore, since we have the
handy IS_ENABLED().
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
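
[Editor's note] For context, the point of IS_ENABLED() over an ad-hoc macro like NUMA_BUILD is that the condition stays an ordinary compile-time constant inside a plain C if, so the disabled branch is still parsed and type-checked and is only then discarded by the optimizer, unlike an #ifdef block. The snippet below is a minimal user-space sketch of that preprocessor idiom, not the kernel's actual <linux/kconfig.h> implementation (which also handles =m options); the MYCFG_* options and the MY_IS_ENABLED() helpers are made-up names for illustration.

#include <stdio.h>

/* Hypothetical Kconfig-style options: an enabled option is defined to 1,
 * a disabled one is simply not defined at all. */
#define MYCFG_NUMA 1
/* MYCFG_HIGHMEM is intentionally left undefined (disabled). */

/*
 * Simplified sketch of the IS_ENABLED() trick: when the option expands
 * to 1, the placeholder macro smuggles in an extra argument and the
 * selector picks 1; for anything else it falls through to 0.
 */
#define MY_PLACEHOLDER_1			0,
#define MY_IS_ENABLED(option)			my_is_enabled(option)
#define my_is_enabled(value)			my_is_enabled_1(MY_PLACEHOLDER_##value)
#define my_is_enabled_1(arg_or_junk)		my_is_enabled_2(arg_or_junk 1, 0, 0)
#define my_is_enabled_2(ignored, val, ...)	val

int main(void)
{
	/* Both branches are compiled; the dead one is eliminated because
	 * the condition is the constant 1 or 0. */
	if (MY_IS_ENABLED(MYCFG_NUMA))
		printf("MYCFG_NUMA is enabled\n");
	else
		printf("MYCFG_NUMA is disabled\n");

	if (MY_IS_ENABLED(MYCFG_HIGHMEM))
		printf("MYCFG_HIGHMEM is enabled\n");
	else
		printf("MYCFG_HIGHMEM is disabled\n");

	return 0;
}

This prints "enabled" for the first option and "disabled" for the second. It is the same property the conversion below relies on: NUMA-only calls such as zlc_mark_zone_full() still get compile coverage on !CONFIG_NUMA builds while generating no code there.
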
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dc018b486b74..a49b0ea3cc2f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1871,7 +1871,7 @@ zonelist_scan:
 	 */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						high_zoneidx, nodemask) {
-		if (NUMA_BUILD && zlc_active &&
+		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
 				continue;
 		if ((alloc_flags & ALLOC_CPUSET) &&
@@ -1917,7 +1917,8 @@ zonelist_scan:
 					classzone_idx, alloc_flags))
 				goto try_this_zone;
 
-			if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+			if (IS_ENABLED(CONFIG_NUMA) &&
+					!did_zlc_setup && nr_online_nodes > 1) {
 				/*
 				 * we do zlc_setup if there are multiple nodes
 				 * and before considering the first zone allowed
@@ -1936,7 +1937,7 @@ zonelist_scan:
 			 * As we may have just activated ZLC, check if the first
 			 * eligible zone has failed zone_reclaim recently.
 			 */
-			if (NUMA_BUILD && zlc_active &&
+			if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 				!zlc_zone_worth_trying(zonelist, z, allowednodes))
 				continue;
 
@@ -1962,11 +1963,11 @@ try_this_zone:
 		if (page)
 			break;
 this_zone_full:
-		if (NUMA_BUILD)
+		if (IS_ENABLED(CONFIG_NUMA))
 			zlc_mark_zone_full(zonelist, z);
 	}
 
-	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+	if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
 		/* Disable zlc cache for second zonelist scan */
 		zlc_active = 0;
 		goto zonelist_scan;
@@ -2266,7 +2267,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 		return NULL;
 
 	/* After successful reclaim, reconsider all zones for allocation */
-	if (NUMA_BUILD)
+	if (IS_ENABLED(CONFIG_NUMA))
 		zlc_clear_zones_full(zonelist);
 
 retry:
@@ -2412,7 +2413,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * allowed per node queues are empty and that nodes are
 	 * over allocated.
 	 */
-	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+	if (IS_ENABLED(CONFIG_NUMA) &&
+			(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
 		goto nopage;
 
 restart:
@@ -2819,7 +2821,7 @@ unsigned int nr_free_pagecache_pages(void)
 
 static inline void show_node(struct zone *zone)
 {
-	if (NUMA_BUILD)
+	if (IS_ENABLED(CONFIG_NUMA))
 		printk("Node %d ", zone_to_nid(zone));
 }
 