diff options
-rw-r--r-- | include/linux/gfp.h | 17 | ||||
-rw-r--r-- | include/linux/mmzone.h | 14 | ||||
-rw-r--r-- | mm/page_alloc.c | 28 |
3 files changed, 48 insertions, 11 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index bc68dd9a6d41..12a90a191c11 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -98,22 +98,29 @@ struct vm_area_struct; | |||
98 | 98 | ||
99 | static inline enum zone_type gfp_zone(gfp_t flags) | 99 | static inline enum zone_type gfp_zone(gfp_t flags) |
100 | { | 100 | { |
101 | int base = 0; | ||
102 | |||
103 | #ifdef CONFIG_NUMA | ||
104 | if (flags & __GFP_THISNODE) | ||
105 | base = MAX_NR_ZONES; | ||
106 | #endif | ||
107 | |||
101 | #ifdef CONFIG_ZONE_DMA | 108 | #ifdef CONFIG_ZONE_DMA |
102 | if (flags & __GFP_DMA) | 109 | if (flags & __GFP_DMA) |
103 | return ZONE_DMA; | 110 | return base + ZONE_DMA; |
104 | #endif | 111 | #endif |
105 | #ifdef CONFIG_ZONE_DMA32 | 112 | #ifdef CONFIG_ZONE_DMA32 |
106 | if (flags & __GFP_DMA32) | 113 | if (flags & __GFP_DMA32) |
107 | return ZONE_DMA32; | 114 | return base + ZONE_DMA32; |
108 | #endif | 115 | #endif |
109 | if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) == | 116 | if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) == |
110 | (__GFP_HIGHMEM | __GFP_MOVABLE)) | 117 | (__GFP_HIGHMEM | __GFP_MOVABLE)) |
111 | return ZONE_MOVABLE; | 118 | return base + ZONE_MOVABLE; |
112 | #ifdef CONFIG_HIGHMEM | 119 | #ifdef CONFIG_HIGHMEM |
113 | if (flags & __GFP_HIGHMEM) | 120 | if (flags & __GFP_HIGHMEM) |
114 | return ZONE_HIGHMEM; | 121 | return base + ZONE_HIGHMEM; |
115 | #endif | 122 | #endif |
116 | return ZONE_NORMAL; | 123 | return base + ZONE_NORMAL; |
117 | } | 124 | } |
118 | 125 | ||
119 | /* | 126 | /* |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f21e5951038b..f6167f2fd7fb 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -324,6 +324,17 @@ struct zone { | |||
324 | #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) | 324 | #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) |
325 | 325 | ||
326 | #ifdef CONFIG_NUMA | 326 | #ifdef CONFIG_NUMA |
327 | |||
328 | /* | ||
329 | * The NUMA zonelists are doubled becausse we need zonelists that restrict the | ||
330 | * allocations to a single node for GFP_THISNODE. | ||
331 | * | ||
332 | * [0 .. MAX_NR_ZONES -1] : Zonelists with fallback | ||
333 | * [MAZ_NR_ZONES ... MAZ_ZONELISTS -1] : No fallback (GFP_THISNODE) | ||
334 | */ | ||
335 | #define MAX_ZONELISTS (2 * MAX_NR_ZONES) | ||
336 | |||
337 | |||
327 | /* | 338 | /* |
328 | * We cache key information from each zonelist for smaller cache | 339 | * We cache key information from each zonelist for smaller cache |
329 | * footprint when scanning for free pages in get_page_from_freelist(). | 340 | * footprint when scanning for free pages in get_page_from_freelist(). |
@@ -389,6 +400,7 @@ struct zonelist_cache { | |||
389 | unsigned long last_full_zap; /* when last zap'd (jiffies) */ | 400 | unsigned long last_full_zap; /* when last zap'd (jiffies) */ |
390 | }; | 401 | }; |
391 | #else | 402 | #else |
403 | #define MAX_ZONELISTS MAX_NR_ZONES | ||
392 | struct zonelist_cache; | 404 | struct zonelist_cache; |
393 | #endif | 405 | #endif |
394 | 406 | ||
@@ -455,7 +467,7 @@ extern struct page *mem_map; | |||
455 | struct bootmem_data; | 467 | struct bootmem_data; |
456 | typedef struct pglist_data { | 468 | typedef struct pglist_data { |
457 | struct zone node_zones[MAX_NR_ZONES]; | 469 | struct zone node_zones[MAX_NR_ZONES]; |
458 | struct zonelist node_zonelists[MAX_NR_ZONES]; | 470 | struct zonelist node_zonelists[MAX_ZONELISTS]; |
459 | int nr_zones; | 471 | int nr_zones; |
460 | #ifdef CONFIG_FLAT_NODE_MEM_MAP | 472 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
461 | struct page *node_mem_map; | 473 | struct page *node_mem_map; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d06f6e0f75aa..2f547f45de18 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1191,9 +1191,6 @@ zonelist_scan: | |||
1191 | !zlc_zone_worth_trying(zonelist, z, allowednodes)) | 1191 | !zlc_zone_worth_trying(zonelist, z, allowednodes)) |
1192 | continue; | 1192 | continue; |
1193 | zone = *z; | 1193 | zone = *z; |
1194 | if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) && | ||
1195 | zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) | ||
1196 | break; | ||
1197 | if ((alloc_flags & ALLOC_CPUSET) && | 1194 | if ((alloc_flags & ALLOC_CPUSET) && |
1198 | !cpuset_zone_allowed_softwall(zone, gfp_mask)) | 1195 | !cpuset_zone_allowed_softwall(zone, gfp_mask)) |
1199 | goto try_next_zone; | 1196 | goto try_next_zone; |
@@ -1262,7 +1259,10 @@ restart: | |||
1262 | z = zonelist->zones; /* the list of zones suitable for gfp_mask */ | 1259 | z = zonelist->zones; /* the list of zones suitable for gfp_mask */ |
1263 | 1260 | ||
1264 | if (unlikely(*z == NULL)) { | 1261 | if (unlikely(*z == NULL)) { |
1265 | /* Should this ever happen?? */ | 1262 | /* |
1263 | * Happens if we have an empty zonelist as a result of | ||
1264 | * GFP_THISNODE being used on a memoryless node | ||
1265 | */ | ||
1266 | return NULL; | 1266 | return NULL; |
1267 | } | 1267 | } |
1268 | 1268 | ||
@@ -1858,6 +1858,22 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) | |||
1858 | } | 1858 | } |
1859 | 1859 | ||
1860 | /* | 1860 | /* |
1861 | * Build gfp_thisnode zonelists | ||
1862 | */ | ||
1863 | static void build_thisnode_zonelists(pg_data_t *pgdat) | ||
1864 | { | ||
1865 | enum zone_type i; | ||
1866 | int j; | ||
1867 | struct zonelist *zonelist; | ||
1868 | |||
1869 | for (i = 0; i < MAX_NR_ZONES; i++) { | ||
1870 | zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i; | ||
1871 | j = build_zonelists_node(pgdat, zonelist, 0, i); | ||
1872 | zonelist->zones[j] = NULL; | ||
1873 | } | ||
1874 | } | ||
1875 | |||
1876 | /* | ||
1861 | * Build zonelists ordered by zone and nodes within zones. | 1877 | * Build zonelists ordered by zone and nodes within zones. |
1862 | * This results in conserving DMA zone[s] until all Normal memory is | 1878 | * This results in conserving DMA zone[s] until all Normal memory is |
1863 | * exhausted, but results in overflowing to remote node while memory | 1879 | * exhausted, but results in overflowing to remote node while memory |
@@ -1961,7 +1977,7 @@ static void build_zonelists(pg_data_t *pgdat) | |||
1961 | int order = current_zonelist_order; | 1977 | int order = current_zonelist_order; |
1962 | 1978 | ||
1963 | /* initialize zonelists */ | 1979 | /* initialize zonelists */ |
1964 | for (i = 0; i < MAX_NR_ZONES; i++) { | 1980 | for (i = 0; i < MAX_ZONELISTS; i++) { |
1965 | zonelist = pgdat->node_zonelists + i; | 1981 | zonelist = pgdat->node_zonelists + i; |
1966 | zonelist->zones[0] = NULL; | 1982 | zonelist->zones[0] = NULL; |
1967 | } | 1983 | } |
@@ -2006,6 +2022,8 @@ static void build_zonelists(pg_data_t *pgdat) | |||
2006 | /* calculate node order -- i.e., DMA last! */ | 2022 | /* calculate node order -- i.e., DMA last! */ |
2007 | build_zonelists_in_zone_order(pgdat, j); | 2023 | build_zonelists_in_zone_order(pgdat, j); |
2008 | } | 2024 | } |
2025 | |||
2026 | build_thisnode_zonelists(pgdat); | ||
2009 | } | 2027 | } |
2010 | 2028 | ||
2011 | /* Construct the zonelist performance cache - see further mmzone.h */ | 2029 | /* Construct the zonelist performance cache - see further mmzone.h */ |