Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b06a9636d971..3974fd81d27c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -42,13 +42,13 @@
  * MCD - HACK: Find somewhere to initialize this EARLY, or make this
  * initializer cleaner
  */
-nodemask_t node_online_map = { { [0] = 1UL } };
+nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
 EXPORT_SYMBOL(node_online_map);
-nodemask_t node_possible_map = NODE_MASK_ALL;
+nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
 EXPORT_SYMBOL(node_possible_map);
-struct pglist_data *pgdat_list;
-unsigned long totalram_pages;
-unsigned long totalhigh_pages;
+struct pglist_data *pgdat_list __read_mostly;
+unsigned long totalram_pages __read_mostly;
+unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
 
 /*
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(nr_swap_pages);
  * Used by page_zone() to look up the address of the struct zone whose
  * id is encoded in the upper bits of page->flags
  */
-struct zone *zone_table[1 << ZONETABLE_SHIFT];
+struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
 EXPORT_SYMBOL(zone_table);
 
 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
@@ -806,11 +806,14 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
 	classzone_idx = zone_idx(zones[0]);
 
 restart:
-	/* Go through the zonelist once, looking for a zone with enough free */
+	/*
+	 * Go through the zonelist once, looking for a zone with enough free.
+	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+	 */
 	for (i = 0; (z = zones[i]) != NULL; i++) {
 		int do_reclaim = should_reclaim_zone(z, gfp_mask);
 
-		if (!cpuset_zone_allowed(z))
+		if (!cpuset_zone_allowed(z, __GFP_HARDWALL))
 			continue;
 
 		/*
@@ -845,6 +848,7 @@ zone_reclaim_retry:
 	 *
 	 * This is the last chance, in general, before the goto nopage.
 	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 	 */
 	for (i = 0; (z = zones[i]) != NULL; i++) {
 		if (!zone_watermark_ok(z, order, z->pages_min,
@@ -852,7 +856,7 @@ zone_reclaim_retry:
 				gfp_mask & __GFP_HIGH))
 			continue;
 
-		if (wait && !cpuset_zone_allowed(z))
+		if (wait && !cpuset_zone_allowed(z, gfp_mask))
 			continue;
 
 		page = buffered_rmqueue(z, order, gfp_mask);
@@ -867,7 +871,7 @@ zone_reclaim_retry:
 	if (!(gfp_mask & __GFP_NOMEMALLOC)) {
 		/* go through the zonelist yet again, ignoring mins */
 		for (i = 0; (z = zones[i]) != NULL; i++) {
-			if (!cpuset_zone_allowed(z))
+			if (!cpuset_zone_allowed(z, gfp_mask))
 				continue;
 			page = buffered_rmqueue(z, order, gfp_mask);
 			if (page)
@@ -903,7 +907,7 @@ rebalance:
 				gfp_mask & __GFP_HIGH))
 			continue;
 
-		if (!cpuset_zone_allowed(z))
+		if (!cpuset_zone_allowed(z, gfp_mask))
 			continue;
 
 		page = buffered_rmqueue(z, order, gfp_mask);
@@ -922,7 +926,7 @@ rebalance:
 				classzone_idx, 0, 0))
 			continue;
 
-		if (!cpuset_zone_allowed(z))
+		if (!cpuset_zone_allowed(z, __GFP_HARDWALL))
 			continue;
 
 		page = buffered_rmqueue(z, order, gfp_mask);
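
Note on __read_mostly (editorial addition, not part of the patch): the annotation moves a variable into a dedicated linker section, so that globals which are read on hot paths but rarely written do not share cache lines with write-hot data. A rough sketch of how the macro is defined in kernels of roughly this vintage follows; the exact section name, header location, and set of architectures that opt in vary between versions, so treat it as an approximation of include/linux/cache.h rather than a quote:

#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64)
/* Collect read-mostly globals into their own section, grouped together by
 * the architecture's linker script, away from frequently written data. */
#define __read_mostly	__attribute__((__section__(".data.read_mostly")))
#else
#define __read_mostly	/* no-op: the variable stays in ordinary .data */
#endif

Architectures without such a section lose nothing: the annotation compiles away and placement reverts to normal, which is why tagging node_online_map, pgdat_list, totalram_pages, zone_table and friends above is a cheap change.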
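
Note on the cpuset_zone_allowed() change (editorial addition, not part of the patch): the new gfp_mask argument lets the allocator tell a hardwall check apart from an ordinary one. In the hunks above, the first optimistic zonelist scan and the pass in the last hunk request __GFP_HARDWALL explicitly and therefore stay strictly inside the current task's cpuset, while the intermediate passes forward the caller's gfp_mask and may be allowed to take memory from a wider set of nodes, per the kernel/cpuset.c comment the patch keeps pointing at. The sketch below is hypothetical and heavily simplified, not the kernel's actual implementation; ancestor_cpuset_allows_node() in particular is an invented stand-in for the real walk up the cpuset hierarchy:

/* Invented stand-in for the check against an enclosing cpuset that may
 * lend memory; the real logic lives in kernel/cpuset.c. */
static int ancestor_cpuset_allows_node(int nid)
{
	return 1;
}

/* Hypothetical sketch of the convention behind the two call forms above. */
static int cpuset_zone_allowed_sketch(struct zone *z, unsigned int gfp_mask)
{
	int nid = z->zone_pgdat->node_id;

	if (in_interrupt())
		return 1;	/* interrupt context has no cpuset to enforce */
	if (node_isset(nid, current->mems_allowed))
		return 1;	/* node lies inside the task's own cpuset */
	if (gfp_mask & __GFP_HARDWALL)
		return 0;	/* hardwall request: never leave the cpuset */
	/* softwall request: defer to whatever an enclosing cpuset permits */
	return ancestor_cpuset_allows_node(nid);
}

In short, the hardwalled call sites are confined to the cpuset regardless of the caller's flags, while the call sites that forward gfp_mask let the request's own flags decide how strict the containment is.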
