author    Ingo Molnar <mingo@elte.hu>  2009-04-08 11:25:42 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-04-08 11:26:00 -0400
commit    5af8c4e0fac9838428bd718040b664043a05f37c (patch)
tree      75a01d98ed244db45fe3c734c4a81c1a3d92ac37 /mm/page_alloc.c
parent    46e0bb9c12f4bab539736f1714cbf16600f681ec (diff)
parent    577c9c456f0e1371cbade38eaf91ae8e8a308555 (diff)
Merge commit 'v2.6.30-rc1' into sched/urgent

Merge reason: update to latest upstream to queue up fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  64
1 file changed, 36 insertions(+), 28 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 22b15a4cde8a..e2f26991fff1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -331,7 +331,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageTail(p) | (p->first_page != page))) {
+		if (unlikely(!PageTail(p) || (p->first_page != page))) {
 			bad_page(page);
 			bad++;
 		}
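
The operator fix above is worth calling out: the old code used bitwise | where logical || was intended. Both operands are 0/1-valued here, so the branch outcome is unchanged, but || short-circuits and makes the intent explicit. A toy sketch of the difference (illustration only, not kernel code; probe() is a hypothetical helper):

	/* '|' always evaluates both operands; '||' stops once the
	 * left-hand side is already true. */
	static int calls;
	static int probe(int v) { calls++; return v; }

	void demo(void)
	{
		calls = 0;
		if (probe(1) | probe(0)) { }	/* calls == 2 */
		calls = 0;
		if (probe(1) || probe(0)) { }	/* calls == 1 */
	}
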
@@ -922,13 +922,10 @@ static void drain_pages(unsigned int cpu)
 	unsigned long flags;
 	struct zone *zone;
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *pset;
 		struct per_cpu_pages *pcp;
 
-		if (!populated_zone(zone))
-			continue;
-
 		pset = zone_pcp(zone, cpu);
 
 		pcp = &pset->pcp;
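
This hunk, and the matching ones in show_free_areas() and process_zones() further down, replace the open-coded populated_zone() check with the new for_each_populated_zone() iterator. A minimal sketch of how the helper can be built on the existing zone walk, assuming the 2.6.30-era definition in include/linux/mmzone.h:

	/* Sketch, assuming the 2.6.30-era helper: walk every zone but
	 * filter out zones with no present pages, so loop bodies no
	 * longer need an explicit populated_zone() check. */
	#define for_each_populated_zone(zone)				\
		for (zone = (first_online_pgdat())->node_zones;		\
		     zone;						\
		     zone = next_zone(zone))				\
			if (!populated_zone(zone))			\
				; /* skip empty zone */			\
			else
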
@@ -1585,7 +1582,8 @@ nofail_alloc:
 		reclaim_state.reclaimed_slab = 0;
 		p->reclaim_state = &reclaim_state;
 
-		did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
+		did_some_progress = try_to_free_pages(zonelist, order,
+							gfp_mask, nodemask);
 
 		p->reclaim_state = NULL;
 		lockdep_clear_current_reclaim_state();
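
try_to_free_pages() grows a nodemask parameter so that direct reclaim only considers the nodes the allocation is allowed to use. A hedged sketch of the prototype change, assuming the 2.6.29 and 2.6.30-rc1 signatures in mm/vmscan.c:

	/* Before (assumed 2.6.29 prototype): */
	unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask);

	/* After (assumed 2.6.30-rc1 prototype): the caller's nodemask is
	 * threaded through so reclaim scans only eligible nodes. */
	unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *nodemask);
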
@@ -1879,10 +1877,7 @@ void show_free_areas(void)
 	int cpu;
 	struct zone *zone;
 
-	for_each_zone(zone) {
-		if (!populated_zone(zone))
-			continue;
-
+	for_each_populated_zone(zone) {
 		show_node(zone);
 		printk("%s per-cpu:\n", zone->name);
 
@@ -1922,12 +1917,9 @@ void show_free_areas(void)
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE));
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		int i;
 
-		if (!populated_zone(zone))
-			continue;
-
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -1967,12 +1959,9 @@ void show_free_areas(void)
 		printk("\n");
 	}
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
-		if (!populated_zone(zone))
-			continue;
-
 		show_node(zone);
 		printk("%s: ", zone->name);
 
@@ -2139,7 +2128,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
-	node_to_cpumask_ptr(tmp, 0);
+	const struct cpumask *tmp = cpumask_of_node(0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2160,8 +2149,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		node_to_cpumask_ptr_next(tmp, n);
-		if (!cpus_empty(*tmp))
+		tmp = cpumask_of_node(n);
+		if (!cpumask_empty(tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
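
These two hunks come from the cpumask API rework: node_to_cpumask_ptr()/node_to_cpumask_ptr_next(), which could copy a cpumask_t onto the stack, give way to cpumask_of_node(), which hands back a const pointer to a shared mask, and cpus_empty() gives way to the pointer-based cpumask_empty(). A hedged before/after sketch of the idiom:

	/* Old idiom (assumed pre-2.6.30 helpers): */
	node_to_cpumask_ptr(tmp, 0);		/* declare tmp for node 0 */
	node_to_cpumask_ptr_next(tmp, n);	/* repoint tmp at node n  */
	if (!cpus_empty(*tmp))
		val += PENALTY_FOR_NODE_WITH_CPUS;

	/* New idiom: borrow a const pointer to the node's mask; nothing
	 * is copied, and the pointer-based predicate is used instead. */
	const struct cpumask *tmp = cpumask_of_node(n);
	if (!cpumask_empty(tmp))
		val += PENALTY_FOR_NODE_WITH_CPUS;
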
@@ -2784,11 +2773,7 @@ static int __cpuinit process_zones(int cpu)
 
 	node_set_state(node, N_CPU);	/* this node has a cpu */
 
-	for_each_zone(zone) {
-
-		if (!populated_zone(zone))
-			continue;
-
+	for_each_populated_zone(zone) {
 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
 					 GFP_KERNEL, node);
 		if (!zone_pcp(zone, cpu))
@@ -2994,7 +2979,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	int i;
 
@@ -3005,10 +2990,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 		if (start_pfn <= pfn && pfn < end_pfn)
 			return early_node_map[i].nid;
 	}
+	/* This is a memory hole */
+	return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+	int nid;
 
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0)
+		return nid;
+	/* just returns 0 */
 	return 0;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+	int nid;
+
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0 && nid != node)
+		return false;
+	return true;
+}
+#endif
 
 /* Basic iterator support to walk early_node_map[] */
 #define for_each_active_range_index_in_nid(i, nid) \
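
The final hunks split early_pfn_to_nid() in two: __early_pfn_to_nid() reports a pfn that falls into a memory hole as -1, while the early_pfn_to_nid() wrapper keeps the old fall-back-to-node-0 behavior for existing callers, and early_pfn_in_nid() uses the raw variant so holes are not misattributed during memmap init. A hedged usage sketch (handle_memory_hole() is a hypothetical caller):

	/* Callers that must distinguish holes use the raw helper ... */
	int nid = __early_pfn_to_nid(pfn);
	if (nid < 0)
		handle_memory_hole(pfn);	/* hypothetical */

	/* ... while legacy callers keep the old semantics: a hole
	 * quietly maps to node 0. */
	nid = early_pfn_to_nid(pfn);		/* never negative */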