author		Ingo Molnar <mingo@elte.hu>	2009-04-07 05:15:40 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-07 05:15:40 -0400
commit		5e34437840d33554f69380584311743b39e8fbeb
tree		e081135619ee146af5efb9ee883afca950df5757
parent		77d05632baee21b1cef8730d7c06aa69601e4dca
parent		d508afb437daee7cf07da085b635c44a4ebf9b38

Merge branch 'linus' into core/softlockup

Conflicts:
	kernel/sysctl.c
Diffstat (limited to 'mm/page_alloc.c'):

 mm/page_alloc.c | 69 +++++++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 41 insertions(+), 28 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b3073854..e2f26991fff1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -331,7 +331,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageTail(p) | (p->first_page != page))) {
+		if (unlikely(!PageTail(p) || (p->first_page != page))) {
 			bad_page(page);
 			bad++;
 		}
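This one-character fix deserves a note: both operands here are already 0-or-1, so '|' and '||' compute the same truth value; what changes is evaluation. '||' short-circuits, so p->first_page is only read once the PageTail() test has failed to short-circuit, and it states the logical intent. A minimal userspace demonstration of the difference (the probe() helper is hypothetical, purely for counting evaluations):

	#include <stdio.h>

	static int calls;

	static int probe(int v)
	{
		calls++;			/* count operand evaluations */
		return v;
	}

	int main(void)
	{
		calls = 0;
		(void)(1 | probe(1));		/* bitwise: both sides evaluated */
		printf("'|'  called probe %d time(s)\n", calls);	/* 1 */

		calls = 0;
		(void)(1 || probe(1));		/* logical: short-circuits */
		printf("'||' called probe %d time(s)\n", calls);	/* 0 */
		return 0;
	}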
@@ -922,13 +922,10 @@ static void drain_pages(unsigned int cpu)
 	unsigned long flags;
 	struct zone *zone;
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *pset;
 		struct per_cpu_pages *pcp;
 
-		if (!populated_zone(zone))
-			continue;
-
 		pset = zone_pcp(zone, cpu);
 
 		pcp = &pset->pcp;
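The hunk above is the first of several conversions to for_each_populated_zone(), an iterator this merge picks up from mainline. A sketch of how such a macro can fold the populated check into the loop itself -- the real definition lives in include/linux/mmzone.h on top of first_online_pgdat()/next_zone(), so treat the exact shape below as an approximation:

	#define for_each_populated_zone(zone)			\
		for (zone = (first_online_pgdat())->node_zones;	\
		     zone;					\
		     zone = next_zone(zone))			\
			if (!populated_zone(zone))		\
				; /* skip zones with no pages */\
			else

The dangling if/else keeps the construct usable as an ordinary for statement while silently skipping empty zones, which is what lets every open-coded 'if (!populated_zone(zone)) continue;' site be deleted.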
@@ -1479,6 +1476,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	unsigned long pages_reclaimed = 0;
 
+	lockdep_trace_alloc(gfp_mask);
+
 	might_sleep_if(wait);
 
 	if (should_fail_alloc_page(gfp_mask, order))
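lockdep_trace_alloc() comes from the lockdep reclaim-annotation work on this branch's side: it records the allocation's gfp_mask so lockdep can connect "lock held around a __GFP_FS allocation" with "same lock taken from reclaim". An illustrative, schematic sketch of the deadlock class it catches -- fs_lock and fs_writepage() are made-up names, not part of this patch:

	static DEFINE_MUTEX(fs_lock);

	static void alloc_path(size_t size)
	{
		void *buf;

		mutex_lock(&fs_lock);
		buf = kmalloc(size, GFP_KERNEL);	/* __GFP_FS set: may enter
							 * direct reclaim and call
							 * back into the fs */
		kfree(buf);
		mutex_unlock(&fs_lock);
	}

	static int fs_writepage(struct page *page, struct writeback_control *wbc)
	{
		mutex_lock(&fs_lock);	/* reclaim takes the same lock:
					 * deadlocks if reclaim was entered
					 * from alloc_path() with fs_lock held */
		/* ... write the page out ... */
		mutex_unlock(&fs_lock);
		return 0;
	}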
@@ -1578,12 +1577,16 @@ nofail_alloc:
 	 */
 	cpuset_update_task_memory_state();
 	p->flags |= PF_MEMALLOC;
+
+	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
+	did_some_progress = try_to_free_pages(zonelist, order,
+						gfp_mask, nodemask);
 
 	p->reclaim_state = NULL;
+	lockdep_clear_current_reclaim_state();
 	p->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
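Two independent changes meet in this hunk. From this branch's side, the lockdep reclaim-state calls must bracket try_to_free_pages() exactly as PF_MEMALLOC already does, so any lock acquired inside direct reclaim gets attributed to reclaim context. From the mainline side, try_to_free_pages() grows a nodemask argument so direct reclaim only scans the nodes the failing allocation is actually allowed to use.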
@@ -1874,10 +1877,7 @@ void show_free_areas(void)
 	int cpu;
 	struct zone *zone;
 
-	for_each_zone(zone) {
-		if (!populated_zone(zone))
-			continue;
-
+	for_each_populated_zone(zone) {
 		show_node(zone);
 		printk("%s per-cpu:\n", zone->name);
 
@@ -1917,12 +1917,9 @@ void show_free_areas(void)
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE));
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		int i;
 
-		if (!populated_zone(zone))
-			continue;
-
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -1962,12 +1959,9 @@ void show_free_areas(void)
 		printk("\n");
 	}
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
-		if (!populated_zone(zone))
-			continue;
-
 		show_node(zone);
 		printk("%s: ", zone->name);
 
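The three show_free_areas() hunks above are the same mechanical conversion as in drain_pages(): each zone loop drops its open-coded populated_zone() check in favour of the iterator, and the pattern recurs once more in process_zones() below. None of these change behaviour, since unpopulated zones were already being skipped.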
@@ -2134,7 +2128,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
-	node_to_cpumask_ptr(tmp, 0);
+	const struct cpumask *tmp = cpumask_of_node(0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2155,8 +2149,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		node_to_cpumask_ptr_next(tmp, n);
-		if (!cpus_empty(*tmp))
+		tmp = cpumask_of_node(n);
+		if (!cpumask_empty(tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
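The node_to_cpumask_ptr() helpers being replaced date from fixed-size cpumask_t handling; with NR_CPUS=4096 a cpumask is 512 bytes, so the newer API hands out 'const struct cpumask *' pointers and every operation works on pointers. A short sketch of the replacement idiom -- cpumask_of_node() and cpumask_empty() are the real interfaces, nid/other_nid are placeholders:

	const struct cpumask *mask = cpumask_of_node(nid);	/* no copy made */

	if (!cpumask_empty(mask))	/* pointer-based, was cpus_empty(*tmp) */
		val += PENALTY_FOR_NODE_WITH_CPUS;

	mask = cpumask_of_node(other_nid);	/* re-point, never re-copy */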
@@ -2779,11 +2773,7 @@ static int __cpuinit process_zones(int cpu)
 
 	node_set_state(node, N_CPU);	/* this node has a cpu */
 
-	for_each_zone(zone) {
-
-		if (!populated_zone(zone))
-			continue;
-
+	for_each_populated_zone(zone) {
 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
 					 GFP_KERNEL, node);
 		if (!zone_pcp(zone, cpu))
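process_zones() gets the same iterator treatment. Worth noting in passing: the per-CPU pagesets it allocates go through kmalloc_node() with the node that owns the CPU, so each CPU's hot page lists sit in node-local memory.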
@@ -2989,7 +2979,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	int i;
 
@@ -3000,10 +2990,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 		if (start_pfn <= pfn && pfn < end_pfn)
 			return early_node_map[i].nid;
 	}
+	/* This is a memory hole */
+	return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+	int nid;
 
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0)
+		return nid;
+	/* just returns 0 */
 	return 0;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+	int nid;
+
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0 && nid != node)
+		return false;
+	return true;
+}
+#endif
 
 /* Basic iterator support to walk early_node_map[] */
 #define for_each_active_range_index_in_nid(i, nid) \
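The last two hunks split the early node lookup into three pieces: __early_pfn_to_nid() now reports a pfn that falls outside every early_node_map[] range as -1, early_pfn_to_nid() preserves the old "holes belong to node 0" answer for existing callers, and the new early_pfn_in_nid() treats a hole as matching any node, so CONFIG_NODES_SPAN_OTHER_NODES kernels still initialise memmap pages that land in holes. A small userspace model of that contract (the table contents are invented; the kernel fills early_node_map[] from the firmware memory map):

	#include <stdio.h>

	struct range { unsigned long start, end; int nid; };

	static const struct range early_node_map[] = {
		{ 0x000, 0x100, 0 },
		{ 0x200, 0x300, 1 },	/* hole at [0x100, 0x200) */
	};
	#define MAP_LEN (sizeof(early_node_map) / sizeof(early_node_map[0]))

	static int __early_pfn_to_nid(unsigned long pfn)
	{
		for (unsigned int i = 0; i < MAP_LEN; i++)
			if (early_node_map[i].start <= pfn &&
			    pfn < early_node_map[i].end)
				return early_node_map[i].nid;
		return -1;			/* memory hole */
	}

	static int early_pfn_to_nid(unsigned long pfn)
	{
		int nid = __early_pfn_to_nid(pfn);
		return nid >= 0 ? nid : 0;	/* holes fall back to node 0 */
	}

	static int early_pfn_in_nid(unsigned long pfn, int node)
	{
		int nid = __early_pfn_to_nid(pfn);
		return nid < 0 || nid == node;	/* holes match every node */
	}

	int main(void)
	{
		printf("pfn 0x250 -> nid %d\n", early_pfn_to_nid(0x250));
		printf("pfn 0x150 -> nid %d (hole)\n", early_pfn_to_nid(0x150));
		printf("hole counted as node 1? %d\n", early_pfn_in_nid(0x150, 1));
		return 0;
	}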