Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c          6
-rw-r--r--  mm/hugetlb.c         26
-rw-r--r--  mm/memcontrol.c      14
-rw-r--r--  mm/mmzone.c          15
-rw-r--r--  mm/oom_kill.c        24
-rw-r--r--  mm/page-writeback.c   6
-rw-r--r--  mm/page_alloc.c      69
-rw-r--r--  mm/percpu.c         141
-rw-r--r--  mm/rmap.c             2
-rw-r--r--  mm/slob.c             5
-rw-r--r--  mm/slub.c             6
-rw-r--r--  mm/swap_state.c       4
-rw-r--r--  mm/truncate.c         1
-rw-r--r--  mm/vmscan.c           2
-rw-r--r--  mm/vmstat.c          19
15 files changed, 115 insertions(+), 225 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 379ff0bcbf6e..1b60f30cebfa 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -121,7 +121,6 @@ void __remove_from_page_cache(struct page *page)
         mapping->nrpages--;
         __dec_zone_page_state(page, NR_FILE_PAGES);
         BUG_ON(page_mapped(page));
-        mem_cgroup_uncharge_cache_page(page);
 
         /*
          * Some filesystems seem to re-dirty the page even after
@@ -145,6 +144,7 @@ void remove_from_page_cache(struct page *page)
         spin_lock_irq(&mapping->tree_lock);
         __remove_from_page_cache(page);
         spin_unlock_irq(&mapping->tree_lock);
+        mem_cgroup_uncharge_cache_page(page);
 }
 
 static int sync_page(void *word)
@@ -476,13 +476,13 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                 if (likely(!error)) {
                         mapping->nrpages++;
                         __inc_zone_page_state(page, NR_FILE_PAGES);
+                        spin_unlock_irq(&mapping->tree_lock);
                 } else {
                         page->mapping = NULL;
+                        spin_unlock_irq(&mapping->tree_lock);
                         mem_cgroup_uncharge_cache_page(page);
                         page_cache_release(page);
                 }
-
-                spin_unlock_irq(&mapping->tree_lock);
                 radix_tree_preload_end();
         } else
                 mem_cgroup_uncharge_cache_page(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 28c655ba9353..e83ad2c9228c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref)
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
         VM_BUG_ON(!is_vm_hugetlb_page(vma));
-        if (!(vma->vm_flags & VM_SHARED))
+        if (!(vma->vm_flags & VM_MAYSHARE))
                 return (struct resv_map *)(get_vma_private_data(vma) &
                                 ~HPAGE_RESV_MASK);
         return NULL;
@@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
         VM_BUG_ON(!is_vm_hugetlb_page(vma));
-        VM_BUG_ON(vma->vm_flags & VM_SHARED);
+        VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
         set_vma_private_data(vma, (get_vma_private_data(vma) &
                         HPAGE_RESV_MASK) | (unsigned long)map);
@@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
         VM_BUG_ON(!is_vm_hugetlb_page(vma));
-        VM_BUG_ON(vma->vm_flags & VM_SHARED);
+        VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
@@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
         if (vma->vm_flags & VM_NORESERVE)
                 return;
 
-        if (vma->vm_flags & VM_SHARED) {
+        if (vma->vm_flags & VM_MAYSHARE) {
                 /* Shared mappings always use reserves */
                 h->resv_huge_pages--;
         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
         VM_BUG_ON(!is_vm_hugetlb_page(vma));
-        if (!(vma->vm_flags & VM_SHARED))
+        if (!(vma->vm_flags & VM_MAYSHARE))
                 vma->vm_private_data = (void *)0;
 }
 
 /* Returns true if the VMA has associated reserve pages */
 static int vma_has_reserves(struct vm_area_struct *vma)
 {
-        if (vma->vm_flags & VM_SHARED)
+        if (vma->vm_flags & VM_MAYSHARE)
                 return 1;
         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                 return 1;
@@ -924,7 +924,7 @@ static long vma_needs_reservation(struct hstate *h,
         struct address_space *mapping = vma->vm_file->f_mapping;
         struct inode *inode = mapping->host;
 
-        if (vma->vm_flags & VM_SHARED) {
+        if (vma->vm_flags & VM_MAYSHARE) {
                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                 return region_chg(&inode->i_mapping->private_list,
                                                 idx, idx + 1);
@@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
         struct address_space *mapping = vma->vm_file->f_mapping;
         struct inode *inode = mapping->host;
 
-        if (vma->vm_flags & VM_SHARED) {
+        if (vma->vm_flags & VM_MAYSHARE) {
                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
 
@@ -1893,7 +1893,7 @@ retry_avoidcopy:
          * at the time of fork() could consume its reserves on COW instead
          * of the full address range.
          */
-        if (!(vma->vm_flags & VM_SHARED) &&
+        if (!(vma->vm_flags & VM_MAYSHARE) &&
                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
                         old_page != pagecache_page)
                 outside_reserve = 1;
@@ -2000,7 +2000,7 @@ retry:
                 clear_huge_page(page, address, huge_page_size(h));
                 __SetPageUptodate(page);
 
-                if (vma->vm_flags & VM_SHARED) {
+                if (vma->vm_flags & VM_MAYSHARE) {
                         int err;
                         struct inode *inode = mapping->host;
 
@@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         goto out_mutex;
                 }
 
-                if (!(vma->vm_flags & VM_SHARED))
+                if (!(vma->vm_flags & VM_MAYSHARE))
                         pagecache_page = hugetlbfs_pagecache_page(h,
                                                         vma, address);
         }
@@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
          * to reserve the full area even if read-only as mprotect() may be
          * called to make the mapping read-write. Assume !vma is a shm mapping
          */
-        if (!vma || vma->vm_flags & VM_SHARED)
+        if (!vma || vma->vm_flags & VM_MAYSHARE)
                 chg = region_chg(&inode->i_mapping->private_list, from, to);
         else {
                 struct resv_map *resv_map = resv_map_alloc();
@@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode,
          * consumed reservations are stored in the map. Hence, nothing
          * else has to be done for private mappings here
          */
-        if (!vma || vma->vm_flags & VM_SHARED)
+        if (!vma || vma->vm_flags & VM_MAYSHARE)
                 region_add(&inode->i_mapping->private_list, from, to);
         return 0;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 01c2d8f14685..78eb8552818b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -314,14 +314,6 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
         return mem;
 }
 
-static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
-{
-        if (!mem)
-                return true;
-        return css_is_removed(&mem->css);
-}
-
-
 /*
  * Call callback function against all cgroup under hierarchy tree.
  */
@@ -932,7 +924,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
         if (unlikely(!mem))
                 return 0;
 
-        VM_BUG_ON(!mem || mem_cgroup_is_obsolete(mem));
+        VM_BUG_ON(css_is_removed(&mem->css));
 
         while (1) {
                 int ret;
@@ -1488,8 +1480,9 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+#ifdef CONFIG_SWAP
 /*
- * called from __delete_from_swap_cache() and drop "page" account.
+ * called after __delete_from_swap_cache() and drop "page" account.
  * memcg information is recorded to swap_cgroup of "ent"
  */
 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
@@ -1506,6 +1499,7 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
         if (memcg)
                 css_put(&memcg->css);
 }
+#endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /*
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 16ce8b955dcf..f5b7d1760213 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -6,6 +6,7 @@
 
 
 #include <linux/stddef.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
 
@@ -72,3 +73,17 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
         *zone = zonelist_zone(z);
         return z;
 }
+
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+int memmap_valid_within(unsigned long pfn,
+                                struct page *page, struct zone *zone)
+{
+        if (page_to_pfn(page) != pfn)
+                return 0;
+
+        if (page_zone(page) != zone)
+                return 0;
+
+        return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 92bcf1db16b2..a7b2460e922b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -284,22 +284,28 @@ static void dump_tasks(const struct mem_cgroup *mem)
         printk(KERN_INFO "[ pid ] uid tgid total_vm rss cpu oom_adj "
                "name\n");
         do_each_thread(g, p) {
-                /*
-                 * total_vm and rss sizes do not exist for tasks with a
-                 * detached mm so there's no need to report them.
-                 */
-                if (!p->mm)
-                        continue;
+                struct mm_struct *mm;
+
                 if (mem && !task_in_mem_cgroup(p, mem))
                         continue;
                 if (!thread_group_leader(p))
                         continue;
 
                 task_lock(p);
+                mm = p->mm;
+                if (!mm) {
+                        /*
+                         * total_vm and rss sizes do not exist for tasks with no
+                         * mm so there's no need to report them; they can't be
+                         * oom killed anyway.
+                         */
+                        task_unlock(p);
+                        continue;
+                }
                 printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
-                       p->pid, __task_cred(p)->uid, p->tgid,
-                       p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p),
-                       p->oomkilladj, p->comm);
+                       p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
+                       get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj,
+                       p->comm);
                 task_unlock(p);
         } while_each_thread(g, p);
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 30351f0063ac..bb553c3e955d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -94,12 +94,12 @@ unsigned long vm_dirty_bytes;
 /*
  * The interval between `kupdate'-style writebacks
  */
-unsigned int dirty_writeback_interval = 5 * 100; /* sentiseconds */
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 
 /*
  * The longest time for which data is allowed to remain dirty
  */
-unsigned int dirty_expire_interval = 30 * 100; /* sentiseconds */
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,7 +770,7 @@ static void wb_kupdate(unsigned long arg)
 
         sync_supers();
 
-        oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
+        oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
         start_jif = jiffies;
         next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
         nr_to_write = global_page_state(NR_FILE_DIRTY) +
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fe753ecf2aa5..474c7e9dd51a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -149,10 +149,6 @@ static unsigned long __meminitdata dma_reserve;
   static int __meminitdata nr_nodemap_entries;
   static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
   static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
-  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
-#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
   static unsigned long __initdata required_kernelcore;
   static unsigned long __initdata required_movablecore;
   static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
@@ -3103,64 +3099,6 @@ void __init sparse_memory_present_with_active_regions(int nid)
 }
 
 /**
- * push_node_boundaries - Push node boundaries to at least the requested boundary
- * @nid: The nid of the node to push the boundary for
- * @start_pfn: The start pfn of the node
- * @end_pfn: The end pfn of the node
- *
- * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
- * time. Specifically, on x86_64, SRAT will report ranges that can potentially
- * be hotplugged even though no physical memory exists. This function allows
- * an arch to push out the node boundaries so mem_map is allocated that can
- * be used later.
- */
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-void __init push_node_boundaries(unsigned int nid,
-                unsigned long start_pfn, unsigned long end_pfn)
-{
-        mminit_dprintk(MMINIT_TRACE, "zoneboundary",
-                        "Entering push_node_boundaries(%u, %lu, %lu)\n",
-                        nid, start_pfn, end_pfn);
-
-        /* Initialise the boundary for this node if necessary */
-        if (node_boundary_end_pfn[nid] == 0)
-                node_boundary_start_pfn[nid] = -1UL;
-
-        /* Update the boundaries */
-        if (node_boundary_start_pfn[nid] > start_pfn)
-                node_boundary_start_pfn[nid] = start_pfn;
-        if (node_boundary_end_pfn[nid] < end_pfn)
-                node_boundary_end_pfn[nid] = end_pfn;
-}
-
-/* If necessary, push the node boundary out for reserve hotadd */
-static void __meminit account_node_boundary(unsigned int nid,
-                unsigned long *start_pfn, unsigned long *end_pfn)
-{
-        mminit_dprintk(MMINIT_TRACE, "zoneboundary",
-                        "Entering account_node_boundary(%u, %lu, %lu)\n",
-                        nid, *start_pfn, *end_pfn);
-
-        /* Return if boundary information has not been provided */
-        if (node_boundary_end_pfn[nid] == 0)
-                return;
-
-        /* Check the boundaries and update if necessary */
-        if (node_boundary_start_pfn[nid] < *start_pfn)
-                *start_pfn = node_boundary_start_pfn[nid];
-        if (node_boundary_end_pfn[nid] > *end_pfn)
-                *end_pfn = node_boundary_end_pfn[nid];
-}
-#else
-void __init push_node_boundaries(unsigned int nid,
-                        unsigned long start_pfn, unsigned long end_pfn) {}
-
-static void __meminit account_node_boundary(unsigned int nid,
-                        unsigned long *start_pfn, unsigned long *end_pfn) {}
-#endif
-
-
-/**
  * get_pfn_range_for_nid - Return the start and end page frames for a node
  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
@@ -3185,9 +3123,6 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
 
         if (*start_pfn == -1UL)
                 *start_pfn = 0;
-
-        /* Push the node boundaries out if requested */
-        account_node_boundary(nid, start_pfn, end_pfn);
 }
 
 /*
@@ -3793,10 +3728,6 @@ void __init remove_all_active_ranges(void)
 {
         memset(early_node_map, 0, sizeof(early_node_map));
         nr_nodemap_entries = 0;
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-        memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
-        memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
-#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 }
 
 /* Compare two active node_active_regions */
diff --git a/mm/percpu.c b/mm/percpu.c
index 1aa5d8fbca12..c0b2c1a76e81 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -23,7 +23,7 @@
  * Allocation is done in offset-size areas of single unit space. Ie,
  * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring
- * percpu base registers UNIT_SIZE apart.
+ * percpu base registers pcpu_unit_size apart.
  *
  * There are usually many small percpu allocations many of them as
  * small as 4 bytes. The allocator organizes chunks into lists
@@ -38,8 +38,8 @@
  * region and negative allocated. Allocation inside a chunk is done
  * by scanning this map sequentially and serving the first matching
  * entry. This is mostly copied from the percpu_modalloc() allocator.
- * Chunks are also linked into a rb tree to ease address to chunk
- * mapping during free.
+ * Chunks can be determined from the address using the index field
+ * in the page struct. The index field contains a pointer to the chunk.
  *
  * To use this allocator, arch code should do the followings.
  *
@@ -61,7 +61,6 @@
 #include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/pfn.h>
-#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
@@ -88,7 +87,6 @@
 
 struct pcpu_chunk {
         struct list_head list;          /* linked to pcpu_slot lists */
-        struct rb_node rb_node;         /* key is chunk->vm->addr */
         int free_size;                  /* free bytes in the chunk */
         int contig_hint;                /* max contiguous size hint */
         struct vm_struct *vm;           /* mapped vmalloc region */
@@ -110,9 +108,21 @@ static size_t pcpu_chunk_struct_size __read_mostly;
 void *pcpu_base_addr __read_mostly;
 EXPORT_SYMBOL_GPL(pcpu_base_addr);
 
-/* optional reserved chunk, only accessible for reserved allocations */
+/*
+ * The first chunk which always exists. Note that unlike other
+ * chunks, this one can be allocated and mapped in several different
+ * ways and thus often doesn't live in the vmalloc area.
+ */
+static struct pcpu_chunk *pcpu_first_chunk;
+
+/*
+ * Optional reserved chunk. This chunk reserves part of the first
+ * chunk and serves it for reserved allocations. The amount of
+ * reserved offset is in pcpu_reserved_chunk_limit. When reserved
+ * area doesn't exist, the following variables contain NULL and 0
+ * respectively.
+ */
 static struct pcpu_chunk *pcpu_reserved_chunk;
-/* offset limit of the reserved chunk */
 static int pcpu_reserved_chunk_limit;
 
 /*
@@ -121,7 +131,7 @@ static int pcpu_reserved_chunk_limit;
  * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
  * protects allocation/reclaim paths, chunks and chunk->page arrays.
  * The latter is a spinlock and protects the index data structures -
- * chunk slots, rbtree, chunks and area maps in chunks.
+ * chunk slots, chunks and area maps in chunks.
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary. All actual memory
@@ -140,7 +150,6 @@ static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
 static DEFINE_SPINLOCK(pcpu_lock);      /* protects index data structures */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
-static struct rb_root pcpu_addr_root = RB_ROOT; /* chunks by address */
 
 /* reclaim work to release fully free chunks, scheduled from free path */
 static void pcpu_reclaim(struct work_struct *work);
@@ -191,6 +200,18 @@ static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
         return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
 }
 
+/* set the pointer to a chunk in a page struct */
+static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
+{
+        page->index = (unsigned long)pcpu;
+}
+
+/* obtain pointer to a chunk from a page struct */
+static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
+{
+        return (struct pcpu_chunk *)page->index;
+}
+
 /**
  * pcpu_mem_alloc - allocate memory
  * @size: bytes to allocate
@@ -257,93 +278,26 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
         }
 }
 
-static struct rb_node **pcpu_chunk_rb_search(void *addr,
-                                        struct rb_node **parentp)
-{
-        struct rb_node **p = &pcpu_addr_root.rb_node;
-        struct rb_node *parent = NULL;
-        struct pcpu_chunk *chunk;
-
-        while (*p) {
-                parent = *p;
-                chunk = rb_entry(parent, struct pcpu_chunk, rb_node);
-
-                if (addr < chunk->vm->addr)
-                        p = &(*p)->rb_left;
-                else if (addr > chunk->vm->addr)
-                        p = &(*p)->rb_right;
-                else
-                        break;
-        }
-
-        if (parentp)
-                *parentp = parent;
-        return p;
-}
-
 /**
- * pcpu_chunk_addr_search - search for chunk containing specified address
- * @addr: address to search for
- *
- * Look for chunk which might contain @addr. More specifically, it
- * searchs for the chunk with the highest start address which isn't
- * beyond @addr.
- *
- * CONTEXT:
- * pcpu_lock.
+ * pcpu_chunk_addr_search - determine chunk containing specified address
+ * @addr: address for which the chunk needs to be determined.
  *
  * RETURNS:
  * The address of the found chunk.
  */
 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 {
-        struct rb_node *n, *parent;
-        struct pcpu_chunk *chunk;
+        void *first_start = pcpu_first_chunk->vm->addr;
 
-        /* is it in the reserved chunk? */
-        if (pcpu_reserved_chunk) {
-                void *start = pcpu_reserved_chunk->vm->addr;
-
-                if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
+        /* is it in the first chunk? */
+        if (addr >= first_start && addr < first_start + pcpu_chunk_size) {
+                /* is it in the reserved area? */
+                if (addr < first_start + pcpu_reserved_chunk_limit)
                         return pcpu_reserved_chunk;
+                return pcpu_first_chunk;
         }
 
-        /* nah... search the regular ones */
-        n = *pcpu_chunk_rb_search(addr, &parent);
-        if (!n) {
-                /* no exactly matching chunk, the parent is the closest */
-                n = parent;
-                BUG_ON(!n);
-        }
-        chunk = rb_entry(n, struct pcpu_chunk, rb_node);
-
-        if (addr < chunk->vm->addr) {
-                /* the parent was the next one, look for the previous one */
-                n = rb_prev(n);
-                BUG_ON(!n);
-                chunk = rb_entry(n, struct pcpu_chunk, rb_node);
-        }
-
-        return chunk;
-}
-
-/**
- * pcpu_chunk_addr_insert - insert chunk into address rb tree
- * @new: chunk to insert
- *
- * Insert @new into address rb tree.
- *
- * CONTEXT:
- * pcpu_lock.
- */
-static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
-{
-        struct rb_node **p, *parent;
-
-        p = pcpu_chunk_rb_search(new->vm->addr, &parent);
-        BUG_ON(*p);
-        rb_link_node(&new->rb_node, parent, p);
-        rb_insert_color(&new->rb_node, &pcpu_addr_root);
+        return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
 /**
@@ -755,6 +709,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
                                                 alloc_mask, 0);
                         if (!*pagep)
                                 goto err;
+                        pcpu_set_page_chunk(*pagep, chunk);
                 }
         }
 
@@ -879,7 +834,6 @@ restart:
 
         spin_lock_irq(&pcpu_lock);
         pcpu_chunk_relocate(chunk, -1);
-        pcpu_chunk_addr_insert(chunk);
         goto restart;
 
 area_found:
@@ -968,7 +922,6 @@ static void pcpu_reclaim(struct work_struct *work)
                 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
                         continue;
 
-                rb_erase(&chunk->rb_node, &pcpu_addr_root);
                 list_move(&chunk->list, &todo);
         }
 
@@ -1147,7 +1100,8 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 
         if (reserved_size) {
                 schunk->free_size = reserved_size;
-                pcpu_reserved_chunk = schunk;   /* not for dynamic alloc */
+                pcpu_reserved_chunk = schunk;
+                pcpu_reserved_chunk_limit = static_size + reserved_size;
         } else {
                 schunk->free_size = dyn_size;
                 dyn_size = 0;                   /* dynamic area covered */
@@ -1158,8 +1112,6 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
         if (schunk->free_size)
                 schunk->map[schunk->map_used++] = schunk->free_size;
 
-        pcpu_reserved_chunk_limit = static_size + schunk->free_size;
-
         /* init dynamic chunk if necessary */
         if (dyn_size) {
                 dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
@@ -1226,13 +1178,8 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
         }
 
         /* link the first chunk in */
-        if (!dchunk) {
-                pcpu_chunk_relocate(schunk, -1);
-                pcpu_chunk_addr_insert(schunk);
-        } else {
-                pcpu_chunk_relocate(dchunk, -1);
-                pcpu_chunk_addr_insert(dchunk);
-        }
+        pcpu_first_chunk = dchunk ?: schunk;
+        pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
         /* we're done */
         pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
diff --git a/mm/rmap.c b/mm/rmap.c
index 16521664010d..23122af32611 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -14,7 +14,7 @@
  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
- * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
+ * Contributions by Hugh Dickins 2003, 2004
  */
 
 /*
diff --git a/mm/slob.c b/mm/slob.c
index 494f05f19417..9b1737b0787b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -60,6 +60,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -255,6 +256,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 static void slob_free_pages(void *b, int order)
 {
+        if (current->reclaim_state)
+                current->reclaim_state->reclaimed_slab += 1 << order;
         free_pages((unsigned long)b, order);
 }
 
@@ -407,7 +410,7 @@ static void slob_free(void *block, int size)
                 spin_unlock_irqrestore(&slob_lock, flags);
                 clear_slob_page(sp);
                 free_slob_page(sp);
-                free_page((unsigned long)b);
+                slob_free_pages(b, 0);
                 return;
         }
 
diff --git a/mm/slub.c b/mm/slub.c
index ea9e7160e2e7..5e805a6fe36c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
@@ -1170,6 +1171,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
         __ClearPageSlab(page);
         reset_page_mapcount(page);
+        if (current->reclaim_state)
+                current->reclaim_state->reclaimed_slab += pages;
         __free_pages(page, order);
 }
 
@@ -1909,7 +1912,7 @@ static inline int calculate_order(int size)
          * Doh this slab cannot be placed using slub_max_order.
          */
         order = slab_order(size, 1, MAX_ORDER, 1);
-        if (order <= MAX_ORDER)
+        if (order < MAX_ORDER)
                 return order;
         return -ENOSYS;
 }
@@ -2522,6 +2525,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 static int __init setup_slub_max_order(char *str)
 {
         get_option(&str, &slub_max_order);
+        slub_max_order = min(slub_max_order, MAX_ORDER - 1);
 
         return 1;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 3ecea98ecb45..1416e7e9e02d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -109,8 +109,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
  */
 void __delete_from_swap_cache(struct page *page)
 {
-        swp_entry_t ent = {.val = page_private(page)};
-
         VM_BUG_ON(!PageLocked(page));
         VM_BUG_ON(!PageSwapCache(page));
         VM_BUG_ON(PageWriteback(page));
@@ -121,7 +119,6 @@ void __delete_from_swap_cache(struct page *page)
         total_swapcache_pages--;
         __dec_zone_page_state(page, NR_FILE_PAGES);
         INC_CACHE_INFO(del_total);
-        mem_cgroup_uncharge_swapcache(page, ent);
 }
 
 /**
@@ -191,6 +188,7 @@ void delete_from_swap_cache(struct page *page)
         __delete_from_swap_cache(page);
         spin_unlock_irq(&swapper_space.tree_lock);
 
+        mem_cgroup_uncharge_swapcache(page, entry);
         swap_free(entry);
         page_cache_release(page);
 }
diff --git a/mm/truncate.c b/mm/truncate.c
index 55206fab7b99..12e1579f9165 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -359,6 +359,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
         BUG_ON(page_has_private(page));
         __remove_from_page_cache(page);
         spin_unlock_irq(&mapping->tree_lock);
+        mem_cgroup_uncharge_cache_page(page);
         page_cache_release(page);       /* pagecache ref */
         return 1;
 failed:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5fa3eda1f03f..d254306562cd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -470,10 +470,12 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
                 swp_entry_t swap = { .val = page_private(page) };
                 __delete_from_swap_cache(page);
                 spin_unlock_irq(&mapping->tree_lock);
+                mem_cgroup_uncharge_swapcache(page, swap);
                 swap_free(swap);
         } else {
                 __remove_from_page_cache(page);
                 spin_unlock_irq(&mapping->tree_lock);
+                mem_cgroup_uncharge_cache_page(page);
         }
 
         return 1;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 66f6130976cb..74d66dba0cbe 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -509,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                         continue;
 
                 page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
-                /*
-                 * Ordinarily, memory holes in flatmem still have a valid
-                 * memmap for the PFN range. However, an architecture for
-                 * embedded systems (e.g. ARM) can free up the memmap backing
-                 * holes to save memory on the assumption the memmap is
-                 * never used. The page_zone linkages are then broken even
-                 * though pfn_valid() returns true. Skip the page if the
-                 * linkages are broken. Even if this test passed, the impact
-                 * is that the counters for the movable type are off but
-                 * fragmentation monitoring is likely meaningless on small
-                 * systems.
-                 */
-                if (page_zone(page) != zone)
+
+                /* Watch for unexpected holes punched in the memmap */
+                if (!memmap_valid_within(pfn, page, zone))
                         continue;
-#endif
+
                 mtype = get_pageblock_migratetype(page);
 
                 if (mtype < MIGRATE_TYPES)