path: root/mm
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         |  6
-rw-r--r--  mm/hugetlb.c         | 26
-rw-r--r--  mm/madvise.c         |  8
-rw-r--r--  mm/memcontrol.c      | 14
-rw-r--r--  mm/mmzone.c          | 15
-rw-r--r--  mm/oom_kill.c        | 24
-rw-r--r--  mm/page-writeback.c  |  6
-rw-r--r--  mm/pdflush.c         | 31
-rw-r--r--  mm/rmap.c            |  2
-rw-r--r--  mm/slob.c            |  5
-rw-r--r--  mm/slub.c            |  6
-rw-r--r--  mm/swap_state.c      |  4
-rw-r--r--  mm/truncate.c        |  1
-rw-r--r--  mm/vmscan.c          |  2
-rw-r--r--  mm/vmstat.c          | 19
15 files changed, 83 insertions, 86 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 379ff0bcbf6e..1b60f30cebfa 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -121,7 +121,6 @@ void __remove_from_page_cache(struct page *page)
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	BUG_ON(page_mapped(page));
-	mem_cgroup_uncharge_cache_page(page);
 
 	/*
 	 * Some filesystems seem to re-dirty the page even after
@@ -145,6 +144,7 @@ void remove_from_page_cache(struct page *page)
 	spin_lock_irq(&mapping->tree_lock);
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
+	mem_cgroup_uncharge_cache_page(page);
 }
 
 static int sync_page(void *word)
@@ -476,13 +476,13 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		if (likely(!error)) {
 			mapping->nrpages++;
 			__inc_zone_page_state(page, NR_FILE_PAGES);
+			spin_unlock_irq(&mapping->tree_lock);
 		} else {
 			page->mapping = NULL;
+			spin_unlock_irq(&mapping->tree_lock);
 			mem_cgroup_uncharge_cache_page(page);
 			page_cache_release(page);
 		}
-
-		spin_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
 	} else
 		mem_cgroup_uncharge_cache_page(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 28c655ba9353..e83ad2c9228c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref)
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
 		return (struct resv_map *)(get_vma_private_data(vma) &
 							~HPAGE_RESV_MASK);
 	return NULL;
@@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
 	set_vma_private_data(vma, (get_vma_private_data(vma) &
 				HPAGE_RESV_MASK) | (unsigned long)map);
@@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
@@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 	if (vma->vm_flags & VM_NORESERVE)
 		return;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		/* Shared mappings always use reserves */
 		h->resv_huge_pages--;
 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
 		vma->vm_private_data = (void *)0;
 }
 
 /* Returns true if the VMA has associated reserve pages */
 static int vma_has_reserves(struct vm_area_struct *vma)
 {
-	if (vma->vm_flags & VM_SHARED)
+	if (vma->vm_flags & VM_MAYSHARE)
 		return 1;
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		return 1;
@@ -924,7 +924,7 @@ static long vma_needs_reservation(struct hstate *h,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		return region_chg(&inode->i_mapping->private_list,
 							idx, idx + 1);
@@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
 
@@ -1893,7 +1893,7 @@ retry_avoidcopy:
 	 * at the time of fork() could consume its reserves on COW instead
 	 * of the full address range.
 	 */
-	if (!(vma->vm_flags & VM_SHARED) &&
+	if (!(vma->vm_flags & VM_MAYSHARE) &&
 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
 			old_page != pagecache_page)
 		outside_reserve = 1;
@@ -2000,7 +2000,7 @@ retry:
 	clear_huge_page(page, address, huge_page_size(h));
 	__SetPageUptodate(page);
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		int err;
 		struct inode *inode = mapping->host;
 
@@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out_mutex;
 		}
 
-		if (!(vma->vm_flags & VM_SHARED))
+		if (!(vma->vm_flags & VM_MAYSHARE))
 			pagecache_page = hugetlbfs_pagecache_page(h,
 								vma, address);
 	}
@@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * to reserve the full area even if read-only as mprotect() may be
 	 * called to make the mapping read-write. Assume !vma is a shm mapping
 	 */
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		chg = region_chg(&inode->i_mapping->private_list, from, to);
 	else {
 		struct resv_map *resv_map = resv_map_alloc();
@@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * consumed reservations are stored in the map. Hence, nothing
 	 * else has to be done for private mappings here
 	 */
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
 }
diff --git a/mm/madvise.c b/mm/madvise.c
index 36d6ea2b6340..b9ce574827c8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -112,14 +112,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
 	if (!file)
 		return -EBADF;
 
-	/*
-	 * Page cache readahead assumes page cache pages are order-0 which
-	 * is not the case for hugetlbfs. Do not give a bad return value
-	 * but ignore the advice.
-	 */
-	if (vma->vm_flags & VM_HUGETLB)
-		return 0;
-
 	if (file->f_mapping->a_ops->get_xip_mem) {
 		/* no bad return value, but ignore advice */
 		return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 01c2d8f14685..78eb8552818b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -314,14 +314,6 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 	return mem;
 }
 
-static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
-{
-	if (!mem)
-		return true;
-	return css_is_removed(&mem->css);
-}
-
-
 /*
  * Call callback function against all cgroup under hierarchy tree.
  */
@@ -932,7 +924,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	if (unlikely(!mem))
 		return 0;
 
-	VM_BUG_ON(!mem || mem_cgroup_is_obsolete(mem));
+	VM_BUG_ON(css_is_removed(&mem->css));
 
 	while (1) {
 		int ret;
@@ -1488,8 +1480,9 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+#ifdef CONFIG_SWAP
 /*
- * called from __delete_from_swap_cache() and drop "page" account.
+ * called after __delete_from_swap_cache() and drop "page" account.
  * memcg information is recorded to swap_cgroup of "ent"
  */
 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
@@ -1506,6 +1499,7 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 	if (memcg)
 		css_put(&memcg->css);
 }
+#endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /*
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 16ce8b955dcf..f5b7d1760213 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -6,6 +6,7 @@
 
 
 #include <linux/stddef.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
 
@@ -72,3 +73,17 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
 	*zone = zonelist_zone(z);
 	return z;
 }
+
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone)
+{
+	if (page_to_pfn(page) != pfn)
+		return 0;
+
+	if (page_zone(page) != zone)
+		return 0;
+
+	return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 92bcf1db16b2..a7b2460e922b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -284,22 +284,28 @@ static void dump_tasks(const struct mem_cgroup *mem)
 	printk(KERN_INFO "[ pid ] uid tgid total_vm rss cpu oom_adj "
 		"name\n");
 	do_each_thread(g, p) {
-		/*
-		 * total_vm and rss sizes do not exist for tasks with a
-		 * detached mm so there's no need to report them.
-		 */
-		if (!p->mm)
-			continue;
+		struct mm_struct *mm;
+
 		if (mem && !task_in_mem_cgroup(p, mem))
 			continue;
 		if (!thread_group_leader(p))
 			continue;
 
 		task_lock(p);
+		mm = p->mm;
+		if (!mm) {
+			/*
+			 * total_vm and rss sizes do not exist for tasks with no
+			 * mm so there's no need to report them; they can't be
+			 * oom killed anyway.
+			 */
+			task_unlock(p);
+			continue;
+		}
 		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
-			p->pid, __task_cred(p)->uid, p->tgid,
-			p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p),
-			p->oomkilladj, p->comm);
+			p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
+			get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj,
+			p->comm);
 		task_unlock(p);
 	} while_each_thread(g, p);
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 30351f0063ac..bb553c3e955d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -94,12 +94,12 @@ unsigned long vm_dirty_bytes;
 /*
  * The interval between `kupdate'-style writebacks
  */
-unsigned int dirty_writeback_interval = 5 * 100; /* sentiseconds */
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 
 /*
  * The longest time for which data is allowed to remain dirty
  */
-unsigned int dirty_expire_interval = 30 * 100; /* sentiseconds */
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,7 +770,7 @@ static void wb_kupdate(unsigned long arg)
 
 	sync_supers();
 
-	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
+	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
 	start_jif = jiffies;
 	next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
 	nr_to_write = global_page_state(NR_FILE_DIRTY) +
diff --git a/mm/pdflush.c b/mm/pdflush.c
index f2caf96993f8..235ac440c44e 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -58,14 +58,6 @@ static DEFINE_SPINLOCK(pdflush_lock);
 int nr_pdflush_threads = 0;
 
 /*
- * The max/min number of pdflush threads. R/W by sysctl at
- * /proc/sys/vm/nr_pdflush_threads_max/min
- */
-int nr_pdflush_threads_max __read_mostly = MAX_PDFLUSH_THREADS;
-int nr_pdflush_threads_min __read_mostly = MIN_PDFLUSH_THREADS;
-
-
-/*
  * The time at which the pdflush thread pool last went empty
  */
 static unsigned long last_empty_jifs;
@@ -76,7 +68,7 @@ static unsigned long last_empty_jifs;
  * Thread pool management algorithm:
  *
  * - The minimum and maximum number of pdflush instances are bound
- *   by nr_pdflush_threads_min and nr_pdflush_threads_max.
+ *   by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
  *
  * - If there have been no idle pdflush instances for 1 second, create
  *   a new one.
@@ -142,13 +134,14 @@ static int __pdflush(struct pdflush_work *my_work)
 		 * To throttle creation, we reset last_empty_jifs.
 		 */
 		if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
-			if (list_empty(&pdflush_list) &&
-			    nr_pdflush_threads < nr_pdflush_threads_max) {
-				last_empty_jifs = jiffies;
-				nr_pdflush_threads++;
-				spin_unlock_irq(&pdflush_lock);
-				start_one_pdflush_thread();
-				spin_lock_irq(&pdflush_lock);
+			if (list_empty(&pdflush_list)) {
+				if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
+					last_empty_jifs = jiffies;
+					nr_pdflush_threads++;
+					spin_unlock_irq(&pdflush_lock);
+					start_one_pdflush_thread();
+					spin_lock_irq(&pdflush_lock);
+				}
 			}
 		}
 
@@ -160,7 +153,7 @@ static int __pdflush(struct pdflush_work *my_work)
 		 */
 		if (list_empty(&pdflush_list))
 			continue;
-		if (nr_pdflush_threads <= nr_pdflush_threads_min)
+		if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
 			continue;
 		pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
 		if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
@@ -266,9 +259,9 @@ static int __init pdflush_init(void)
 	 * Pre-set nr_pdflush_threads... If we fail to create,
 	 * the count will be decremented.
 	 */
-	nr_pdflush_threads = nr_pdflush_threads_min;
+	nr_pdflush_threads = MIN_PDFLUSH_THREADS;
 
-	for (i = 0; i < nr_pdflush_threads_min; i++)
+	for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
 		start_one_pdflush_thread();
 	return 0;
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 16521664010d..23122af32611 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -14,7 +14,7 @@
  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
- * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
+ * Contributions by Hugh Dickins 2003, 2004
  */
 
 /*
diff --git a/mm/slob.c b/mm/slob.c
index a2d4ab32198d..f92e66d558bd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -60,6 +60,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -255,6 +256,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 static void slob_free_pages(void *b, int order)
 {
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += 1 << order;
 	free_pages((unsigned long)b, order);
 }
 
@@ -407,7 +410,7 @@ static void slob_free(void *block, int size)
 		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
-		free_page((unsigned long)b);
+		slob_free_pages(b, 0);
 		return;
 	}
 
diff --git a/mm/slub.c b/mm/slub.c
index 7ab54ecbd3f3..65ffda5934b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
@@ -1170,6 +1171,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
 	__ClearPageSlab(page);
 	reset_page_mapcount(page);
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += pages;
 	__free_pages(page, order);
 }
 
@@ -1909,7 +1912,7 @@ static inline int calculate_order(int size)
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
 	order = slab_order(size, 1, MAX_ORDER, 1);
-	if (order <= MAX_ORDER)
+	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
 }
@@ -2522,6 +2525,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 static int __init setup_slub_max_order(char *str)
 {
 	get_option(&str, &slub_max_order);
+	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
 
 	return 1;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 3ecea98ecb45..1416e7e9e02d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -109,8 +109,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
  */
 void __delete_from_swap_cache(struct page *page)
 {
-	swp_entry_t ent = {.val = page_private(page)};
-
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageSwapCache(page));
 	VM_BUG_ON(PageWriteback(page));
@@ -121,7 +119,6 @@ void __delete_from_swap_cache(struct page *page)
 	total_swapcache_pages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
-	mem_cgroup_uncharge_swapcache(page, ent);
 }
 
 /**
@@ -191,6 +188,7 @@ void delete_from_swap_cache(struct page *page)
 	__delete_from_swap_cache(page);
 	spin_unlock_irq(&swapper_space.tree_lock);
 
+	mem_cgroup_uncharge_swapcache(page, entry);
 	swap_free(entry);
 	page_cache_release(page);
 }
diff --git a/mm/truncate.c b/mm/truncate.c
index 55206fab7b99..12e1579f9165 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -359,6 +359,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	BUG_ON(page_has_private(page));
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
+	mem_cgroup_uncharge_cache_page(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
 failed:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5fa3eda1f03f..d254306562cd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -470,10 +470,12 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 		swp_entry_t swap = { .val = page_private(page) };
 		__delete_from_swap_cache(page);
 		spin_unlock_irq(&mapping->tree_lock);
+		mem_cgroup_uncharge_swapcache(page, swap);
 		swap_free(swap);
 	} else {
 		__remove_from_page_cache(page);
 		spin_unlock_irq(&mapping->tree_lock);
+		mem_cgroup_uncharge_cache_page(page);
 	}
 
 	return 1;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 66f6130976cb..74d66dba0cbe 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -509,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 			continue;
 
 		page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
-		/*
-		 * Ordinarily, memory holes in flatmem still have a valid
-		 * memmap for the PFN range. However, an architecture for
-		 * embedded systems (e.g. ARM) can free up the memmap backing
-		 * holes to save memory on the assumption the memmap is
-		 * never used. The page_zone linkages are then broken even
-		 * though pfn_valid() returns true. Skip the page if the
-		 * linkages are broken. Even if this test passed, the impact
-		 * is that the counters for the movable type are off but
-		 * fragmentation monitoring is likely meaningless on small
-		 * systems.
-		 */
-		if (page_zone(page) != zone)
+
+		/* Watch for unexpected holes punched in the memmap */
+		if (!memmap_valid_within(pfn, page, zone))
 			continue;
-#endif
+
 		mtype = get_pageblock_migratetype(page);
 
 		if (mtype < MIGRATE_TYPES)