author	Ingo Molnar <mingo@elte.hu>	2009-06-01 04:01:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-01 04:01:39 -0400
commit	23db9f430be9325a861c7762c1ffadad9ca528a8 (patch)
tree	1ebb681611c96f17aa4f96e28d6923824a8b210f /mm
parent	27b9613b7be39412775d0ab80f57229aa73bb07d (diff)
parent	3218911f839b6c85acbf872ad264ea69aa4d89ad (diff)
Merge branch 'linus' into perfcounters/core
Merge reason: merge almost-rc8 into perfcounters/core, which was -rc6
based - to pick up the latest upstream fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	6
-rw-r--r--	mm/hugetlb.c	26
-rw-r--r--	mm/memcontrol.c	14
-rw-r--r--	mm/mmzone.c	15
-rw-r--r--	mm/oom_kill.c	24
-rw-r--r--	mm/page-writeback.c	6
-rw-r--r--	mm/rmap.c	2
-rw-r--r--	mm/slob.c	5
-rw-r--r--	mm/slub.c	6
-rw-r--r--	mm/swap_state.c	4
-rw-r--r--	mm/truncate.c	1
-rw-r--r--	mm/vmscan.c	2
-rw-r--r--	mm/vmstat.c	19
13 files changed, 71 insertions, 59 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 379ff0bcbf6e..1b60f30cebfa 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -121,7 +121,6 @@ void __remove_from_page_cache(struct page *page)
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	BUG_ON(page_mapped(page));
-	mem_cgroup_uncharge_cache_page(page);
 
 	/*
 	 * Some filesystems seem to re-dirty the page even after
@@ -145,6 +144,7 @@ void remove_from_page_cache(struct page *page)
 	spin_lock_irq(&mapping->tree_lock);
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
+	mem_cgroup_uncharge_cache_page(page);
 }
 
 static int sync_page(void *word)
@@ -476,13 +476,13 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		if (likely(!error)) {
 			mapping->nrpages++;
 			__inc_zone_page_state(page, NR_FILE_PAGES);
+			spin_unlock_irq(&mapping->tree_lock);
 		} else {
 			page->mapping = NULL;
+			spin_unlock_irq(&mapping->tree_lock);
 			mem_cgroup_uncharge_cache_page(page);
 			page_cache_release(page);
 		}
-
-		spin_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
 	} else
 		mem_cgroup_uncharge_cache_page(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 28c655ba9353..e83ad2c9228c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref)
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
 		return (struct resv_map *)(get_vma_private_data(vma) &
 							~HPAGE_RESV_MASK);
 	return NULL;
@@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
 	set_vma_private_data(vma, (get_vma_private_data(vma) &
 				HPAGE_RESV_MASK) | (unsigned long)map);
@@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
@@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 	if (vma->vm_flags & VM_NORESERVE)
 		return;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		/* Shared mappings always use reserves */
 		h->resv_huge_pages--;
 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
 		vma->vm_private_data = (void *)0;
 }
 
 /* Returns true if the VMA has associated reserve pages */
 static int vma_has_reserves(struct vm_area_struct *vma)
 {
-	if (vma->vm_flags & VM_SHARED)
+	if (vma->vm_flags & VM_MAYSHARE)
 		return 1;
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		return 1;
@@ -924,7 +924,7 @@ static long vma_needs_reservation(struct hstate *h,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		return region_chg(&inode->i_mapping->private_list,
 							idx, idx + 1);
@@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
 
@@ -1893,7 +1893,7 @@ retry_avoidcopy:
 	 * at the time of fork() could consume its reserves on COW instead
 	 * of the full address range.
 	 */
-	if (!(vma->vm_flags & VM_SHARED) &&
+	if (!(vma->vm_flags & VM_MAYSHARE) &&
 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
 			old_page != pagecache_page)
 		outside_reserve = 1;
@@ -2000,7 +2000,7 @@ retry:
 		clear_huge_page(page, address, huge_page_size(h));
 		__SetPageUptodate(page);
 
-		if (vma->vm_flags & VM_SHARED) {
+		if (vma->vm_flags & VM_MAYSHARE) {
 			int err;
 			struct inode *inode = mapping->host;
 
@@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out_mutex;
 		}
 
-		if (!(vma->vm_flags & VM_SHARED))
+		if (!(vma->vm_flags & VM_MAYSHARE))
 			pagecache_page = hugetlbfs_pagecache_page(h,
 								vma, address);
 	}
@@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * to reserve the full area even if read-only as mprotect() may be
 	 * called to make the mapping read-write. Assume !vma is a shm mapping
 	 */
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		chg = region_chg(&inode->i_mapping->private_list, from, to);
 	else {
 		struct resv_map *resv_map = resv_map_alloc();
@@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * consumed reservations are stored in the map. Hence, nothing
 	 * else has to be done for private mappings here
 	 */
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 01c2d8f14685..78eb8552818b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -314,14 +314,6 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 	return mem;
 }
 
-static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
-{
-	if (!mem)
-		return true;
-	return css_is_removed(&mem->css);
-}
-
-
 /*
  * Call callback function against all cgroup under hierarchy tree.
  */
@@ -932,7 +924,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 	if (unlikely(!mem))
 		return 0;
 
-	VM_BUG_ON(!mem || mem_cgroup_is_obsolete(mem));
+	VM_BUG_ON(css_is_removed(&mem->css));
 
 	while (1) {
 		int ret;
@@ -1488,8 +1480,9 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+#ifdef CONFIG_SWAP
 /*
- * called from __delete_from_swap_cache() and drop "page" account.
+ * called after __delete_from_swap_cache() and drop "page" account.
  * memcg information is recorded to swap_cgroup of "ent"
  */
 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
@@ -1506,6 +1499,7 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 	if (memcg)
 		css_put(&memcg->css);
 }
+#endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /*
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 16ce8b955dcf..f5b7d1760213 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -6,6 +6,7 @@
 
 
 #include <linux/stddef.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
 
@@ -72,3 +73,17 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
 	*zone = zonelist_zone(z);
 	return z;
 }
+
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone)
+{
+	if (page_to_pfn(page) != pfn)
+		return 0;
+
+	if (page_zone(page) != zone)
+		return 0;
+
+	return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 92bcf1db16b2..a7b2460e922b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -284,22 +284,28 @@ static void dump_tasks(const struct mem_cgroup *mem)
 	printk(KERN_INFO "[ pid ] uid tgid total_vm rss cpu oom_adj "
 	       "name\n");
 	do_each_thread(g, p) {
-		/*
-		 * total_vm and rss sizes do not exist for tasks with a
-		 * detached mm so there's no need to report them.
-		 */
-		if (!p->mm)
-			continue;
+		struct mm_struct *mm;
+
 		if (mem && !task_in_mem_cgroup(p, mem))
 			continue;
 		if (!thread_group_leader(p))
 			continue;
 
 		task_lock(p);
+		mm = p->mm;
+		if (!mm) {
+			/*
+			 * total_vm and rss sizes do not exist for tasks with no
+			 * mm so there's no need to report them; they can't be
+			 * oom killed anyway.
+			 */
+			task_unlock(p);
+			continue;
+		}
 		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
-		       p->pid, __task_cred(p)->uid, p->tgid,
-		       p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p),
-		       p->oomkilladj, p->comm);
+		       p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
+		       get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj,
+		       p->comm);
 		task_unlock(p);
 	} while_each_thread(g, p);
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 30351f0063ac..bb553c3e955d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -94,12 +94,12 @@ unsigned long vm_dirty_bytes;
 /*
  * The interval between `kupdate'-style writebacks
  */
-unsigned int dirty_writeback_interval = 5 * 100; /* sentiseconds */
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 
 /*
  * The longest time for which data is allowed to remain dirty
 */
-unsigned int dirty_expire_interval = 30 * 100; /* sentiseconds */
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,7 +770,7 @@ static void wb_kupdate(unsigned long arg)
 
 	sync_supers();
 
-	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
+	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
 	start_jif = jiffies;
 	next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
 	nr_to_write = global_page_state(NR_FILE_DIRTY) +
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -14,7 +14,7 @@
  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
- * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
+ * Contributions by Hugh Dickins 2003, 2004
  */
 
 /*
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -60,6 +60,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -255,6 +256,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 static void slob_free_pages(void *b, int order)
 {
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += 1 << order;
 	free_pages((unsigned long)b, order);
 }
 
@@ -407,7 +410,7 @@ static void slob_free(void *block, int size)
 		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
-		free_page((unsigned long)b);
+		slob_free_pages(b, 0);
 		return;
 	}
 
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
@@ -1170,6 +1171,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
 	__ClearPageSlab(page);
 	reset_page_mapcount(page);
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += pages;
 	__free_pages(page, order);
 }
 
@@ -1909,7 +1912,7 @@ static inline int calculate_order(int size)
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
 	order = slab_order(size, 1, MAX_ORDER, 1);
-	if (order <= MAX_ORDER)
+	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
 }
@@ -2522,6 +2525,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 static int __init setup_slub_max_order(char *str)
 {
 	get_option(&str, &slub_max_order);
+	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
 
 	return 1;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 3ecea98ecb45..1416e7e9e02d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -109,8 +109,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
  */
 void __delete_from_swap_cache(struct page *page)
 {
-	swp_entry_t ent = {.val = page_private(page)};
-
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageSwapCache(page));
 	VM_BUG_ON(PageWriteback(page));
@@ -121,7 +119,6 @@ void __delete_from_swap_cache(struct page *page)
 	total_swapcache_pages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
-	mem_cgroup_uncharge_swapcache(page, ent);
 }
 
 /**
@@ -191,6 +188,7 @@ void delete_from_swap_cache(struct page *page)
 	__delete_from_swap_cache(page);
 	spin_unlock_irq(&swapper_space.tree_lock);
 
+	mem_cgroup_uncharge_swapcache(page, entry);
 	swap_free(entry);
 	page_cache_release(page);
 }
diff --git a/mm/truncate.c b/mm/truncate.c
index 55206fab7b99..12e1579f9165 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -359,6 +359,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	BUG_ON(page_has_private(page));
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
+	mem_cgroup_uncharge_cache_page(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
 failed:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5fa3eda1f03f..d254306562cd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -470,10 +470,12 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 		swp_entry_t swap = { .val = page_private(page) };
 		__delete_from_swap_cache(page);
 		spin_unlock_irq(&mapping->tree_lock);
+		mem_cgroup_uncharge_swapcache(page, swap);
 		swap_free(swap);
 	} else {
 		__remove_from_page_cache(page);
 		spin_unlock_irq(&mapping->tree_lock);
+		mem_cgroup_uncharge_cache_page(page);
 	}
 
 	return 1;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 66f6130976cb..74d66dba0cbe 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -509,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 			continue;
 
 		page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
-		/*
-		 * Ordinarily, memory holes in flatmem still have a valid
-		 * memmap for the PFN range. However, an architecture for
-		 * embedded systems (e.g. ARM) can free up the memmap backing
-		 * holes to save memory on the assumption the memmap is
-		 * never used. The page_zone linkages are then broken even
-		 * though pfn_valid() returns true. Skip the page if the
-		 * linkages are broken. Even if this test passed, the impact
-		 * is that the counters for the movable type are off but
-		 * fragmentation monitoring is likely meaningless on small
-		 * systems.
-		 */
-		if (page_zone(page) != zone)
+
+		/* Watch for unexpected holes punched in the memmap */
+		if (!memmap_valid_within(pfn, page, zone))
 			continue;
-#endif
+
 		mtype = get_pageblock_migratetype(page);
 
 		if (mtype < MIGRATE_TYPES)