diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-31 22:25:39 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-31 22:25:39 -0400 |
commit | ac694dbdbc403c00e2c14d10bc7b8412cc378259 (patch) | |
tree | e37328cfbeaf43716dd5914cad9179e57e84df76 /mm/mremap.c | |
parent | a40a1d3d0a2fd613fdec6d89d3c053268ced76ed (diff) | |
parent | 437ea90cc3afdca5229b41c6b1d38c4842756cb9 (diff) |
Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:
- MM
- a few random fixes
- a couple of RTC leftovers
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
rtc/rtc-88pm80x: remove unneeded devm_kfree
rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
tmpfs: distribute interleave better across nodes
mm: remove redundant initialization
mm: warn if pg_data_t isn't initialized with zero
mips: zero out pg_data_t when it's allocated
memcg: fix memory accounting scalability in shrink_page_list
mm/sparse: remove index_init_lock
mm/sparse: more checks on mem_section number
mm/sparse: optimize sparse_index_alloc
memcg: add mem_cgroup_from_css() helper
memcg: further prevent OOM with too many dirty pages
memcg: prevent OOM with too many dirty pages
mm: mmu_notifier: fix freed page still mapped in secondary MMU
mm: memcg: only check anon swapin page charges for swap cache
mm: memcg: only check swap cache pages for repeated charging
mm: memcg: split swapin charge function into private and public part
mm: memcg: remove needless !mm fixup to init_mm when charging
mm: memcg: remove unneeded shmem charge type
...
Diffstat (limited to 'mm/mremap.c')
-rw-r--r-- | mm/mremap.c | 2 |
1 file changed, 0 insertions, 2 deletions
diff --git a/mm/mremap.c b/mm/mremap.c index 21fed202ddad..cc06d0e48d05 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -260,7 +260,6 @@ static unsigned long move_vma(struct vm_area_struct *vma, | |||
260 | * If this were a serious issue, we'd add a flag to do_munmap(). | 260 | * If this were a serious issue, we'd add a flag to do_munmap(). |
261 | */ | 261 | */ |
262 | hiwater_vm = mm->hiwater_vm; | 262 | hiwater_vm = mm->hiwater_vm; |
263 | mm->total_vm += new_len >> PAGE_SHIFT; | ||
264 | vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); | 263 | vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); |
265 | 264 | ||
266 | if (do_munmap(mm, old_addr, old_len) < 0) { | 265 | if (do_munmap(mm, old_addr, old_len) < 0) { |
@@ -497,7 +496,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, | |||
497 | goto out; | 496 | goto out; |
498 | } | 497 | } |
499 | 498 | ||
500 | mm->total_vm += pages; | ||
501 | vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); | 499 | vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); |
502 | if (vma->vm_flags & VM_LOCKED) { | 500 | if (vma->vm_flags & VM_LOCKED) { |
503 | mm->locked_vm += pages; | 501 | mm->locked_vm += pages; |