author		Ingo Molnar <mingo@elte.hu>	2010-04-30 03:56:41 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-04-30 03:56:44 -0400
commit		3ca50496c2677a2b3fdd3ede86660fd1433beac6 (patch)
tree		97a76d8479a8d8a96e04ed0694b8dbf89457bfcc /mm
parent		462b04e28a7ec1339c892117c3f20a40e55d0e83 (diff)
parent		66f41d4c5c8a5deed66fdcc84509376c9a0bf9d8 (diff)
Merge commit 'v2.6.34-rc6' into perf/core
Merge reason: update to the latest -rc.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--	mm/backing-dev.c	34
-rw-r--r--	mm/hugetlb.c		5
-rw-r--r--	mm/ksm.c		12
-rw-r--r--	mm/memcontrol.c		4
-rw-r--r--	mm/mmap.c		3
-rw-r--r--	mm/rmap.c		9
6 files changed, 53 insertions, 14 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f13e067e1467..707d0dc6da0f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -11,6 +11,8 @@
 #include <linux/writeback.h>
 #include <linux/device.h>
 
+static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
+
 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
 }
@@ -25,6 +27,11 @@ struct backing_dev_info default_backing_dev_info = {
 };
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
+struct backing_dev_info noop_backing_dev_info = {
+	.name		= "noop",
+};
+EXPORT_SYMBOL_GPL(noop_backing_dev_info);
+
 static struct class *bdi_class;
 
 /*
@@ -715,6 +722,33 @@ void bdi_destroy(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_destroy);
 
+/*
+ * For use from filesystems to quickly init and register a bdi associated
+ * with dirty writeback
+ */
+int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
+			   unsigned int cap)
+{
+	char tmp[32];
+	int err;
+
+	bdi->name = name;
+	bdi->capabilities = cap;
+	err = bdi_init(bdi);
+	if (err)
+		return err;
+
+	sprintf(tmp, "%.28s%s", name, "-%d");
+	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
+	if (err) {
+		bdi_destroy(bdi);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(bdi_setup_and_register);
+
 static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
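The new helper is meant to be called from a filesystem's mount path. A minimal caller sketch follows, assuming a hypothetical example_fs with a backing_dev_info embedded in its per-superblock info; example_fs_fill_super, example_sb_info, and the BDI_CAP_MAP_COPY choice are illustrative assumptions, not taken from this commit.

/* Hypothetical caller of bdi_setup_and_register(); all example_* names
 * are illustrative only. */
#include <linux/backing-dev.h>
#include <linux/fs.h>

struct example_sb_info {
	struct backing_dev_info bdi;
};

static int example_fs_fill_super(struct super_block *sb)
{
	struct example_sb_info *sbi = sb->s_fs_info;
	int err;

	/* One call does bdi_init() + bdi_register(); the "-%d" suffix
	 * filled from bdi_seq keeps multiple mounts distinct in sysfs. */
	err = bdi_setup_and_register(&sbi->bdi, "example_fs", BDI_CAP_MAP_COPY);
	if (err)
		return err;

	sb->s_bdi = &sbi->bdi;
	return 0;
}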
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6034dc9e9796..ffbdfc86aedf 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -546,6 +546,7 @@ static void free_huge_page(struct page *page)
 
 	mapping = (struct address_space *) page_private(page);
 	set_page_private(page, 0);
+	page->mapping = NULL;
 	BUG_ON(page_count(page));
 	INIT_LIST_HEAD(&page->lru);
 
@@ -2447,8 +2448,10 @@ retry:
 		spin_lock(&inode->i_lock);
 		inode->i_blocks += blocks_per_huge_page(h);
 		spin_unlock(&inode->i_lock);
-	} else
+	} else {
 		lock_page(page);
+		page->mapping = HUGETLB_POISON;
+	}
 }
 
 /*
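HUGETLB_POISON is a poison-pointer constant from include/linux/poison.h: storing it in ->mapping makes any stale dereference fault loudly and recognizably instead of reading garbage. For context, the definition follows the existing LIST_POISON pattern; the value below is recalled from kernels of this era and should be verified against poison.h rather than taken as part of this commit.

/* From include/linux/poison.h (same era); POISON_POINTER_DELTA lets an
 * architecture shift poison values into a guaranteed-unmapped range. */
#define HUGETLB_POISON	((void *)(0x00300300 + POISON_POINTER_DELTA))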
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 	do {
 		cond_resched();
 		page = follow_page(vma, addr, FOLL_GET);
-		if (!page)
+		if (IS_ERR_OR_NULL(page))
 			break;
 		if (PageKsm(page))
 			ret = handle_mm_fault(vma->vm_mm, vma, addr,
@@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
 		goto out;
 
 	page = follow_page(vma, addr, FOLL_GET);
-	if (!page)
+	if (IS_ERR_OR_NULL(page))
 		goto out;
 	if (PageAnon(page)) {
 		flush_anon_page(vma, page, addr);
@@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
 		cond_resched();
 		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
 		tree_page = get_mergeable_page(tree_rmap_item);
-		if (!tree_page)
+		if (IS_ERR_OR_NULL(tree_page))
 			return NULL;
 
 		/*
@@ -1294,7 +1294,7 @@ next_mm:
			if (ksm_test_exit(mm))
				break;
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
-			if (*page && PageAnon(*page)) {
+			if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(slot,
@@ -1308,7 +1308,7 @@ next_mm:
				up_read(&mm->mmap_sem);
				return rmap_item;
			}
-			if (*page)
+			if (!IS_ERR_OR_NULL(*page))
				put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
@@ -1367,7 +1367,7 @@ next_mm:
 static void ksm_do_scan(unsigned int scan_npages)
 {
 	struct rmap_item *rmap_item;
-	struct page *page;
+	struct page *uninitialized_var(page);
 
 	while (scan_npages--) {
 		cond_resched();
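The ksm.c hunks are all the same fix: follow_page() can return an ERR_PTR-encoded error (e.g. -EFAULT) as well as NULL, so a bare NULL check would let KSM dereference an error pointer. A sketch of the idiom these hunks adopt, with get_page_checked() as a hypothetical wrapper, not a function from this commit:

#include <linux/err.h>
#include <linux/mm.h>

/* Hypothetical wrapper: IS_ERR_OR_NULL() covers both the NULL and the
 * ERR_PTR return cases of follow_page(). */
static struct page *get_page_checked(struct vm_area_struct *vma,
				     unsigned long addr)
{
	struct page *page = follow_page(vma, addr, FOLL_GET);

	if (IS_ERR_OR_NULL(page))
		return NULL;		/* treat errors like "no page" */
	return page;			/* caller must put_page() */
}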
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ede99c8b9b..6c755de385f7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2429,11 +2429,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	}
 	unlock_page_cgroup(pc);
 
+	*ptr = mem;
 	if (mem) {
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
 		css_put(&mem->css);
 	}
-	*ptr = mem;
 	return ret;
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1977,7 +1977,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		return 0;
 
 	/* Clean everything up if vma_adjust failed. */
-	new->vm_ops->close(new);
+	if (new->vm_ops && new->vm_ops->close)
+		new->vm_ops->close(new);
 	if (new->vm_file) {
 		if (vma->vm_flags & VM_EXECUTABLE)
 			removed_exe_file_vma(mm);
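Both vm_ops itself and its individual callbacks are optional, so the error path must test each before calling, which is exactly what the hunk above adds. The same defensive-call pattern as a standalone helper; vma_close_safe is an illustrative name, not from this commit:

/* Illustrative helper: call an optional vm_operations_struct callback
 * only when both the ops table and the specific hook are present. */
static void vma_close_safe(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
}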
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}
-		spin_lock(&anon_vma->lock);
 
+		spin_lock(&anon_vma->lock);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
@@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma)
			list_add(&avc->same_vma, &vma->anon_vma_chain);
			list_add(&avc->same_anon_vma, &anon_vma->head);
			allocated = NULL;
+			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
-
		spin_unlock(&anon_vma->lock);
-		if (unlikely(allocated)) {
+
+		if (unlikely(allocated))
			anon_vma_free(allocated);
+		if (unlikely(avc))
			anon_vma_chain_free(avc);
-		}
	}
	return 0;
 
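The rmap.c fix pairs each allocation with a local pointer that is NULLed once the object has been handed off under the lock, so the unlock path frees exactly the leftovers; avc in particular was previously leaked or double-used. The same publish-or-free pattern in a self-contained userspace sketch, with pthreads standing in for spinlocks and all names illustrative:

#include <stdlib.h>
#include <pthread.h>

/* Publish-or-free: allocate outside the lock, hand off inside it by
 * NULLing the local pointer, then free whatever was not consumed after
 * unlocking -- mirroring how anon_vma_prepare() now handles avc. */
struct node { int val; };

static struct node *shared;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int publish_once(int val)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->val = val;

	pthread_mutex_lock(&lock);
	if (!shared) {		/* we won the race: ownership transfers */
		shared = n;
		n = NULL;	/* consumed; must not be freed below */
	}
	pthread_mutex_unlock(&lock);

	free(n);		/* no-op when the node was published */
	return 0;
}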