author     Tejun Heo <tj@kernel.org>  2011-05-24 03:59:36 -0400
committer  Tejun Heo <tj@kernel.org>  2011-05-24 03:59:36 -0400
commit     6988f20fe04e9ef3aea488cb8ab57fbeb78e12f0 (patch)
tree       c9d7fc50a2e2147a5ca07e3096e7eeb916ad2da9 /mm
parent     0415b00d175e0d8945e6785aad21b5f157976ce0 (diff)
parent     6ea0c34dac89611126455537552cffe6c7e832ad (diff)
Merge branch 'fixes-2.6.39' into for-2.6.40
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c   8
-rw-r--r--  mm/filemap.c      10
-rw-r--r--  mm/memory.c        2
-rw-r--r--  mm/oom_kill.c      2
-rw-r--r--  mm/page_alloc.c    2
-rw-r--r--  mm/percpu.c        4
-rw-r--r--  mm/rmap.c          5
-rw-r--r--  mm/slub.c          6
8 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8fe9d3407921..0d9a036ada66 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -67,14 +67,14 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 	struct inode *inode;
 
 	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
-	spin_lock(&inode_lock);
+	spin_lock(&inode_wb_list_lock);
 	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
 		nr_dirty++;
 	list_for_each_entry(inode, &wb->b_io, i_wb_list)
 		nr_io++;
 	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
 		nr_more_io++;
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_wb_list_lock);
 
 	global_dirty_limits(&background_thresh, &dirty_thresh);
 	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
@@ -676,11 +676,11 @@ void bdi_destroy(struct backing_dev_info *bdi)
 	if (bdi_has_dirty_io(bdi)) {
 		struct bdi_writeback *dst = &default_backing_dev_info.wb;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_wb_list_lock);
 		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
 		list_splice(&bdi->wb.b_io, &dst->b_io);
 		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_wb_list_lock);
 	}
 
 	bdi_unregister(bdi);
diff --git a/mm/filemap.c b/mm/filemap.c
index 04d1992fd86b..c641edf553a9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -80,8 +80,8 @@
  *  ->i_mutex
  *    ->i_alloc_sem		(various)
  *
- *  ->inode_lock
- *    ->sb_lock		(fs/fs-writeback.c)
+ *  inode_wb_list_lock
+ *    sb_lock			(fs/fs-writeback.c)
  *    ->mapping->tree_lock	(__sync_single_inode)
  *
  *  ->i_mmap_lock
@@ -98,8 +98,10 @@
  *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
  *    ->private_lock		(page_remove_rmap->set_page_dirty)
  *    ->tree_lock		(page_remove_rmap->set_page_dirty)
- *    ->inode_lock		(page_remove_rmap->set_page_dirty)
- *    ->inode_lock		(zap_pte_range->set_page_dirty)
+ *    inode_wb_list_lock	(page_remove_rmap->set_page_dirty)
+ *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
+ *    inode_wb_list_lock	(zap_pte_range->set_page_dirty)
+ *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
  *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
  *
  * (code doesn't rely on that order, so you could switch it around)
diff --git a/mm/memory.c b/mm/memory.c
index 51a5c23704af..9da8cab1b1b0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3715,7 +3715,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 }
 
 /**
- * @access_remote_vm - access another process' address space
+ * access_remote_vm - access another process' address space
  * @mm: the mm_struct of the target address space
  * @addr: start address to access
  * @buf: source or destination buffer
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 62a5cec08a17..6a819d1b2c7d 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -406,7 +406,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 	task_unlock(current);
 	dump_stack();
 	mem_cgroup_print_oom_info(mem, p);
-	__show_mem(SHOW_MEM_FILTER_NODES);
+	show_mem(SHOW_MEM_FILTER_NODES);
 	if (sysctl_oom_dump_tasks)
 		dump_tasks(mem, nodemask);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e5726ab0d85..d6e7ba7373be 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2195,7 +2195,7 @@ nopage:
 			current->comm, order, gfp_mask);
 		dump_stack();
 		if (!should_suppress_show_mem())
-			__show_mem(filter);
+			show_mem(filter);
 	}
 	return page;
 got_pg:
diff --git a/mm/percpu.c b/mm/percpu.c
index c5feb79f5995..8eb536645f68 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1648,8 +1648,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	/* warn if maximum distance is further than 75% of vmalloc space */
 	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
 		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
-			   "space 0x%lx\n",
-			   max_distance, VMALLOC_END - VMALLOC_START);
+			   "space 0x%lx\n", max_distance,
+			   (unsigned long)(VMALLOC_END - VMALLOC_START));
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 		/* and fail if we have fallback */
 		rc = -EINVAL;
diff --git a/mm/rmap.c b/mm/rmap.c
index 4a8e99a0fb97..8da044a1db0f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -31,11 +31,12 @@
  *             swap_lock (in swap_duplicate, swap_info_get)
  *               mmlist_lock (in mmput, drain_mmlist and others)
  *               mapping->private_lock (in __set_page_dirty_buffers)
- *               inode_lock (in set_page_dirty's __mark_inode_dirty)
+ *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *               inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
  *                 sb_lock (within inode_lock in fs/fs-writeback.c)
  *                 mapping->tree_lock (widely used, in set_page_dirty,
  *                           in arch-dependent flush_dcache_mmap_lock,
- *                           within inode_lock in __sync_single_inode)
+ *                           within inode_wb_list_lock in __sync_single_inode)
  *
  * (code doesn't rely on that order so it could be switched around)
  *   ->tasklist_lock
diff --git a/mm/slub.c b/mm/slub.c
index 93de30db95f5..f881874843a5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -849,11 +849,11 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 		local_irq_save(flags);
 		kmemcheck_slab_free(s, x, s->objsize);
 		debug_check_no_locks_freed(x, s->objsize);
-		if (!(s->flags & SLAB_DEBUG_OBJECTS))
-			debug_check_no_obj_freed(x, s->objsize);
 		local_irq_restore(flags);
 	}
 #endif
+	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(x, s->objsize);
 }
 
 /*
@@ -1604,7 +1604,7 @@ static inline void note_cmpxchg_failure(const char *n,
 
 void init_kmem_cache_cpus(struct kmem_cache *s)
 {
-#if defined(CONFIG_CMPXCHG_LOCAL) && defined(CONFIG_PREEMPT)
+#ifdef CONFIG_CMPXCHG_LOCAL
 	int cpu;
 
 	for_each_possible_cpu(cpu)