author | Gavin Shan <shangw@linux.vnet.ibm.com> | 2012-07-31 19:46:06 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-31 21:42:49 -0400
commit | c1c9518331969f97ea403bac66f0fd4a85d204d5 (patch)
tree | 9a3122ec4600b94784e86a99aec61a1e93a27da5 /mm
parent | db36a46113e101a8aa2d6ede41e78f2eaabed3f1 (diff)
mm/sparse: remove index_init_lock
sparse_index_init() uses the index_init_lock spinlock to protect the root
mem_section assignment. The lock is no longer necessary because the function
is called only during boot (from paging init, which runs on a single CPU)
and from the hotplug code (by add_memory() via arch_add_memory()), which is
serialized by mem_hotplug_mutex.
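For context, the hotplug path mentioned above is serialized end to end. A
condensed sketch of that path follows; it is simplified and not the verbatim
mm/memory_hotplug.c of that era (resource and node handling are omitted, and
the call chain below arch_add_memory() is compressed into a comment):

/* Sketch: how a memory hot-add reaches sparse_index_init() under the mutex. */
int __ref add_memory(int nid, u64 start, u64 size)
{
	int ret;

	lock_memory_hotplug();		/* takes mem_hotplug_mutex */

	/*
	 * arch_add_memory() -> __add_pages() -> sparse_add_one_section()
	 * -> sparse_index_init(); the whole chain runs under the mutex,
	 * so two hot-add operations cannot race on the same root.
	 */
	ret = arch_add_memory(nid, start, size);

	unlock_memory_hotplug();
	return ret;
}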
The lock was introduced by 28ae55c9 ("sparsemem extreme: hotplug
preparation") and sparse_index_init() was used only during boot at that
time.
Later, when the hotplug code (and add_memory()) was introduced, there was no
synchronization, so it was probably possible to online more sections from the
same root concurrently (though I am not 100% sure about that). The first
synchronization was added by 6ad696d2 ("mm: allow memory hotplug and
hibernation in the same kernel"), which was later replaced by
mem_hotplug_mutex in 20d6c96b ("mem-hotplug: introduce
{un}lock_memory_hotplug()").
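The helpers introduced by 20d6c96b are, roughly, the following (a sketch
recalled from mm/memory_hotplug.c of that period, not a verbatim copy):

static DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);
	/* also keeps hibernation out while memory is being hot-added/removed */
	lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
	unlock_system_sleep();
	mutex_unlock(&mem_hotplug_mutex);
}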
Let's remove the lock, as it is not needed and only makes the code more
confusing.
[mhocko@suse.cz: changelog]
Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/sparse.c | 14
1 file changed, 1 insertion(+), 13 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 42ca0ea9af1..fac95f2888f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -77,7 +77,6 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 
 static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
-	static DEFINE_SPINLOCK(index_init_lock);
 	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
 	struct mem_section *section;
 	int ret = 0;
@@ -88,20 +87,9 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 	section = sparse_index_alloc(nid);
 	if (!section)
 		return -ENOMEM;
-	/*
-	 * This lock keeps two different sections from
-	 * reallocating for the same index
-	 */
-	spin_lock(&index_init_lock);
-
-	if (mem_section[root]) {
-		ret = -EEXIST;
-		goto out;
-	}
 
 	mem_section[root] = section;
-out:
-	spin_unlock(&index_init_lock);
+
 	return ret;
 }
 #else /* !SPARSEMEM_EXTREME */
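Pieced together from the two hunks above, sparse_index_init() after the patch
reads roughly as follows. The early-exit check between the two hunks is not
part of the diff and is filled in here from memory of mm/sparse.c at the time,
so treat that part as an assumption:

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	/* Assumed early exit (not shown in the hunks): root already populated. */
	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return ret;
}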