author		Mel Gorman <mgorman@suse.de>	2012-10-08 19:29:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:22 -0400
commit		b22d127a39ddd10d93deee3d96e643657ad53a49
tree		9a16e3d3a53a230dba611c85e9f892dda2b6c202 /mm/mempolicy.c
parent		869833f2c5c6e4dd09a5378cfc665ffb4615e5d2
mempolicy: fix a race in shared_policy_replace()
shared_policy_replace()'s use of sp_alloc() is unsafe: 1) an sp_node cannot be dereferenced if sp->lock is not held, and 2) another thread can modify the sp_node between the spin_unlock for allocating a new sp node and the next spin_lock. The bug was introduced before 2.6.12-rc2.

Kosaki's original patch for this problem was to allocate an sp node and policy within shared_policy_replace and initialise it when the lock is reacquired. I was not keen on this approach because it partially duplicates sp_alloc(). As the paths where sp->lock is taken are not that performance critical, this patch converts sp->lock to sp->mutex so it can sleep when calling sp_alloc().

[kosaki.motohiro@jp.fujitsu.com: Original patch]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Cc: Josh Boyer <jwboyer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
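To make the race concrete, here is a simplified sketch of the old flow versus the new one (condensed from the diff below; the retry bookkeeping and surrounding loop are trimmed):

	/* Old code: sp->lock is a spinlock, so sp_alloc() -- which
	 * allocates with GFP_KERNEL and may sleep -- cannot be called
	 * while it is held. The lock is dropped around the allocation. */
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);	/* n is only stable under the lock */
	...
	if (!new2) {
		spin_unlock(&sp->lock);
		/* BUG 1: n is dereferenced with the lock dropped */
		new2 = sp_alloc(end, n->end, n->policy);
		spin_lock(&sp->lock);
		/* BUG 2: another thread may have modified or freed n,
		 * or reshaped the tree, between unlock and lock */
		goto restart;
	}

	/* New code: sp->mutex can be held across a sleeping allocation,
	 * so n is never touched outside the critical section. */
	mutex_lock(&sp->mutex);
	n = sp_lookup(sp, start, end);
	...
	new2 = sp_alloc(end, n->end, n->policy);	/* safe under sp->mutex */
	...
	mutex_unlock(&sp->mutex);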
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	37
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f0728ae74672..b2f12ecc1b34 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2083,7 +2083,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  */
 
 /* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/* Caller holds sp->mutex */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2147,13 +2147,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
 	if (!sp->root.rb_node)
 		return NULL;
-	spin_lock(&sp->lock);
+	mutex_lock(&sp->mutex);
 	sn = sp_lookup(sp, idx, idx+1);
 	if (sn) {
 		mpol_get(sn->policy);
 		pol = sn->policy;
 	}
-	spin_unlock(&sp->lock);
+	mutex_unlock(&sp->mutex);
 	return pol;
 }
 
@@ -2193,10 +2193,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 				 unsigned long end, struct sp_node *new)
 {
-	struct sp_node *n, *new2 = NULL;
+	struct sp_node *n;
+	int ret = 0;
 
-restart:
-	spin_lock(&sp->lock);
+	mutex_lock(&sp->mutex);
 	n = sp_lookup(sp, start, end);
 	/* Take care of old policies in the same range. */
 	while (n && n->start < end) {
@@ -2209,16 +2209,14 @@ restart:
 		} else {
 			/* Old policy spanning whole new range. */
 			if (n->end > end) {
+				struct sp_node *new2;
+				new2 = sp_alloc(end, n->end, n->policy);
 				if (!new2) {
-					spin_unlock(&sp->lock);
-					new2 = sp_alloc(end, n->end, n->policy);
-					if (!new2)
-						return -ENOMEM;
-					goto restart;
+					ret = -ENOMEM;
+					goto out;
 				}
 				n->end = start;
 				sp_insert(sp, new2);
-				new2 = NULL;
 				break;
 			} else
 				n->end = start;
@@ -2229,12 +2227,9 @@ restart:
 	}
 	if (new)
 		sp_insert(sp, new);
-	spin_unlock(&sp->lock);
-	if (new2) {
-		mpol_put(new2->policy);
-		kmem_cache_free(sn_cache, new2);
-	}
-	return 0;
+out:
+	mutex_unlock(&sp->mutex);
+	return ret;
 }
 
 /**
@@ -2252,7 +2247,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 	int ret;
 
 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
-	spin_lock_init(&sp->lock);
+	mutex_init(&sp->mutex);
 
 	if (mpol) {
 		struct vm_area_struct pvma;
@@ -2318,7 +2313,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
 	if (!p->root.rb_node)
 		return;
-	spin_lock(&p->lock);
+	mutex_lock(&p->mutex);
 	next = rb_first(&p->root);
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
@@ -2327,7 +2322,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 		mpol_put(n->policy);
 		kmem_cache_free(sn_cache, n);
 	}
-	spin_unlock(&p->lock);
+	mutex_unlock(&p->mutex);
 }
 
 /* assumes fs == KERNEL_DS */
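Note that the lock-to-mutex conversion also requires a matching field change in struct shared_policy. That hunk lives outside this path-filtered view (the diffstat above is limited to mm/mempolicy.c); inferred from the mutex_init(&sp->mutex) call above, the companion change in include/linux/mempolicy.h would look like:

 struct shared_policy {
 	struct rb_root root;
-	spinlock_t lock;
+	struct mutex mutex;
 };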