author     Andi Kleen <ak@linux.intel.com>      2012-06-09 05:40:03 -0400
committer  Pekka Enberg <penberg@kernel.org>    2012-06-20 03:01:04 -0400
commit     e7b691b085fda913830e5280ae6f724b2a63c824
tree       9fbd380538f1c3fd5e36c5beeac35041351baf40
parent     8c138bc00925521c4e764269db3a903bd2a51592
slab/mempolicy: always use local policy from interrupt context
slab_node() could access current->mempolicy from interrupt context.
However, there is a race condition during process exit where the
mempolicy is first freed and only then is the pointer zeroed.

Using this from interrupts seems bogus anyway: the interrupt will
interrupt a random process and therefore see a random mempolicy.
Often this will be the idle task's, which no one can change.
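To make the hazard concrete, here is a minimal sketch (not part of the
patch; the handler and its device are hypothetical) of why
current->mempolicy is meaningless in interrupt context:

#include <linux/interrupt.h>
#include <linux/slab.h>

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	/*
	 * "current" is whatever task happened to be running on this
	 * CPU when the interrupt fired -- frequently the idle task --
	 * so current->mempolicy is effectively random here.  Any
	 * allocation from this context should just use the local node,
	 * which is what the in_interrupt() check added to slab_node()
	 * below enforces.
	 */
	void *buf = kmalloc(64, GFP_ATOMIC);

	kfree(buf);
	return IRQ_HANDLED;
}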
Just disable this here and always use the local node for slab
allocations from interrupts.  I also cleaned up the callers of
slab_node(), which always passed the same argument.

I believe the original mempolicy code already did this, so it's
likely a regression.

v2: send version with correct logic
v3: simplify, fix typo
Reported-by: Arun Sharma <asharma@fb.com>
Cc: penberg@kernel.org
Cc: cl@linux.com
Signed-off-by: Andi Kleen <ak@linux.intel.com>
[tdmackey@twitter.com: Rework control flow based on feedback from
cl@linux.com, fix logic, and clean up the current task_struct reference]
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: David Mackey <tdmackey@twitter.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
 include/linux/mempolicy.h | 2 +-
 mm/mempolicy.c            | 8 +++++++-
 mm/slab.c                 | 4 ++--
 mm/slub.c                 | 2 +-
 4 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4aa42732e47f..95b738c7abff 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -215,7 +215,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
-extern unsigned slab_node(struct mempolicy *policy);
+extern unsigned slab_node(void);
 
 extern enum zone_type policy_zone;
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f15c1b24ca18..cb0b230aa3f2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1602,8 +1602,14 @@ static unsigned interleave_nodes(struct mempolicy *policy)
  * task can change it's policy.  The system default policy requires no
  * such protection.
  */
-unsigned slab_node(struct mempolicy *policy)
+unsigned slab_node(void)
 {
+	struct mempolicy *policy;
+
+	if (in_interrupt())
+		return numa_node_id();
+
+	policy = current->mempolicy;
 	if (!policy || policy->flags & MPOL_F_LOCAL)
 		return numa_node_id();
 
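The caller-visible effect of the new signature, as a small before/after
sketch (nid stands in for each caller's own node variable):

	/* before: every caller dereferenced current->mempolicy itself,
	 * racing with exit and reading a random task's policy from
	 * interrupt context */
	nid = slab_node(current->mempolicy);

	/* after: slab_node() reads current->mempolicy internally and
	 * short-circuits to the local node when in_interrupt() is true */
	nid = slab_node();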
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3310,7 +3310,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
 		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
-		nid_alloc = slab_node(current->mempolicy);
+		nid_alloc = slab_node();
 	if (nid_alloc != nid_here)
 		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
@@ -3342,7 +3342,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 
 retry_cpuset:
 	cpuset_mems_cookie = get_mems_allowed();
-	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+	zonelist = node_zonelist(slab_node(), flags);
 
 retry:
 	/*
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1617,7 +1617,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 	do {
 		cpuset_mems_cookie = get_mems_allowed();
-		zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+		zonelist = node_zonelist(slab_node(), flags);
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 			struct kmem_cache_node *n;
 
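Both fallback_alloc() in mm/slab.c and get_any_partial() in mm/slub.c
wrap the slab_node() call in the same cpuset retry loop. A sketch of
that pattern as it looked in this kernel era (try_alloc_from() is a
hypothetical stand-in for the real zonelist walk):

	unsigned int cpuset_mems_cookie;
	void *object;

	do {
		/* snapshot the cpuset's mems_allowed; the loop retries
		 * if a concurrent cpuset update invalidated it */
		cpuset_mems_cookie = get_mems_allowed();
		/* slab_node() now resolves the node by itself and
		 * returns the local node from interrupt context */
		zonelist = node_zonelist(slab_node(), flags);
		object = try_alloc_from(zonelist);
	} while (!put_mems_allowed(cpuset_mems_cookie) && !object);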