diff options
author | Christoph Lameter <cl@linux.com> | 2013-01-23 16:45:48 -0500 |
---|---|---|
committer | Pekka Enberg <penberg@kernel.org> | 2013-04-05 07:23:06 -0400 |
commit | 7cccd80b4397699902aced1ad3d692d384aaab77 (patch) | |
tree | 010bad7b7e3d3969f6050406b448fbcbc57cdca0 /mm | |
parent | 4d7868e6475d478172581828021bd8a356726679 (diff) |
slub: tid must be retrieved from the percpu area of the current processor
As Steven Rostedt has pointed out: rescheduling could occur on a
different processor after the determination of the per cpu pointer and
before the tid is retrieved. This could result in allocation from the
wrong node in slab_alloc().
The effect is much more severe in slab_free() where we could free to the
freelist of the wrong page.
The window for something like that occurring is pretty small but it is
possible.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slub.c | 12 |
1 files changed, 9 insertions, 3 deletions
@@ -2332,13 +2332,18 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, | |||
2332 | 2332 | ||
2333 | s = memcg_kmem_get_cache(s, gfpflags); | 2333 | s = memcg_kmem_get_cache(s, gfpflags); |
2334 | redo: | 2334 | redo: |
2335 | |||
2336 | /* | 2335 | /* |
2337 | * Must read kmem_cache cpu data via this cpu ptr. Preemption is | 2336 | * Must read kmem_cache cpu data via this cpu ptr. Preemption is |
2338 | * enabled. We may switch back and forth between cpus while | 2337 | * enabled. We may switch back and forth between cpus while |
2339 | * reading from one cpu area. That does not matter as long | 2338 | * reading from one cpu area. That does not matter as long |
2340 | * as we end up on the original cpu again when doing the cmpxchg. | 2339 | * as we end up on the original cpu again when doing the cmpxchg. |
2340 | * | ||
2341 | * Preemption is disabled for the retrieval of the tid because that | ||
2342 | * must occur from the current processor. We cannot allow rescheduling | ||
2343 | * on a different processor between the determination of the pointer | ||
2344 | * and the retrieval of the tid. | ||
2341 | */ | 2345 | */ |
2346 | preempt_disable(); | ||
2342 | c = __this_cpu_ptr(s->cpu_slab); | 2347 | c = __this_cpu_ptr(s->cpu_slab); |
2343 | 2348 | ||
2344 | /* | 2349 | /* |
@@ -2348,7 +2353,7 @@ redo: | |||
2348 | * linked list in between. | 2353 | * linked list in between. |
2349 | */ | 2354 | */ |
2350 | tid = c->tid; | 2355 | tid = c->tid; |
2351 | barrier(); | 2356 | preempt_enable(); |
2352 | 2357 | ||
2353 | object = c->freelist; | 2358 | object = c->freelist; |
2354 | page = c->page; | 2359 | page = c->page; |
@@ -2595,10 +2600,11 @@ redo: | |||
2595 | * data is retrieved via this pointer. If we are on the same cpu | 2600 | * data is retrieved via this pointer. If we are on the same cpu |
2596 | * during the cmpxchg then the free will succedd. | 2601 | * during the cmpxchg then the free will succedd. |
2597 | */ | 2602 | */ |
2603 | preempt_disable(); | ||
2598 | c = __this_cpu_ptr(s->cpu_slab); | 2604 | c = __this_cpu_ptr(s->cpu_slab); |
2599 | 2605 | ||
2600 | tid = c->tid; | 2606 | tid = c->tid; |
2601 | barrier(); | 2607 | preempt_enable(); |
2602 | 2608 | ||
2603 | if (likely(page == c->page)) { | 2609 | if (likely(page == c->page)) { |
2604 | set_freepointer(s, object, c->freelist); | 2610 | set_freepointer(s, object, c->freelist); |