author		Christoph Lameter <cl@linux.com>	2011-12-22 12:58:51 -0500
committer	Tejun Heo <tj@kernel.org>		2011-12-22 13:40:20 -0500
commit		933393f58fef9963eac61db8093689544e29a600
tree		719f8b231499aa4ea023bc1a06db4582df5f0965 /mm/slub.c
parent		ecefc36b41ac0fe92d76273a23faf27b2da13411
percpu: Remove irqsafe_cpu_xxx variants
We simply say that regular this_cpu use must be safe regardless of
preemption and interrupt state. This is no material change for the x86
and s390 implementations of this_cpu operations. However, arches that
do not provide their own implementation of the this_cpu operations will
now get generated code that disables interrupts instead of preemption.
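
For illustration, a minimal user-space model of the generic fallback behaviour
described above; the model_* and fake_* names are stand-ins, not kernel APIs.
It shows the single generic this_cpu_add() bracketing the per-cpu update with
interrupt masking rather than only preemption disabling, which is why a
separate irqsafe variant is no longer needed:

#include <stdio.h>

static int fake_irqs_enabled = 1;   /* stand-in for the CPU's interrupt-enable flag */
static long percpu_counter;         /* stand-in for a per-cpu counter               */

/* Stand-ins for local_irq_save()/local_irq_restore() */
#define model_local_irq_save(flags) \
	do { (flags) = fake_irqs_enabled; fake_irqs_enabled = 0; } while (0)
#define model_local_irq_restore(flags) \
	do { fake_irqs_enabled = (flags); } while (0)

/*
 * Old generic fallback: preempt_disable()/preempt_enable() around the update,
 * with irqsafe_cpu_add() as a separate interrupt-safe variant.
 * New rule (this commit): the one generic this_cpu_add() fallback masks
 * interrupts around the update, so it is safe in any context.
 */
#define model_this_cpu_add(var, val)			\
	do {						\
		int __flags;				\
		model_local_irq_save(__flags);		\
		(var) += (val);				\
		model_local_irq_restore(__flags);	\
	} while (0)

int main(void)
{
	model_this_cpu_add(percpu_counter, 1);
	printf("counter=%ld, irqs enabled again=%d\n",
	       percpu_counter, fake_irqs_enabled);
	return 0;
}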
-tj: This is part of the ongoing percpu API cleanup. For detailed
discussion of the subject, please refer to the following thread.
http://thread.gmane.org/gmane.linux.kernel/1222078
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <alpine.DEB.2.00.1112221154380.11787@router.home>
Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1978,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
 	stat(s, CPU_PARTIAL_FREE);
 	return pobjects;
 }
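
The hunk above only renames the macro; the surrounding pattern is an optimistic
retry loop that links the page in front of the current per-cpu partial head and
publishes it only if the head did not change in the meantime. A hedged,
simplified single-threaded sketch of that pattern, using a hypothetical
compare_and_set_head() in place of this_cpu_cmpxchg():

#include <stdio.h>

struct page_model {                     /* simplified stand-in for struct page   */
	struct page_model *next;
	int pages_queued;               /* simplified count, not slub's pobjects */
};

static struct page_model *partial_head; /* stand-in for the per-cpu ->partial slot */

/*
 * Stand-in for this_cpu_cmpxchg(): store @new_page only if the slot still
 * holds @old, and return whatever value was actually found there. In the
 * kernel this can fail when an interrupt changed the head in between.
 */
static struct page_model *compare_and_set_head(struct page_model *old,
					       struct page_model *new_page)
{
	struct page_model *cur = partial_head;

	if (cur == old)
		partial_head = new_page;
	return cur;
}

static void push_partial(struct page_model *page)
{
	struct page_model *oldpage;

	do {
		oldpage = partial_head;                 /* snapshot current head  */
		page->next = oldpage;                   /* link in front of it    */
		page->pages_queued = (oldpage ? oldpage->pages_queued : 0) + 1;
	} while (compare_and_set_head(oldpage, page) != oldpage); /* retry on race */
}

int main(void)
{
	struct page_model a = { 0 }, b = { 0 };

	push_partial(&a);
	push_partial(&b);
	printf("%d pages on the partial list\n", partial_head->pages_queued);
	return 0;
}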
@@ -2304,7 +2304,7 @@ redo:
 	 * Since this is without lock semantics the protection is only against
 	 * code executing on this cpu *not* from access by other cpus.
 	 */
-	if (unlikely(!irqsafe_cpu_cmpxchg_double(
+	if (unlikely(!this_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			object, tid,
 			get_freepointer_safe(s, object), next_tid(tid)))) {
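
As the retained comment notes, this compare-exchange only has to guard against
other code running on the same CPU: the transaction id (tid) is bumped on every
per-cpu slab operation, so if an interrupt or preemption touched the slab
between sampling and committing, one of the two words mismatches and the fast
path retries. A hedged user-space model of that two-word check, with
hypothetical names (cpu_slab_model, try_commit) rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct cpu_slab_model {          /* stand-in for struct kmem_cache_cpu  */
	void **freelist;         /* first free object, or NULL          */
	unsigned long tid;       /* bumped on every per-cpu transaction */
};

/*
 * Stand-in for this_cpu_cmpxchg_double(): commit the new freelist/tid pair
 * only if *both* current values still match what the caller sampled earlier.
 */
static bool try_commit(struct cpu_slab_model *c,
		       void **old_freelist, unsigned long old_tid,
		       void **new_freelist, unsigned long new_tid)
{
	if (c->freelist != old_freelist || c->tid != old_tid)
		return false;           /* something raced on this cpu: retry */
	c->freelist = new_freelist;
	c->tid = new_tid;
	return true;
}

int main(void)
{
	/* three free "objects", each storing the next free object in its first word */
	void *slots[3] = { &slots[1], &slots[2], NULL };
	struct cpu_slab_model c = { .freelist = slots, .tid = 0 };

	/* modelled alloc fast path: sample, read the free pointer, try to commit */
	void **object = c.freelist;
	unsigned long tid = c.tid;
	void **next = *object;          /* next free object after this one */

	if (try_commit(&c, object, tid, next, tid + 1))
		printf("allocated object %p, new tid %lu\n", (void *)object, c.tid);
	return 0;
}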
@@ -2534,7 +2534,7 @@ redo:
 	if (likely(page == c->page)) {
 		set_freepointer(s, object, c->freelist);
 
-		if (unlikely(!irqsafe_cpu_cmpxchg_double(
+		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
 				object, next_tid(tid)))) {