author		James Morris <jmorris@namei.org>	2011-04-19 07:32:41 -0400
committer	James Morris <jmorris@namei.org>	2011-04-19 07:32:41 -0400
commit		d4ab4e6a23f805abb8fc3cc34525eec3788aeca1 (patch)
tree		eefd82c155bc27469a85667d759cd90facf4a6e3 /mm/slub.c
parent		c0fa797ae6cd02ff87c0bfe0d509368a3b45640e (diff)
parent		96fd2d57b8252e16dfacf8941f7a74a6119197f5 (diff)
Merge branch 'master'; commit 'v2.6.39-rc3' into next
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index f881874843a5..94d2a33a866e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -64,7 +64,7 @@
  * we must stay away from it for a while since we may cause a bouncing
  * cacheline if we try to acquire the lock. So go onto the next slab.
  * If all pages are busy then we may allocate a new slab instead of reusing
- * a partial slab. A new slab has noone operating on it and thus there is
+ * a partial slab. A new slab has no one operating on it and thus there is
  * no danger of cacheline contention.
  *
  * Interrupts are disabled during allocation and deallocation in order to
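The comment above is design rationale, but the policy it describes is easy to show in miniature. Below is a minimal user-space sketch, assuming a pthread mutex per slab and a hypothetical allocate_fresh_slab() helper (stand-ins, not SLUB's actual structures): try each partial slab once with a trylock, skip it if it is contended, and fall back to a fresh slab that nobody else can be touching.

#include <pthread.h>
#include <stdlib.h>

struct slab {
	pthread_mutex_t lock;
	struct slab *next;
};

/* Stand-in for new_slab(); returned locked, like a claimed partial slab. */
static struct slab *allocate_fresh_slab(void)
{
	struct slab *s = calloc(1, sizeof(*s));

	if (s) {
		pthread_mutex_init(&s->lock, NULL);
		pthread_mutex_lock(&s->lock);
	}
	return s;
}

static struct slab *grab_slab(struct slab *partial)
{
	struct slab *s;

	for (s = partial; s; s = s->next) {
		/*
		 * trylock, not lock: failure means another CPU holds it,
		 * so skip this slab rather than bounce its lock cacheline.
		 */
		if (pthread_mutex_trylock(&s->lock) == 0)
			return s;
	}
	/* Every partial slab was busy: a fresh slab has no one on it. */
	return allocate_fresh_slab();
}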
@@ -1929,7 +1929,7 @@ redo:
 	else {
 #ifdef CONFIG_CMPXCHG_LOCAL
 		/*
-		 * The cmpxchg will only match if there was no additonal
+		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
 		 * The cmpxchg does the following atomically (without lock semantics!)
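For context on what this comment means: every allocation and free bumps a per-cpu transaction id (tid), and the final cmpxchg covers the freelist head and the tid together, so it fails, sending the fastpath back to the redo: label seen in the hunk header, whenever any other operation ran in between. The sketch below is a hedged user-space approximation, not SLUB's code: it packs a 32-bit object index and a 32-bit tid into one 64-bit word so a single C11 compare-exchange can check both at once (the kernel instead uses a per-cpu double-word cmpxchg on the (freelist, tid) pair, with the CPU folded into the tid so a migration also forces a retry).

#include <stdatomic.h>
#include <stdint.h>

#define NOBJ 64

static int32_t next_free[NOBJ];		/* index-linked free list */
static _Atomic uint64_t head_tid;	/* high 32 bits: head index (-1 = empty);
					 * low 32 bits: transaction id */

static uint64_t pack(int32_t head, uint32_t tid)
{
	return ((uint64_t)(uint32_t)head << 32) | tid;
}

static void freelist_init(void)
{
	for (int i = 0; i < NOBJ; i++)
		next_free[i] = (i + 1 < NOBJ) ? i + 1 : -1;
	atomic_store(&head_tid, pack(0, 0));
}

/* Lockless fastpath: pop the head object, or return -1 if empty. */
static int32_t fastpath_alloc(void)
{
	uint64_t old = atomic_load(&head_tid);
	uint64_t new;
	int32_t head;

	do {
		head = (int32_t)(old >> 32);
		if (head < 0)
			return -1;	/* real code takes a slowpath here */
		new = pack(next_free[head], (uint32_t)old + 1);
		/*
		 * The compare-exchange only matches if there was no
		 * additional operation: any concurrent alloc or free
		 * bumped the tid half, so the exchange fails and we
		 * retry with freshly loaded values.
		 */
	} while (!atomic_compare_exchange_weak(&head_tid, &old, new));
	return head;
}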
@@ -3547,7 +3547,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
 	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
-	/* Honor the call site pointer we recieved. */
+	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
 	return ret;
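The reason caller is threaded through at all: helpers such as kstrdup() would otherwise make every allocation look like it came from the helper itself. A kernel-style sketch of the path the pointer takes, where my_strdup() is hypothetical but kmalloc_track_caller() and _RET_IP_ are the real interfaces:

/*
 * kmalloc_track_caller(size, flags) expands (with SLUB) to
 * __kmalloc_track_caller(size, flags, _RET_IP_), and _RET_IP_ is
 * (unsigned long)__builtin_return_address(0): the address of whoever
 * called my_strdup(). That is the pointer trace_kmalloc() reports,
 * so the allocation is attributed to the real call site.
 */
static char *my_strdup(const char *src, gfp_t gfp)
{
	size_t len = strlen(src) + 1;
	char *buf = kmalloc_track_caller(len, gfp);

	if (buf)
		memcpy(buf, src, len);
	return buf;
}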
@@ -3577,7 +3577,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
 	ret = slab_alloc(s, gfpflags, node, caller);
 
-	/* Honor the call site pointer we recieved. */
+	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
 	return ret;