author		Pekka Enberg <penberg@kernel.org>	2011-03-20 12:13:26 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-03-20 12:13:26 -0400
commit		e8c500c2b64b6e237e67ecba7249e72363c47047 (patch)
tree		e9c62e59a879ebef45b0fc2823d318b2fb2fed84
parent		c53badd0801728feedfcccae04239410b52b0d03 (diff)
parent		a24c5a0ea902bcda348f086bd909cc2d6e305bf8 (diff)

Merge branch 'slub/lockless' into for-linus

Conflicts:
	include/linux/slub_def.h
-rw-r--r--	arch/alpha/kernel/vmlinux.lds.S		|   5
-rw-r--r--	arch/arm/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/blackfin/kernel/vmlinux.lds.S	|   2
-rw-r--r--	arch/cris/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/frv/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/ia64/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/m32r/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/mips/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/mn10300/kernel/vmlinux.lds.S	|   2
-rw-r--r--	arch/parisc/kernel/vmlinux.lds.S	|   2
-rw-r--r--	arch/powerpc/kernel/vmlinux.lds.S	|   2
-rw-r--r--	arch/s390/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/sh/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/sparc/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/tile/kernel/vmlinux.lds.S		|   2
-rw-r--r--	arch/um/include/asm/common.lds.S	|   2
-rw-r--r--	arch/x86/include/asm/percpu.h		|  48
-rw-r--r--	arch/x86/kernel/vmlinux.lds.S		|   4
-rw-r--r--	arch/x86/lib/Makefile			|   1
-rw-r--r--	arch/x86/lib/cmpxchg16b_emu.S		|  59
-rw-r--r--	arch/xtensa/kernel/vmlinux.lds.S	|   2
-rw-r--r--	include/asm-generic/vmlinux.lds.h	|  35
-rw-r--r--	include/linux/percpu.h			| 128
-rw-r--r--	include/linux/slub_def.h		|   7
-rw-r--r--	mm/slub.c				| 234
25 files changed, 505 insertions, 48 deletions
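
The common thread through all of these hunks: SLUB's allocation and free fast paths stop disabling interrupts and instead publish a new (freelist, tid) pair with one cmpxchg_double on the per-cpu slab structure. Reduced to a sketch (illustration only; the committed version is in the mm/slub.c hunks below):

	/* Sketch of the lockless fastpath pattern, not the committed code. */
	static void *lockless_alloc_sketch(struct kmem_cache *s)
	{
		struct kmem_cache_cpu *c;
		unsigned long tid;
		void **object;

	redo:
		c = __this_cpu_ptr(s->cpu_slab);	/* preemption may be on */
		tid = c->tid;				/* snapshot the transaction id */
		barrier();
		object = c->freelist;
		if (!object)
			return NULL;			/* real code enters the slowpath */

		/* Replace freelist head and tid together, or start over. */
		if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
					     object, tid,
					     get_freepointer(s, object), next_tid(tid)))
			goto redo;
		return object;
	}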
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 003ef4c02585..433be2a24f31 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm/page.h>
 
 OUTPUT_FORMAT("elf64-alpha")
@@ -38,7 +39,7 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
@@ -46,7 +47,7 @@ SECTIONS
 	/* Freed after init ends here */
 
 	_data = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 
 	.got : {
 		*(.got)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 61462790757f..28fea9b2d129 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -78,7 +78,7 @@ SECTIONS
 #endif
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 
 #ifndef CONFIG_XIP_KERNEL
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 4122678529c0..c40d07f708e8 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
 
 	. = ALIGN(16);
 	INIT_DATA_SECTION(16)
-	PERCPU(4)
+	PERCPU(32, 4)
 
 	.exit.data :
 	{
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index c49be845f96a..728bbd9e7d4c 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -102,7 +102,7 @@ SECTIONS
 #endif
 	__vmlinux_end = .;	/* Last address of the physical file. */
 #ifdef CONFIG_ETRAX_ARCH_V32
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 
 	.init.ramfs : {
 		INIT_RAM_FS
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 8b973f3cc90e..0daae8af5787 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -37,7 +37,7 @@ SECTIONS
 	_einittext = .;
 
 	INIT_DATA_SECTION(8)
-	PERCPU(4096)
+	PERCPU(L1_CACHE_BYTES, 4096)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044dcb1c..787de4a77d82 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
 
 	/* Per-cpu data: */
 	. = ALIGN(PERCPU_PAGE_SIZE);
-	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
 	__phys_per_cpu_start = __per_cpu_load;
 	/*
 	 * ensure percpu data fits
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 7da94eaa082b..c194d64cdbb9 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -53,7 +53,7 @@ SECTIONS
 	__init_begin = .;
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 570607b376b5..832afbb87588 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -115,7 +115,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index febbeee7f2f5..968bcd2cb022 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -70,7 +70,7 @@ SECTIONS
 	.exit.text : { EXIT_TEXT; }
 	.exit.data : { EXIT_DATA; }
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index d64a6bbec2aa..8f1e4efd143e 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8a0deefac08d..b9150f07d266 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -160,7 +160,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
 	. = ALIGN(8);
 	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a68ac10213b2..1bc18cdb525b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -77,7 +77,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(0x100, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;		/* freed after init ends here */
 
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 7f8a709c3ada..af4d46187a79 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -66,7 +66,7 @@ SECTIONS
 		__machvec_end = .;
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
 	/*
 	 * .exit.text is discarded at runtime, not link time, to deal with
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0c1e6783657f..92b557afe535 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -108,7 +108,7 @@ SECTIONS
 		__sun4v_2insn_patch_end = .;
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 25fdc0c1839a..c6ce378e0678 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -63,7 +63,7 @@ SECTIONS
 		*(.init.page)
 	} :data =0
 	INIT_DATA_SECTION(16)
-	PERCPU(PAGE_SIZE)
+	PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	VMLINUX_SYMBOL(_einitdata) = .;
 
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index ac55b9efa1ce..34bede8aad4a 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -42,7 +42,7 @@
 	INIT_SETUP(0)
   }
 
-  PERCPU(32)
+  PERCPU(32, 32)
 
   .initcall.init : {
 	INIT_CALLS
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7e172955ee57..a09e1f052d84 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -451,6 +451,26 @@ do { \
 #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy = n2;					\
+	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
+		    : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)		\
+		    :  "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));	\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#endif /* CONFIG_X86_CMPXCHG64 */
+
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -480,6 +500,34 @@ do {									\
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
+ * is not supported on early AMD64 processors so we must be able to emulate
+ * it in software.  The address used in the cmpxchg16 instruction must be
+ * aligned to a 16 byte boundary.
+ */
+#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy;						\
+	alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4,	\
+		       "cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t",	\
+		       X86_FEATURE_CX16,				\
+		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
+		       "S" (&pcp1), "b"(__n1), "c"(__n2),		\
+		       "a"(__o1), "d"(__o2));				\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
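
Both the 4-byte and 8-byte variants share one calling convention: the two percpu arguments name adjacent words, the old pair is compared, the new pair is stored only on a full match, and %al reports success. A hedged usage sketch (the struct and function names here are invented for illustration):

	struct demo_pair {				/* must be 2 * wordsize aligned */
		void *ptr;
		unsigned long seq;
	} __aligned(2 * sizeof(void *));
	static DEFINE_PER_CPU(struct demo_pair, demo);

	static bool demo_advance(void *old_ptr, unsigned long old_seq, void *new_ptr)
	{
		/* Atomic w.r.t. this cpu only: both words change, or neither. */
		return this_cpu_cmpxchg_double(demo.ptr, demo.seq,
					       old_ptr, old_seq,
					       new_ptr, old_seq + 1);
	}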
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index bf4700755184..cef446f8ac78 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -230,7 +230,7 @@ SECTIONS
 	 * output PHDR, so the next output section - .init.text - should
 	 * start another segment - init.
 	 */
-	PERCPU_VADDR(0, :percpu)
+	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 #endif
 
 	INIT_TEXT_SECTION(PAGE_SIZE)
@@ -305,7 +305,7 @@ SECTIONS
 	}
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-	PERCPU(THREAD_SIZE)
+	PERCPU(INTERNODE_CACHE_BYTES, THREAD_SIZE)
 #endif
 
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index e10cf070ede0..f2479f19ddde 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -42,4 +42,5 @@ else
 	lib-y += memmove_64.o memset_64.o
 	lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
 	lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
+	lib-y += cmpxchg16b_emu.o
 endif
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
new file mode 100644
index 000000000000..3e8b08a6de2b
--- /dev/null
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -0,0 +1,59 @@
+/*
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; version 2
+ *	of the License.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+.text
+
+/*
+ * Inputs:
+ * %rsi : memory location to compare
+ * %rax : low 64 bits of old value
+ * %rdx : high 64 bits of old value
+ * %rbx : low 64 bits of new value
+ * %rcx : high 64 bits of new value
+ * %al  : Operation successful
+ */
+ENTRY(this_cpu_cmpxchg16b_emu)
+CFI_STARTPROC
+
+#
+# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
+# via the ZF.  Caller will access %al to get result.
+#
+# Note that this is only useful for a cpuops operation.  Meaning that we
+# do *not* have a fully atomic operation but just an operation that is
+# *atomic* on a single cpu (as provided by the this_cpu_xx class of
+# macros).
+#
+this_cpu_cmpxchg16b_emu:
+	pushf
+	cli
+
+	cmpq %gs:(%rsi), %rax
+	jne not_same
+	cmpq %gs:8(%rsi), %rdx
+	jne not_same
+
+	movq %rbx, %gs:(%rsi)
+	movq %rcx, %gs:8(%rsi)
+
+	popf
+	mov $1, %al
+	ret
+
+not_same:
+	popf
+	xor %al,%al
+	ret
+
+CFI_ENDPROC
+
+ENDPROC(this_cpu_cmpxchg16b_emu)
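
The fallback only has to be atomic with respect to the local cpu: pushf/cli holds off interrupts around the compare and store, and a %gs-relative percpu slot is never written by another cpu through the this_cpu operations. A rough C paraphrase of the routine above (for illustration, not kernel source):

	static bool cmpxchg16b_emu_sketch(u64 *mem,	/* %gs:(%rsi) */
					  u64 old_lo, u64 old_hi,
					  u64 new_lo, u64 new_hi)
	{
		unsigned long flags;
		bool ret = false;

		local_irq_save(flags);			/* pushf; cli */
		if (mem[0] == old_lo && mem[1] == old_hi) {
			mem[0] = new_lo;		/* movq %rbx, (mem)  */
			mem[1] = new_hi;		/* movq %rcx, 8(mem) */
			ret = true;
		}
		local_irq_restore(flags);		/* popf */
		return ret;				/* %al */
	}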
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 9b526154c9ba..a2820065927e 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -155,7 +155,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
 
 	/* We need this dummy segment here */
 
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index fe77e3395b40..22d3342d6af4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
  *	HEAD_TEXT_SECTION
  *	INIT_TEXT_SECTION(PAGE_SIZE)
  *	INIT_DATA_SECTION(...)
- *	PERCPU(PAGE_SIZE)
+ *	PERCPU(CACHELINE_SIZE, PAGE_SIZE)
  *	__init_end = .;
  *
  *	_stext = .;
@@ -683,13 +683,18 @@
 
 /**
  * PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
  * @vaddr: explicit base address (optional)
  * @phdr: destination PHDR (optional)
  *
- * Macro which expands to output section for percpu area. If @vaddr
- * is not blank, it specifies explicit base address and all percpu
- * symbols will be offset from the given address. If blank, @vaddr
- * always equals @laddr + LOAD_OFFSET.
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies explicit base address and all
+ * percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
  *
  * @phdr defines the output PHDR to use if not blank. Be warned that
  * output PHDR is sticky. If @phdr is specified, the next output
@@ -700,7 +705,7 @@
  * If there is no need to put the percpu section at a predetermined
  * address, use PERCPU().
  */
-#define PERCPU_VADDR(vaddr, phdr)					\
+#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
 	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
 	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
 				- LOAD_OFFSET) {			\
@@ -708,7 +713,9 @@
 		*(.data..percpu..first)					\
 		. = ALIGN(PAGE_SIZE);					\
 		*(.data..percpu..page_aligned)				\
+		. = ALIGN(cacheline);					\
 		*(.data..percpu..readmostly)				\
+		. = ALIGN(cacheline);					\
 		*(.data..percpu)					\
 		*(.data..percpu..shared_aligned)			\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
@@ -717,18 +724,18 @@
 
 /**
  * PERCPU - define output section for percpu area, simple version
+ * @cacheline: cacheline size
  * @align: required alignment
  *
- * Align to @align and outputs output section for percpu area.  This
- * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
- * __per_cpu_start will be identical.
+ * Align to @align and outputs output section for percpu area.  This macro
+ * doesn't manipulate @vaddr or @phdr and __per_cpu_load and
+ * __per_cpu_start will be identical.
  *
- * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
- * that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32
- * configuration.
+ * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
  */
-#define PERCPU(align)							\
+#define PERCPU(cacheline, align)					\
 	. = ALIGN(align);						\
 	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
@@ -736,7 +743,9 @@
 		*(.data..percpu..first)					\
 		. = ALIGN(PAGE_SIZE);					\
 		*(.data..percpu..page_aligned)				\
+		. = ALIGN(cacheline);					\
 		*(.data..percpu..readmostly)				\
+		. = ALIGN(cacheline);					\
 		*(.data..percpu)					\
 		*(.data..percpu..shared_aligned)			\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
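
The practical effect of the new @cacheline argument: the ..readmostly subsection now starts and ends on a cacheline boundary, so read-mostly percpu variables can no longer false-share a line with the frequently written percpu data placed after them. Declarations are unchanged; for example (illustrative, using the stock percpu-defs.h helper):

	/* Emitted into .data..percpu..readmostly, now cacheline-isolated: */
	DEFINE_PER_CPU_READ_MOSTLY(unsigned long, demo_threshold);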
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 27c3c6fcfad3..3a5c4449fd36 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -255,6 +255,30 @@ extern void __bad_size_call_parameter(void);
 	pscr2_ret__;							\
 })
 
+/*
+ * Special handling for cmpxchg_double.  cmpxchg_double is passed two
+ * percpu variables.  The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
+({									\
+	bool pdcrb_ret__;						\
+	__verify_pcpu_ptr(&pcp1);					\
+	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
+	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));	\
+	VM_BUG_ON((unsigned long)(&pcp2) !=				\
+		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
+	switch(sizeof(pcp1)) {						\
+	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pdcrb_ret__;							\
+})
+
 #define __pcpu_size_call(stem, variable, ...)				\
 do {									\
 	__verify_pcpu_ptr(&(variable));					\
@@ -501,6 +525,45 @@ do { \
 #endif
 
 /*
+ * cmpxchg_double replaces two adjacent scalars at once.  The first
+ * two parameters are per cpu variables which have to be of the same
+ * size.  A truth value is returned to indicate success or failure
+ * (since a double register result is difficult to handle).  There is
+ * very limited hardware support for these operations, so only certain
+ * sizes may work.
+ */
+#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+({									\
+	int ret__;							\
+	preempt_disable();						\
+	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
+			oval1, oval2, nval1, nval2);			\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_cmpxchg_double
+# ifndef this_cpu_cmpxchg_double_1
+#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_2
+#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_4
+#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_8
+#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
+/*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
  * responsibility of handling preemptions issues. Arch code can still
@@ -703,6 +766,39 @@ do { \
 	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
 #endif
 
+#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+({									\
+	int __ret = 0;							\
+	if (__this_cpu_read(pcp1) == (oval1) &&				\
+			__this_cpu_read(pcp2) == (oval2)) {		\
+		__this_cpu_write(pcp1, (nval1));			\
+		__this_cpu_write(pcp2, (nval2));			\
+		__ret = 1;						\
+	}								\
+	(__ret);							\
+})
+
+#ifndef __this_cpu_cmpxchg_double
+# ifndef __this_cpu_cmpxchg_double_1
+#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_2
+#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_4
+#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_8
+#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
@@ -823,4 +919,36 @@ do { \
 	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
 #endif
 
+#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+({									\
+	int ret__;							\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
+			oval1, oval2, nval1, nval2);			\
+	local_irq_restore(flags);					\
+	ret__;								\
+})
+
+#ifndef irqsafe_cpu_cmpxchg_double
+# ifndef irqsafe_cpu_cmpxchg_double_1
+#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_2
+#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_4
+#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_8
+#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
+	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 #endif /* __LINUX_PERCPU_H */
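
Note the layout contract that __pcpu_double_call_return_bool() enforces at runtime: both variables have the same size, the first sits on a boundary of twice that size, and the second follows immediately, so together they form one naturally aligned double word. Callers typically get this by construction, e.g. (field names hypothetical):

	struct double_word {
		void *first;			/* offset 0 */
		unsigned long second;		/* offset sizeof(void *), adjacent */
	} __aligned(2 * sizeof(void *));	/* otherwise VM_BUG_ON() fires */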
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ae0093cc5189..90fbb6d87e11 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -35,7 +35,10 @@ enum stat_item {
 			NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
-	void **freelist;	/* Pointer to first free per cpu object */
+	void **freelist;	/* Pointer to next available object */
+#ifdef CONFIG_CMPXCHG_LOCAL
+	unsigned long tid;	/* Globally unique transaction id */
+#endif
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
@@ -70,6 +73,7 @@ struct kmem_cache {
 	struct kmem_cache_cpu __percpu *cpu_slab;
 	/* Used for retrieving partial slabs etc */
 	unsigned long flags;
+	unsigned long min_partial;
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
@@ -84,7 +88,6 @@ struct kmem_cache {
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	int reserved;		/* Reserved bytes at the end of slabs */
-	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SYSFS
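
The field order in kmem_cache_cpu is deliberate: freelist and tid form exactly the adjacent, double-word-aligned pair that this_cpu_cmpxchg_double() expects (mm/slub.c below also allocates the percpu structure with 2 * sizeof(void *) alignment for the same reason). Schematically (a layout sketch, not new code):

	struct kmem_cache_cpu {
		void **freelist;	/* word 0 of the double word */
		unsigned long tid;	/* word 1, directly after    */
		/* ... remaining members unchanged ... */
	};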
diff --git a/mm/slub.c b/mm/slub.c
index e841d8921c22..7e4f835e32ab 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -836,14 +836,24 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
 {
 	kmemleak_free_recursive(x, s->flags);
-}
 
-static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
-{
-	kmemcheck_slab_free(s, object, s->objsize);
-	debug_check_no_locks_freed(object, s->objsize);
-	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+	/*
+	 * Trouble is that we may no longer disable interrupts in the fast path
+	 * So in order to make the debug calls that expect irqs to be
+	 * disabled we need to disable interrupts temporarily.
+	 */
+#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		kmemcheck_slab_free(s, x, s->objsize);
+		debug_check_no_locks_freed(x, s->objsize);
+		if (!(s->flags & SLAB_DEBUG_OBJECTS))
+			debug_check_no_obj_freed(x, s->objsize);
+		local_irq_restore(flags);
+	}
+#endif
 }
 
 /*
@@ -1130,9 +1140,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
 
-static inline void slab_free_hook_irq(struct kmem_cache *s,
-	void *object) {}
-
 #endif /* CONFIG_SLUB_DEBUG */
 
 /*
@@ -1533,6 +1540,77 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	}
 }
 
+#ifdef CONFIG_CMPXCHG_LOCAL
+#ifdef CONFIG_PREEMPT
+/*
+ * Calculate the next globally unique transaction for disambiguation
+ * during cmpxchg. The transactions start with the cpu number and are then
+ * incremented by CONFIG_NR_CPUS.
+ */
+#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
+#else
+/*
+ * No preemption supported therefore also no need to check for
+ * different cpus.
+ */
+#define TID_STEP 1
+#endif
+
+static inline unsigned long next_tid(unsigned long tid)
+{
+	return tid + TID_STEP;
+}
+
+static inline unsigned int tid_to_cpu(unsigned long tid)
+{
+	return tid % TID_STEP;
+}
+
+static inline unsigned long tid_to_event(unsigned long tid)
+{
+	return tid / TID_STEP;
+}
+
+static inline unsigned int init_tid(int cpu)
+{
+	return cpu;
+}
+
+static inline void note_cmpxchg_failure(const char *n,
+		const struct kmem_cache *s, unsigned long tid)
+{
+#ifdef SLUB_DEBUG_CMPXCHG
+	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
+
+	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
+
+#ifdef CONFIG_PREEMPT
+	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
+		printk("due to cpu change %d -> %d\n",
+			tid_to_cpu(tid), tid_to_cpu(actual_tid));
+	else
+#endif
+	if (tid_to_event(tid) != tid_to_event(actual_tid))
+		printk("due to cpu running other code. Event %ld->%ld\n",
+			tid_to_event(tid), tid_to_event(actual_tid));
+	else
+		printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
+			actual_tid, tid, next_tid(tid));
+#endif
+}
+
+#endif
+
+void init_kmem_cache_cpus(struct kmem_cache *s)
+{
+#if defined(CONFIG_CMPXCHG_LOCAL) && defined(CONFIG_PREEMPT)
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
+#endif
+
+}
 /*
  * Remove the cpu slab
  */
@@ -1564,6 +1642,9 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		page->inuse--;
 	}
 	c->page = NULL;
+#ifdef CONFIG_CMPXCHG_LOCAL
+	c->tid = next_tid(c->tid);
+#endif
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1698,6 +1779,19 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	struct page *new;
+#ifdef CONFIG_CMPXCHG_LOCAL
+	unsigned long flags;
+
+	local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+	/*
+	 * We may have been preempted and rescheduled on a different
+	 * cpu before disabling interrupts. Need to reload cpu area
+	 * pointer.
+	 */
+	c = this_cpu_ptr(s->cpu_slab);
+#endif
+#endif
 
 	/* We handle __GFP_ZERO in the caller */
 	gfpflags &= ~__GFP_ZERO;
@@ -1724,6 +1818,10 @@ load_freelist:
 	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
+#ifdef CONFIG_CMPXCHG_LOCAL
+	c->tid = next_tid(c->tid);
+	local_irq_restore(flags);
+#endif
 	stat(s, ALLOC_SLOWPATH);
 	return object;
 
@@ -1785,23 +1883,76 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
+#ifdef CONFIG_CMPXCHG_LOCAL
+	unsigned long tid;
+#else
 	unsigned long flags;
+#endif
 
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
+#ifndef CONFIG_CMPXCHG_LOCAL
 	local_irq_save(flags);
+#else
+redo:
+#endif
+
+	/*
+	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
+	 * enabled. We may switch back and forth between cpus while
+	 * reading from one cpu area. That does not matter as long
+	 * as we end up on the original cpu again when doing the cmpxchg.
+	 */
 	c = __this_cpu_ptr(s->cpu_slab);
+
+#ifdef CONFIG_CMPXCHG_LOCAL
+	/*
+	 * The transaction ids are globally unique per cpu and per operation on
+	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
+	 * occurs on the right processor and that there was no operation on the
+	 * linked list in between.
+	 */
+	tid = c->tid;
+	barrier();
+#endif
+
 	object = c->freelist;
 	if (unlikely(!object || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
+#ifdef CONFIG_CMPXCHG_LOCAL
+		/*
+		 * The cmpxchg will only match if there was no additional
+		 * operation and if we are on the right processor.
+		 *
+		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * 1. Relocate first pointer to the current per cpu area.
+		 * 2. Verify that tid and freelist have not been changed
+		 * 3. If they were not changed replace tid and freelist
+		 *
+		 * Since this is without lock semantics the protection is only against
+		 * code executing on this cpu *not* from access by other cpus.
+		 */
+		if (unlikely(!this_cpu_cmpxchg_double(
+				s->cpu_slab->freelist, s->cpu_slab->tid,
+				object, tid,
+				get_freepointer(s, object), next_tid(tid)))) {
+
+			note_cmpxchg_failure("slab_alloc", s, tid);
+			goto redo;
+		}
+#else
 		c->freelist = get_freepointer(s, object);
+#endif
 		stat(s, ALLOC_FASTPATH);
 	}
+
+#ifndef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
+#endif
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
@@ -1879,9 +2030,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
+#ifdef CONFIG_CMPXCHG_LOCAL
+	unsigned long flags;
 
-	stat(s, FREE_SLOWPATH);
+	local_irq_save(flags);
+#endif
 	slab_lock(page);
+	stat(s, FREE_SLOWPATH);
 
 	if (kmem_cache_debug(s))
 		goto debug;
@@ -1911,6 +2066,9 @@ checks_ok:
 
 out_unlock:
 	slab_unlock(page);
+#ifdef CONFIG_CMPXCHG_LOCAL
+	local_irq_restore(flags);
+#endif
 	return;
 
 slab_empty:
@@ -1922,6 +2080,9 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
+#ifdef CONFIG_CMPXCHG_LOCAL
+	local_irq_restore(flags);
+#endif
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 	return;
@@ -1948,23 +2109,56 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
+#ifdef CONFIG_CMPXCHG_LOCAL
+	unsigned long tid;
+#else
 	unsigned long flags;
+#endif
 
 	slab_free_hook(s, x);
 
+#ifndef CONFIG_CMPXCHG_LOCAL
 	local_irq_save(flags);
+
+#else
+redo:
+#endif
+
+	/*
+	 * Determine the current cpu's per cpu slab.
+	 * The cpu may change afterward. However that does not matter since
+	 * data is retrieved via this pointer. If we are on the same cpu
+	 * during the cmpxchg then the free will succeed.
+	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-	slab_free_hook_irq(s, x);
+#ifdef CONFIG_CMPXCHG_LOCAL
+	tid = c->tid;
+	barrier();
+#endif
 
 	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
+
+#ifdef CONFIG_CMPXCHG_LOCAL
+		if (unlikely(!this_cpu_cmpxchg_double(
+				s->cpu_slab->freelist, s->cpu_slab->tid,
+				c->freelist, tid,
+				object, next_tid(tid)))) {
+
+			note_cmpxchg_failure("slab_free", s, tid);
+			goto redo;
+		}
+#else
 		c->freelist = object;
+#endif
 		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);
 
+#ifndef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
+#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
@@ -2156,9 +2350,23 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
+#ifdef CONFIG_CMPXCHG_LOCAL
+	/*
+	 * Must align to double word boundary for the double cmpxchg instructions
+	 * to work.
+	 */
+	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
+#else
+	/* Regular alignment is sufficient */
 	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
+#endif
+
+	if (!s->cpu_slab)
+		return 0;
+
+	init_kmem_cache_cpus(s);
 
-	return s->cpu_slab != NULL;
+	return 1;
 }
 
 static struct kmem_cache *kmem_cache_node;
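
To see how the tid scheme from the hunks above behaves, assume CONFIG_PREEMPT and CONFIG_NR_CPUS = 64, so TID_STEP = 64 (a worked example; the constants are illustrative):

	unsigned long tid = init_tid(3);	/* 3: cpu 3, event 0 */

	tid = next_tid(tid);	/* 67:  tid_to_cpu(67) = 67 % 64 = 3, tid_to_event(67) = 1 */
	tid = next_tid(tid);	/* 131: still cpu 3, event 2 */

	/*
	 * After a failed cmpxchg_double, note_cmpxchg_failure() can thus
	 * distinguish a cpu migration (cpu component changed) from an
	 * intervening alloc/free on the same cpu (event component changed).
	 */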