 arch/i386/kernel/Makefile      |   2
 arch/i386/kernel/alternative.c | 321
 arch/i386/kernel/cpu/proc.c    |   2
 arch/i386/kernel/module.c      |  32
 arch/i386/kernel/semaphore.c   |   8
 arch/i386/kernel/setup.c       |  95
 arch/i386/kernel/smpboot.c     |   3
 arch/i386/kernel/vmlinux.lds.S |  20
 arch/i386/mm/init.c            |  45
 arch/um/kernel/um_arch.c       |  12
 include/asm-i386/alternative.h | 129
 include/asm-i386/atomic.h      |  28
 include/asm-i386/bitops.h      |   7
 include/asm-i386/cpufeature.h  |   1
 include/asm-i386/mutex.h       |   6
 include/asm-i386/rwlock.h      |  56
 include/asm-i386/semaphore.h   |   8
 include/asm-i386/spinlock.h    |  21
 include/asm-i386/system.h      |  62
 include/asm-um/alternative.h   |   6
 20 files changed, 605 insertions(+), 259 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 65656c033d70..5b9ed21216cf 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y	:= process.o semaphore.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
 		pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
-		quirks.o i8237.o topology.o
+		quirks.o i8237.o topology.o alternative.o
 
 obj-y				+= cpu/
 obj-y				+= timers/
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
new file mode 100644
index 000000000000..5cbd6f99fb2a
--- /dev/null
+++ b/arch/i386/kernel/alternative.c
@@ -0,0 +1,321 @@
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <asm/alternative.h>
+#include <asm/sections.h>
+
+#define DEBUG 0
+#if DEBUG
+# define DPRINTK(fmt, args...) printk(fmt, args)
+#else
+# define DPRINTK(fmt, args...)
+#endif
+
+/* Use inline assembly to define this because the nops are defined
+   as inline assembly strings in the include files and we cannot
+   get them easily into strings. */
+asm("\t.data\nintelnops: "
+	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
+	GENERIC_NOP7 GENERIC_NOP8);
+asm("\t.data\nk8nops: "
+	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+	K8_NOP7 K8_NOP8);
+asm("\t.data\nk7nops: "
+	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+	K7_NOP7 K7_NOP8);
+
+extern unsigned char intelnops[], k8nops[], k7nops[];
+static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
+	NULL,
+	intelnops,
+	intelnops + 1,
+	intelnops + 1 + 2,
+	intelnops + 1 + 2 + 3,
+	intelnops + 1 + 2 + 3 + 4,
+	intelnops + 1 + 2 + 3 + 4 + 5,
+	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
+	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+	NULL,
+	k8nops,
+	k8nops + 1,
+	k8nops + 1 + 2,
+	k8nops + 1 + 2 + 3,
+	k8nops + 1 + 2 + 3 + 4,
+	k8nops + 1 + 2 + 3 + 4 + 5,
+	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
+	NULL,
+	k7nops,
+	k7nops + 1,
+	k7nops + 1 + 2,
+	k7nops + 1 + 2 + 3,
+	k7nops + 1 + 2 + 3 + 4,
+	k7nops + 1 + 2 + 3 + 4 + 5,
+	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
+	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static struct nop {
+	int cpuid;
+	unsigned char **noptable;
+} noptypes[] = {
+	{ X86_FEATURE_K8, k8_nops },
+	{ X86_FEATURE_K7, k7_nops },
+	{ -1, NULL }
+};
+
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
+extern u8 *__smp_locks[], *__smp_locks_end[];
+
+extern u8 __smp_alt_begin[], __smp_alt_end[];
+
+
+static unsigned char** find_nop_table(void)
+{
+	unsigned char **noptable = intel_nops;
+	int i;
+
+	for (i = 0; noptypes[i].cpuid >= 0; i++) {
+		if (boot_cpu_has(noptypes[i].cpuid)) {
+			noptable = noptypes[i].noptable;
+			break;
+		}
+	}
+	return noptable;
+}
+
+/* Replace instructions with better alternatives for this CPU type.
+   This runs before SMP is initialized to avoid SMP problems with
+   self-modifying code.  This implies that asymmetric systems where
+   APs have fewer capabilities than the boot processor are not handled.
+   Tough.  Make sure you disable such features by hand. */
+
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+	unsigned char **noptable = find_nop_table();
+	struct alt_instr *a;
+	int diff, i, k;
+
+	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
+	for (a = start; a < end; a++) {
+		BUG_ON(a->replacementlen > a->instrlen);
+		if (!boot_cpu_has(a->cpuid))
+			continue;
+		memcpy(a->instr, a->replacement, a->replacementlen);
+		diff = a->instrlen - a->replacementlen;
+		/* Pad the rest with nops */
+		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+			k = diff;
+			if (k > ASM_NOP_MAX)
+				k = ASM_NOP_MAX;
+			memcpy(a->instr + i, noptable[k], k);
+		}
+	}
+}
+
+static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
+{
+	struct alt_instr *a;
+
+	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
+	for (a = start; a < end; a++) {
+		memcpy(a->replacement + a->replacementlen,
+		       a->instr,
+		       a->instrlen);
+	}
+}
+
+static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
+{
+	struct alt_instr *a;
+
+	for (a = start; a < end; a++) {
+		memcpy(a->instr,
+		       a->replacement + a->replacementlen,
+		       a->instrlen);
+	}
+}
+
+static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+{
+	u8 **ptr;
+
+	for (ptr = start; ptr < end; ptr++) {
+		if (*ptr < text)
+			continue;
+		if (*ptr > text_end)
+			continue;
+		**ptr = 0xf0; /* lock prefix */
+	};
+}
+
+static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+{
+	unsigned char **noptable = find_nop_table();
+	u8 **ptr;
+
+	for (ptr = start; ptr < end; ptr++) {
+		if (*ptr < text)
+			continue;
+		if (*ptr > text_end)
+			continue;
+		**ptr = noptable[1][0];
+	};
+}
+
+struct smp_alt_module {
+	/* what is this ??? */
+	struct module	*mod;
+	char		*name;
+
+	/* ptrs to lock prefixes */
+	u8		**locks;
+	u8		**locks_end;
+
+	/* .text segment, needed to avoid patching init code ;) */
+	u8		*text;
+	u8		*text_end;
+
+	struct list_head next;
+};
+static LIST_HEAD(smp_alt_modules);
+static DEFINE_SPINLOCK(smp_alt);
+
+static int smp_alt_once = 0;
+static int __init bootonly(char *str)
+{
+	smp_alt_once = 1;
+	return 1;
+}
+__setup("smp-alt-boot", bootonly);
+
+void alternatives_smp_module_add(struct module *mod, char *name,
+				 void *locks, void *locks_end,
+				 void *text, void *text_end)
+{
+	struct smp_alt_module *smp;
+	unsigned long flags;
+
+	if (smp_alt_once) {
+		if (boot_cpu_has(X86_FEATURE_UP))
+			alternatives_smp_unlock(locks, locks_end,
+						text, text_end);
+		return;
+	}
+
+	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+	if (NULL == smp)
+		return; /* we'll run the (safe but slow) SMP code then ... */
+
+	smp->mod	= mod;
+	smp->name	= name;
+	smp->locks	= locks;
+	smp->locks_end	= locks_end;
+	smp->text	= text;
+	smp->text_end	= text_end;
+	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
+		__FUNCTION__, smp->locks, smp->locks_end,
+		smp->text, smp->text_end, smp->name);
+
+	spin_lock_irqsave(&smp_alt, flags);
+	list_add_tail(&smp->next, &smp_alt_modules);
+	if (boot_cpu_has(X86_FEATURE_UP))
+		alternatives_smp_unlock(smp->locks, smp->locks_end,
+					smp->text, smp->text_end);
+	spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void alternatives_smp_module_del(struct module *mod)
+{
+	struct smp_alt_module *item;
+	unsigned long flags;
+
+	if (smp_alt_once)
+		return;
+
+	spin_lock_irqsave(&smp_alt, flags);
+	list_for_each_entry(item, &smp_alt_modules, next) {
+		if (mod != item->mod)
+			continue;
+		list_del(&item->next);
+		spin_unlock_irqrestore(&smp_alt, flags);
+		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
+		kfree(item);
+		return;
+	}
+	spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void alternatives_smp_switch(int smp)
+{
+	struct smp_alt_module *mod;
+	unsigned long flags;
+
+	if (smp_alt_once)
+		return;
+	BUG_ON(!smp && (num_online_cpus() > 1));
+
+	spin_lock_irqsave(&smp_alt, flags);
+	if (smp) {
+		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
+		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		alternatives_smp_apply(__smp_alt_instructions,
+				       __smp_alt_instructions_end);
+		list_for_each_entry(mod, &smp_alt_modules, next)
+			alternatives_smp_lock(mod->locks, mod->locks_end,
+					      mod->text, mod->text_end);
+	} else {
+		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		apply_alternatives(__smp_alt_instructions,
+				   __smp_alt_instructions_end);
+		list_for_each_entry(mod, &smp_alt_modules, next)
+			alternatives_smp_unlock(mod->locks, mod->locks_end,
+						mod->text, mod->text_end);
+	}
+	spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void __init alternative_instructions(void)
+{
+	apply_alternatives(__alt_instructions, __alt_instructions_end);
+
+	/* switch to patch-once-at-boottime-only mode and free the
+	 * tables in case we know the number of CPUs will never ever
+	 * change */
+#ifdef CONFIG_HOTPLUG_CPU
+	if (num_possible_cpus() < 2)
+		smp_alt_once = 1;
+#else
+	smp_alt_once = 1;
+#endif
+
+	if (smp_alt_once) {
+		if (1 == num_possible_cpus()) {
+			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+			apply_alternatives(__smp_alt_instructions,
+					   __smp_alt_instructions_end);
+			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
+						_text, _etext);
+		}
+		free_init_pages("SMP alternatives",
+				(unsigned long)__smp_alt_begin,
+				(unsigned long)__smp_alt_end);
+	} else {
+		alternatives_smp_save(__smp_alt_instructions,
+				      __smp_alt_instructions_end);
+		alternatives_smp_module_add(NULL, "core kernel",
+					    __smp_locks, __smp_locks_end,
+					    _text, _etext);
+		alternatives_smp_switch(0);
+	}
+}
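
The padding loop in apply_alternatives() above copies the (shorter) replacement over the original instruction, then fills the remaining bytes with the longest nops available, at most ASM_NOP_MAX bytes per step. A minimal stand-alone sketch of that logic, written as plain userspace C rather than kernel code (plain 0x90 bytes stand in for the per-CPU noptable entries):

#include <stdio.h>
#include <string.h>

#define ASM_NOP_MAX 8

/* Sketch of the padding loop in apply_alternatives(); the real code
 * copies k-byte sequences from noptable[k] instead of plain 0x90s. */
static void patch_site(unsigned char *instr, int instrlen,
		       const unsigned char *repl, int replacementlen)
{
	int diff, i, k;

	memcpy(instr, repl, replacementlen);
	diff = instrlen - replacementlen;
	/* pad the rest with nops, at most ASM_NOP_MAX bytes per step */
	for (i = replacementlen; diff > 0; diff -= k, i += k) {
		k = diff;
		if (k > ASM_NOP_MAX)
			k = ASM_NOP_MAX;
		memset(instr + i, 0x90, k);
	}
}

int main(void)
{
	unsigned char site[12] = { 0 };				/* pretend 12-byte original */
	const unsigned char mfence[3] = { 0x0f, 0xae, 0xf0 };	/* 3-byte replacement */
	int i;

	patch_site(site, sizeof(site), mfence, sizeof(mfence));
	for (i = 0; i < (int)sizeof(site); i++)
		printf("%02x ", site[i]);	/* 0f ae f0 followed by nine 0x90s */
	printf("\n");
	return 0;
}

With a 12-byte site and a 3-byte replacement the loop runs twice (k = 8, then k = 1), leaving nine nop bytes after the replacement.
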
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 89a85af33d28..5cfbd8011698 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		/* Other (Linux-defined) */
 		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
 		NULL, NULL, NULL, NULL,
-		"constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		"constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c
index 5149c8a621f0..470cf97e7cd3 100644
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -104,26 +104,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 	return -ENOEXEC;
 }
 
-extern void apply_alternatives(void *start, void *end);
-
 int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
-	const Elf_Shdr *s;
+	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
-	/* look for .altinstructions to patch */
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
-		void *seg;
-		if (strcmp(".altinstructions", secstrings + s->sh_name))
-			continue;
-		seg = (void *)s->sh_addr;
-		apply_alternatives(seg, seg + s->sh_size);
-	}
+		if (!strcmp(".text", secstrings + s->sh_name))
+			text = s;
+		if (!strcmp(".altinstructions", secstrings + s->sh_name))
+			alt = s;
+		if (!strcmp(".smp_locks", secstrings + s->sh_name))
+			locks= s;
+	}
+
+	if (alt) {
+		/* patch .altinstructions */
+		void *aseg = (void *)alt->sh_addr;
+		apply_alternatives(aseg, aseg + alt->sh_size);
+	}
+	if (locks && text) {
+		void *lseg = (void *)locks->sh_addr;
+		void *tseg = (void *)text->sh_addr;
+		alternatives_smp_module_add(me, me->name,
+					    lseg, lseg + locks->sh_size,
+					    tseg, tseg + text->sh_size);
+	}
 	return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
+	alternatives_smp_module_del(mod);
 }
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
index 7455ab643943..967dc74df9ee 100644
--- a/arch/i386/kernel/semaphore.c
+++ b/arch/i386/kernel/semaphore.c
@@ -110,11 +110,11 @@ asm(
 ".align 4\n"
 ".globl __write_lock_failed\n"
 "__write_lock_failed:\n\t"
-	LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
+	LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
 "1:	rep; nop\n\t"
 	"cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
 	"jne 1b\n\t"
-	LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
+	LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
 	"jnz __write_lock_failed\n\t"
 	"ret"
 );
@@ -124,11 +124,11 @@ asm(
 ".align 4\n"
 ".globl __read_lock_failed\n"
 "__read_lock_failed:\n\t"
-	LOCK "incl (%eax)\n"
+	LOCK_PREFIX "incl (%eax)\n"
 "1:	rep; nop\n\t"
 	"cmpl $1,(%eax)\n\t"
 	"js 1b\n\t"
-	LOCK "decl (%eax)\n\t"
+	LOCK_PREFIX "decl (%eax)\n\t"
 	"js __read_lock_failed\n\t"
 	"ret"
 );
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index ab62a9f4701e..5f58f8cb9836 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1377,101 +1377,6 @@ static void __init register_memory(void)
 			pci_mem_start, gapstart, gapsize);
 }
 
-/* Use inline assembly to define this because the nops are defined
-   as inline assembly strings in the include files and we cannot
-   get them easily into strings. */
-asm("\t.data\nintelnops: "
-	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
-	GENERIC_NOP7 GENERIC_NOP8);
-asm("\t.data\nk8nops: "
-	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-	K8_NOP7 K8_NOP8);
-asm("\t.data\nk7nops: "
-	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-	K7_NOP7 K7_NOP8);
-
-extern unsigned char intelnops[], k8nops[], k7nops[];
-static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
-	NULL,
-	intelnops,
-	intelnops + 1,
-	intelnops + 1 + 2,
-	intelnops + 1 + 2 + 3,
-	intelnops + 1 + 2 + 3 + 4,
-	intelnops + 1 + 2 + 3 + 4 + 5,
-	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
-	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
-	NULL,
-	k8nops,
-	k8nops + 1,
-	k8nops + 1 + 2,
-	k8nops + 1 + 2 + 3,
-	k8nops + 1 + 2 + 3 + 4,
-	k8nops + 1 + 2 + 3 + 4 + 5,
-	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
-	NULL,
-	k7nops,
-	k7nops + 1,
-	k7nops + 1 + 2,
-	k7nops + 1 + 2 + 3,
-	k7nops + 1 + 2 + 3 + 4,
-	k7nops + 1 + 2 + 3 + 4 + 5,
-	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
-	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static struct nop {
-	int cpuid;
-	unsigned char **noptable;
-} noptypes[] = {
-	{ X86_FEATURE_K8, k8_nops },
-	{ X86_FEATURE_K7, k7_nops },
-	{ -1, NULL }
-};
-
-/* Replace instructions with better alternatives for this CPU type.
-
-   This runs before SMP is initialized to avoid SMP problems with
-   self modifying code. This implies that assymetric systems where
-   APs have less capabilities than the boot processor are not handled.
-   Tough. Make sure you disable such features by hand. */
-void apply_alternatives(void *start, void *end)
-{
-	struct alt_instr *a;
-	int diff, i, k;
-	unsigned char **noptable = intel_nops;
-	for (i = 0; noptypes[i].cpuid >= 0; i++) {
-		if (boot_cpu_has(noptypes[i].cpuid)) {
-			noptable = noptypes[i].noptable;
-			break;
-		}
-	}
-	for (a = start; (void *)a < end; a++) {
-		if (!boot_cpu_has(a->cpuid))
-			continue;
-		BUG_ON(a->replacementlen > a->instrlen);
-		memcpy(a->instr, a->replacement, a->replacementlen);
-		diff = a->instrlen - a->replacementlen;
-		/* Pad the rest with nops */
-		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-			k = diff;
-			if (k > ASM_NOP_MAX)
-				k = ASM_NOP_MAX;
-			memcpy(a->instr + i, noptable[k], k);
-		}
-	}
-}
-
-void __init alternative_instructions(void)
-{
-	extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-	apply_alternatives(__alt_instructions, __alt_instructions_end);
-}
-
 static char * __init machine_specific_memory_setup(void);
 
 #ifdef CONFIG_MCA
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 7007e1783797..4c470e99a742 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -899,6 +899,7 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
 	unsigned short nmi_high = 0, nmi_low = 0;
 
 	++cpucount;
+	alternatives_smp_switch(1);
 
 	/*
 	 * We can't use kernel_thread since we must avoid to
@@ -1368,6 +1369,8 @@ void __cpu_die(unsigned int cpu)
 		/* They ack this in play_dead by setting CPU_DEAD */
 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
 			printk ("CPU %d is now offline\n", cpu);
+			if (1 == num_online_cpus())
+				alternatives_smp_switch(0);
 			return;
 		}
 		msleep(100);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 4710195b6b74..3f21c6f6466d 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -68,6 +68,26 @@ SECTIONS
 	*(.data.init_task)
   }
 
+  /* might get freed after init */
+  . = ALIGN(4096);
+  __smp_alt_begin = .;
+  __smp_alt_instructions = .;
+  .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+	*(.smp_altinstructions)
+  }
+  __smp_alt_instructions_end = .;
+  . = ALIGN(4);
+  __smp_locks = .;
+  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+	*(.smp_locks)
+  }
+  __smp_locks_end = .;
+  .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
+	*(.smp_altinstr_replacement)
+  }
+  . = ALIGN(4096);
+  __smp_alt_end = .;
+
   /* will be freed after init */
   . = ALIGN(4096);		/* Init code and data */
   __init_begin = .;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7ba55a6e2dbc..9f66ac582a8b 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -720,21 +720,6 @@ static int noinline do_test_wp_bit(void)
 	return flag;
 }
 
-void free_initmem(void)
-{
-	unsigned long addr;
-
-	addr = (unsigned long)(&__init_begin);
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 
 extern char __start_rodata, __end_rodata;
@@ -758,17 +743,31 @@ void mark_rodata_ro(void)
 }
 #endif
 
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(addr));
+		init_page_count(virt_to_page(addr));
+		memset((void *)addr, 0xcc, PAGE_SIZE);
+		free_page(addr);
+		totalram_pages++;
+	}
+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+void free_initmem(void)
+{
+	free_init_pages("unused kernel memory",
+			(unsigned long)(&__init_begin),
+			(unsigned long)(&__init_end));
+}
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (start < end)
-		printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
+	free_init_pages("initrd memory", start, end);
 }
 #endif
+
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 27cdf9164422..80c9c18aae94 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -491,6 +491,16 @@ void __init check_bugs(void)
 	check_devanon();
 }
 
-void apply_alternatives(void *start, void *end)
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+}
+
+void alternatives_smp_module_add(struct module *mod, char *name,
+				 void *locks, void *locks_end,
+				 void *text, void *text_end)
+{
+}
+
+void alternatives_smp_module_del(struct module *mod)
 {
 }
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
new file mode 100644
index 000000000000..e201decea0c9
--- /dev/null
+++ b/include/asm-i386/alternative.h
@@ -0,0 +1,129 @@
+#ifndef _I386_ALTERNATIVE_H
+#define _I386_ALTERNATIVE_H
+
+#ifdef __KERNEL__
+
+struct alt_instr {
+	u8 *instr; 		/* original instruction */
+	u8 *replacement;
+	u8  cpuid;		/* cpuid bit set for replacement */
+	u8  instrlen;		/* length of original instruction */
+	u8  replacementlen; 	/* length of new instruction, <= instrlen */
+	u8  pad;
+};
+
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+struct module;
+extern void alternatives_smp_module_add(struct module *mod, char *name,
+					void *locks, void *locks_end,
+					void *text, void *text_end);
+extern void alternatives_smp_module_del(struct module *mod);
+extern void alternatives_smp_switch(int smp);
+
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * The length of oldinstr must be greater than or equal to the length of
+ * newinstr; it can be padded with nops as needed.
+ *
+ * For non-barrier-like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) 			\
+	asm volatile ("661:\n\t" oldinstr "\n662:\n" 			\
+		      ".section .altinstructions,\"a\"\n"		\
+		      "  .align 4\n"					\
+		      "  .long 661b\n"            /* label */		\
+		      "  .long 663f\n"		  /* new instruction */	\
+		      "  .byte %c0\n"             /* feature bit */	\
+		      "  .byte 662b-661b\n"       /* sourcelen */	\
+		      "  .byte 664f-663f\n"       /* replacementlen */	\
+		      ".previous\n"					\
+		      ".section .altinstr_replacement,\"ax\"\n"	\
+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */\
+		      ".previous" :: "i" (feature) : "memory")
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * It is best to use constraints that are fixed size (like (%1) ... "r").
+ * If you use variable-sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst-case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...)	\
+	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
+		      ".section .altinstructions,\"a\"\n"		\
+		      "  .align 4\n"					\
+		      "  .long 661b\n"            /* label */		\
+		      "  .long 663f\n"		  /* new instruction */	\
+		      "  .byte %c0\n"             /* feature bit */	\
+		      "  .byte 662b-661b\n"       /* sourcelen */	\
+		      "  .byte 664f-663f\n"       /* replacementlen */	\
+		      ".previous\n"					\
+		      ".section .altinstr_replacement,\"ax\"\n"	\
+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */\
+		      ".previous" :: "i" (feature), ##input)
+
+/*
+ * Alternative inline assembly for SMP.
+ *
+ * alternative_smp() takes two versions (SMP first, UP second) and is
+ * for more complex stuff such as spinlocks.
+ *
+ * The LOCK_PREFIX macro defined here replaces the LOCK and
+ * LOCK_PREFIX macros used everywhere in the source tree.
+ *
+ * SMP alternatives use the same data structures as the other
+ * alternatives and the X86_FEATURE_UP flag to indicate the case of a
+ * UP system running an SMP kernel.  The existing apply_alternatives()
+ * works fine for patching an SMP kernel for UP.
+ *
+ * The SMP alternative tables can be kept after boot and contain both
+ * UP and SMP versions of the instructions to allow switching back to
+ * SMP at runtime, when hotplugging in a new CPU, which is especially
+ * useful in virtualized environments.
+ *
+ * The very common lock prefix is handled as a special case in a
+ * separate table which is a pure address list without replacement ptr
+ * and size information.  That keeps the table sizes small.
+ */
+
+#ifdef CONFIG_SMP
+#define alternative_smp(smpinstr, upinstr, args...) 			\
+	asm volatile ("661:\n\t" smpinstr "\n662:\n" 			\
+		      ".section .smp_altinstructions,\"a\"\n"		\
+		      "  .align 4\n"					\
+		      "  .long 661b\n"            /* label */		\
+		      "  .long 663f\n"		  /* new instruction */	\
+		      "  .byte 0x68\n"            /* X86_FEATURE_UP */	\
+		      "  .byte 662b-661b\n"       /* sourcelen */	\
+		      "  .byte 664f-663f\n"       /* replacementlen */	\
+		      ".previous\n"					\
+		      ".section .smp_altinstr_replacement,\"awx\"\n"	\
+		      "663:\n\t" upinstr "\n"     /* replacement */	\
+		      "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
+		      ".previous" : args)
+
+#define LOCK_PREFIX \
+		".section .smp_locks,\"a\"\n"	\
+		"  .align 4\n"			\
+		"  .long 661f\n" /* address */	\
+		".previous\n"			\
+		"661:\n\tlock; "
+
+#else /* ! CONFIG_SMP */
+#define alternative_smp(smpinstr, upinstr, args...) \
+	asm volatile (upinstr : args)
+#define LOCK_PREFIX ""
+#endif
+
+#endif /* _I386_ALTERNATIVE_H */
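
For reference, a hypothetical caller of the alternative() macro defined above (illustration only, not part of this patch; the instruction pair and feature bit are just an example): oldinstr must be at least as long as newinstr, so the 6-byte locked add leaves room for the 3-byte mfence plus nop padding on CPUs that set X86_FEATURE_XMM2.

/* Illustration only: on CPUs with SSE2 the locked add is patched into
 * an mfence at boot time; other CPUs keep the original instruction. */
static inline void example_store_fence(void)
{
	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
}
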
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d4..78b0032d1f29 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -10,12 +10,6 @@
  * resource counting etc..
  */
 
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
@@ -52,7 +46,7 @@ typedef struct { volatile int counter; } atomic_t;
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "addl %1,%0"
+		LOCK_PREFIX "addl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -67,7 +61,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "subl %1,%0"
+		LOCK_PREFIX "subl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -86,7 +80,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "subl %2,%0; sete %1"
+		LOCK_PREFIX "subl %2,%0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -101,7 +95,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "incl %0"
+		LOCK_PREFIX "incl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -115,7 +109,7 @@ static __inline__ void atomic_inc(atomic_t *v)
 static __inline__ void atomic_dec(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "decl %0"
+		LOCK_PREFIX "decl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -133,7 +127,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "decl %0; sete %1"
+		LOCK_PREFIX "decl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -152,7 +146,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "incl %0; sete %1"
+		LOCK_PREFIX "incl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -172,7 +166,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "addl %2,%0; sets %1"
+		LOCK_PREFIX "addl %2,%0; sets %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -195,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 	/* Modern 486+ processor */
 	__i = i;
 	__asm__ __volatile__(
-		LOCK "xaddl %0, %1;"
+		LOCK_PREFIX "xaddl %0, %1;"
 		:"=r"(i)
 		:"m"(v->counter), "0"(i));
 	return i + __i;
@@ -242,11 +236,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK "andl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
 : : "r" (~(mask)),"m" (*addr) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK "orl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
 : : "r" (mask),"m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 88e6ca248cd7..7d20b95edb3b 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -7,6 +7,7 @@
 
 #include <linux/config.h>
 #include <linux/compiler.h>
+#include <asm/alternative.h>
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -16,12 +17,6 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
 #define ADDR (*(volatile long *) addr)
 
 /**
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index c4ec2a4d8fdf..5c0b5876b931 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -70,6 +70,7 @@
 #define X86_FEATURE_P3		(3*32+ 6) /* P3 */
 #define X86_FEATURE_P4		(3*32+ 7) /* P4 */
 #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 9b2199e829f3..05a538531229 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -9,6 +9,8 @@
 #ifndef _ASM_MUTEX_H
 #define _ASM_MUTEX_H
 
+#include "asm/alternative.h"
+
 /**
  * __mutex_fastpath_lock - try to take the lock by moving the count
  *   from 1 to a 0 value
@@ -27,7 +29,7 @@ do { \
 	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
 									\
 	__asm__ __volatile__(						\
-		LOCK	"   decl (%%eax)	\n"			\
+		LOCK_PREFIX "   decl (%%eax)	\n"			\
 			"   js 2f		\n"			\
 			"1:			\n"			\
 									\
@@ -83,7 +85,7 @@ do { \
 	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
 									\
 	__asm__ __volatile__(						\
-		LOCK	"   incl (%%eax)	\n"			\
+		LOCK_PREFIX "   incl (%%eax)	\n"			\
 			"   jle 2f		\n"			\
 			"1:			\n"			\
 									\
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index b57cc7afdf7e..94f00195d543 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -21,21 +21,23 @@
 #define RW_LOCK_BIAS_STR	"0x01000000"
 
 #define __build_read_lock_ptr(rw, helper)   \
-	asm volatile(LOCK "subl $1,(%0)\n\t" \
+	alternative_smp("lock; subl $1,(%0)\n\t" \
 		     "jns 1f\n" \
 		     "call " helper "\n\t" \
-		     "1:\n" \
-		     ::"a" (rw) : "memory")
+		     "1:\n", \
+		     "subl $1,(%0)\n\t", \
+		     :"a" (rw) : "memory")
 
 #define __build_read_lock_const(rw, helper)   \
-	asm volatile(LOCK "subl $1,%0\n\t" \
+	alternative_smp("lock; subl $1,%0\n\t" \
 		     "jns 1f\n" \
 		     "pushl %%eax\n\t" \
 		     "leal %0,%%eax\n\t" \
 		     "call " helper "\n\t" \
 		     "popl %%eax\n\t" \
-		     "1:\n" \
-		     :"=m" (*(volatile int *)rw) : : "memory")
+		     "1:\n", \
+		     "subl $1,%0\n\t", \
+		     "=m" (*(volatile int *)rw) : : "memory")
 
 #define __build_read_lock(rw, helper)	do { \
 						if (__builtin_constant_p(rw)) \
@@ -45,21 +47,23 @@
 					} while (0)
 
 #define __build_write_lock_ptr(rw, helper) \
-	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
+	alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
 		     "jz 1f\n" \
 		     "call " helper "\n\t" \
-		     "1:\n" \
-		     ::"a" (rw) : "memory")
+		     "1:\n", \
+		     "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t", \
+		     :"a" (rw) : "memory")
 
 #define __build_write_lock_const(rw, helper) \
-	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
+	alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
 		     "jz 1f\n" \
 		     "pushl %%eax\n\t" \
 		     "leal %0,%%eax\n\t" \
 		     "call " helper "\n\t" \
 		     "popl %%eax\n\t" \
-		     "1:\n" \
-		     :"=m" (*(volatile int *)rw) : : "memory")
+		     "1:\n", \
+		     "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
+		     "=m" (*(volatile int *)rw) : : "memory")
 
 #define __build_write_lock(rw, helper)	do { \
 						if (__builtin_constant_p(rw)) \
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 6a42b2142fd6..f7a0f310c524 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -99,7 +99,7 @@ static inline void down(struct semaphore * sem)
 	might_sleep();
 	__asm__ __volatile__(
 		"# atomic down operation\n\t"
-		LOCK "decl %0\n\t"     /* --sem->count */
+		LOCK_PREFIX "decl %0\n\t"     /* --sem->count */
 		"js 2f\n"
 		"1:\n"
 		LOCK_SECTION_START("")
@@ -123,7 +123,7 @@ static inline int down_interruptible(struct semaphore * sem)
 	might_sleep();
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
-		LOCK "decl %1\n\t"     /* --sem->count */
+		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
 		"js 2f\n\t"
 		"xorl %0,%0\n"
 		"1:\n"
@@ -148,7 +148,7 @@ static inline int down_trylock(struct semaphore * sem)
 
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
-		LOCK "decl %1\n\t"     /* --sem->count */
+		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
 		"js 2f\n\t"
 		"xorl %0,%0\n"
 		"1:\n"
@@ -173,7 +173,7 @@ static inline void up(struct semaphore * sem)
 {
 	__asm__ __volatile__(
 		"# atomic up operation\n\t"
-		LOCK "incl %0\n\t"     /* ++sem->count */
+		LOCK_PREFIX "incl %0\n\t"     /* ++sem->count */
 		"jle 2f\n"
 		"1:\n"
 		LOCK_SECTION_START("")
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 23604350cdf4..a1b8a8a30e21 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -48,18 +48,23 @@
 	"jmp 1b\n" \
 	"4:\n\t"
 
+#define __raw_spin_lock_string_up \
+	"\n\tdecb %0"
+
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		__raw_spin_lock_string
-		:"=m" (lock->slock) : : "memory");
+	alternative_smp(
+		__raw_spin_lock_string,
+		__raw_spin_lock_string_up,
+		"=m" (lock->slock) : : "memory");
 }
 
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-	__asm__ __volatile__(
-		__raw_spin_lock_string_flags
-		:"=m" (lock->slock) : "r" (flags) : "memory");
+	alternative_smp(
+		__raw_spin_lock_string_flags,
+		__raw_spin_lock_string_up,
+		"=m" (lock->slock) : "r" (flags) : "memory");
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
@@ -178,12 +183,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
 				 : "=m" (rw->lock) : : "memory");
 }
 
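
As an aside, the LOCK_PREFIX used in the unlock paths above records the address of its 0xf0 lock byte in the .smp_locks section; alternatives_smp_unlock() overwrites that byte with a one-byte nop on uniprocessor machines, and alternatives_smp_lock() restores it when a second CPU comes online. A hypothetical user of the macro (illustration only; atomic.h above is converted the same way by this patch):

/* Illustration only: the emitted .smp_locks entry lets the lock byte
 * be toggled between "lock" (0xf0) and a nop at runtime. */
static inline void example_atomic_inc(volatile int *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"=m" (*v)
		:"m" (*v));
}
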
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 399145a247f2..d0d8d7448d88 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -352,67 +352,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 
 #endif
 
-#ifdef __KERNEL__
-struct alt_instr {
-	__u8 *instr; 		/* original instruction */
-	__u8 *replacement;
-	__u8  cpuid;		/* cpuid bit set for replacement */
-	__u8  instrlen;		/* length of original instruction */
-	__u8  replacementlen; 	/* length of new instruction, <= instrlen */
-	__u8  pad;
-};
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows to use optimized instructions even on generic binary
- * kernels.
- *
- * length of oldinstr must be longer or equal the length of newinstr
- * It can be padded with nops as needed.
- *
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature) 			\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n" 			\
-		      ".section .altinstructions,\"a\"\n"		\
-		      "  .align 4\n"					\
-		      "  .long 661b\n"            /* label */		\
-		      "  .long 663f\n"		  /* new instruction */	\
-		      "  .byte %c0\n"             /* feature bit */	\
-		      "  .byte 662b-661b\n"       /* sourcelen */	\
-		      "  .byte 664f-663f\n"       /* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"	\
-		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
-		      ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Pecularities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement maake sure to pad to the worst case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...)	\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
-		      ".section .altinstructions,\"a\"\n"		\
-		      "  .align 4\n"					\
-		      "  .long 661b\n"            /* label */		\
-		      "  .long 663f\n"		  /* new instruction */	\
-		      "  .byte %c0\n"             /* feature bit */	\
-		      "  .byte 662b-661b\n"       /* sourcelen */	\
-		      "  .byte 664f-663f\n"       /* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"	\
-		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
-		      ".previous" :: "i" (feature), ##input)
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -558,5 +497,6 @@ static inline void sched_cacheflush(void)
 }
 
 extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 #endif
diff --git a/include/asm-um/alternative.h b/include/asm-um/alternative.h
new file mode 100644
index 000000000000..b6434396bd42
--- /dev/null
+++ b/include/asm-um/alternative.h
@@ -0,0 +1,6 @@
+#ifndef __UM_ALTERNATIVE_H
+#define __UM_ALTERNATIVE_H
+
+#include "asm/arch/alternative.h"
+
+#endif