author     Gerd Hoffmann <kraxel@suse.de>          2006-03-23 05:59:32 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-03-23 10:38:04 -0500
commit     9a0b5817ad97bb718ab85322759d19a238712b47 (patch)
tree       39bd21eb69c4001b99096d96a76a2e5d37904108 /arch/i386
parent     4d7d8c82c181711d28c8336108330a9121f5ef07 (diff)
[PATCH] x86: SMP alternatives
Implement SMP alternatives, i.e. switching at runtime between different
code versions for UP and SMP. The code can patch both SMP->UP and
UP->SMP. The UP->SMP case is useful for CPU hotplug.

With CONFIG_CPU_HOTPLUG enabled the code switches to UP at boot time
and when the number of CPUs goes down to 1, and switches to SMP when
the number of CPUs goes up to 2. Without CONFIG_CPU_HOTPLUG or on
non-SMP-capable systems the code is patched once at boot time (if
needed) and the tables are released afterwards.

The changes in detail:

 * The current alternatives bits are moved to a separate file; the SMP
   alternatives code is added there.

 * The patch adds some new ELF sections to the kernel:

     .smp_altinstructions
        like .altinstructions, also contains a list of alt_instr
        structs.
     .smp_altinstr_replacement
        like .altinstr_replacement, but also has some space to save
        the original instruction before replacing it.
     .smp_locks
        list of pointers to lock prefixes which can be nop'ed out
        on UP.

   The first two are used to replace more complex instruction
   sequences such as spinlocks and semaphores. It would be possible to
   handle the lock prefixes that way as well, but by treating them as
   a special case the table sizes become much smaller.

 * The sections are page-aligned and padded up to page size, so they
   can be freed if they are not needed.

 * Split the code that releases init pages out into a separate
   function and use it to release the ELF sections if they are unused.

Signed-off-by: Gerd Hoffmann <kraxel@suse.de>
Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
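For reference, the patching below is driven by two pieces of metadata.
The sketch that follows shows roughly what the supporting definitions
look like; the real ones live in include/asm-i386/alternative.h, which
is outside this diffstat (limited to arch/i386), so the exact field
names and the LOCK_PREFIX wording here are inferred from their uses in
alternative.c and semaphore.c and should be read as illustrative, not
authoritative:

    /* Illustrative sketch only: inferred from how alternative.c below
     * consumes the tables; the authoritative definitions are in
     * include/asm-i386/alternative.h, not shown in this diffstat. */
    struct alt_instr {
            u8 *instr;              /* original instruction to patch */
            u8 *replacement;        /* replacement; in .smp_altinstr_replacement
                                       it is followed by save space for the
                                       original (see alternatives_smp_save) */
            u8  cpuid;              /* cpuid feature bit that selects it */
            u8  instrlen;           /* length of original instruction */
            u8  replacementlen;     /* length of replacement, <= instrlen */
            u8  pad;
    };

    /* On SMP builds, LOCK_PREFIX emits the 0xf0 lock byte and records
     * its address in .smp_locks; alternatives_smp_unlock() can then
     * overwrite that byte with a one-byte nop on UP, and
     * alternatives_smp_lock() restores 0xf0 when a second CPU
     * comes online. */
    #ifdef CONFIG_SMP
    #define LOCK_PREFIX                             \
                    ".section .smp_locks,\"a\"\n"   \
                    "  .align 4\n"                  \
                    "  .long 661f\n"                \
                    ".previous\n"                   \
                    "661:\n\tlock; "
    #else
    #define LOCK_PREFIX ""
    #endif

apply_alternatives() then copies the replacement over the original
instruction and pads any length difference with the best nop sequence
for the CPU, which is why replacementlen must never exceed instrlen
(the BUG_ON in alternative.c).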
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/kernel/Makefile        |   2
-rw-r--r--  arch/i386/kernel/alternative.c   | 321
-rw-r--r--  arch/i386/kernel/cpu/proc.c      |   2
-rw-r--r--  arch/i386/kernel/module.c        |  32
-rw-r--r--  arch/i386/kernel/semaphore.c     |   8
-rw-r--r--  arch/i386/kernel/setup.c         |  95
-rw-r--r--  arch/i386/kernel/smpboot.c       |   3
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S   |  20
-rw-r--r--  arch/i386/mm/init.c              |  45
9 files changed, 394 insertions, 134 deletions
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 65656c033d70..5b9ed21216cf 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y   := process.o semaphore.o signal.o entry.o traps.o irq.o \
                 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
                 pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
-                quirks.o i8237.o topology.o
+                quirks.o i8237.o topology.o alternative.o
 
 obj-y                           += cpu/
 obj-y                           += timers/
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
new file mode 100644
index 000000000000..5cbd6f99fb2a
--- /dev/null
+++ b/arch/i386/kernel/alternative.c
@@ -0,0 +1,321 @@
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <asm/alternative.h>
+#include <asm/sections.h>
+
+#define DEBUG 0
+#if DEBUG
+# define DPRINTK(fmt, args...) printk(fmt, args)
+#else
+# define DPRINTK(fmt, args...)
+#endif
+
+/* Use inline assembly to define this because the nops are defined
+   as inline assembly strings in the include files and we cannot
+   get them easily into strings. */
+asm("\t.data\nintelnops: "
+        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
+        GENERIC_NOP7 GENERIC_NOP8);
+asm("\t.data\nk8nops: "
+        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+        K8_NOP7 K8_NOP8);
+asm("\t.data\nk7nops: "
+        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+        K7_NOP7 K7_NOP8);
+
+extern unsigned char intelnops[], k8nops[], k7nops[];
+static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
+        NULL,
+        intelnops,
+        intelnops + 1,
+        intelnops + 1 + 2,
+        intelnops + 1 + 2 + 3,
+        intelnops + 1 + 2 + 3 + 4,
+        intelnops + 1 + 2 + 3 + 4 + 5,
+        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
+        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+        NULL,
+        k8nops,
+        k8nops + 1,
+        k8nops + 1 + 2,
+        k8nops + 1 + 2 + 3,
+        k8nops + 1 + 2 + 3 + 4,
+        k8nops + 1 + 2 + 3 + 4 + 5,
+        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
+        NULL,
+        k7nops,
+        k7nops + 1,
+        k7nops + 1 + 2,
+        k7nops + 1 + 2 + 3,
+        k7nops + 1 + 2 + 3 + 4,
+        k7nops + 1 + 2 + 3 + 4 + 5,
+        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
+        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static struct nop {
+        int cpuid;
+        unsigned char **noptable;
+} noptypes[] = {
+        { X86_FEATURE_K8, k8_nops },
+        { X86_FEATURE_K7, k7_nops },
+        { -1, NULL }
+};
+
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
+extern u8 *__smp_locks[], *__smp_locks_end[];
+
+extern u8 __smp_alt_begin[], __smp_alt_end[];
+
+
+static unsigned char** find_nop_table(void)
+{
+        unsigned char **noptable = intel_nops;
+        int i;
+
+        for (i = 0; noptypes[i].cpuid >= 0; i++) {
+                if (boot_cpu_has(noptypes[i].cpuid)) {
+                        noptable = noptypes[i].noptable;
+                        break;
+                }
+        }
+        return noptable;
+}
+
+/* Replace instructions with better alternatives for this CPU type.
+   This runs before SMP is initialized to avoid SMP problems with
+   self modifying code. This implies that assymetric systems where
+   APs have less capabilities than the boot processor are not handled.
+   Tough. Make sure you disable such features by hand. */
+
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+        unsigned char **noptable = find_nop_table();
+        struct alt_instr *a;
+        int diff, i, k;
+
+        DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
+        for (a = start; a < end; a++) {
+                BUG_ON(a->replacementlen > a->instrlen);
+                if (!boot_cpu_has(a->cpuid))
+                        continue;
+                memcpy(a->instr, a->replacement, a->replacementlen);
+                diff = a->instrlen - a->replacementlen;
+                /* Pad the rest with nops */
+                for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+                        k = diff;
+                        if (k > ASM_NOP_MAX)
+                                k = ASM_NOP_MAX;
+                        memcpy(a->instr + i, noptable[k], k);
+                }
+        }
+}
+
+static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
+{
+        struct alt_instr *a;
+
+        DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
+        for (a = start; a < end; a++) {
+                memcpy(a->replacement + a->replacementlen,
+                       a->instr,
+                       a->instrlen);
+        }
+}
+
+static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
+{
+        struct alt_instr *a;
+
+        for (a = start; a < end; a++) {
+                memcpy(a->instr,
+                       a->replacement + a->replacementlen,
+                       a->instrlen);
+        }
+}
+
+static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+{
+        u8 **ptr;
+
+        for (ptr = start; ptr < end; ptr++) {
+                if (*ptr < text)
+                        continue;
+                if (*ptr > text_end)
+                        continue;
+                **ptr = 0xf0; /* lock prefix */
+        };
+}
+
+static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+{
+        unsigned char **noptable = find_nop_table();
+        u8 **ptr;
+
+        for (ptr = start; ptr < end; ptr++) {
+                if (*ptr < text)
+                        continue;
+                if (*ptr > text_end)
+                        continue;
+                **ptr = noptable[1][0];
+        };
+}
+
+struct smp_alt_module {
+        /* what is this ??? */
+        struct module *mod;
+        char *name;
+
+        /* ptrs to lock prefixes */
+        u8 **locks;
+        u8 **locks_end;
+
+        /* .text segment, needed to avoid patching init code ;) */
+        u8 *text;
+        u8 *text_end;
+
+        struct list_head next;
+};
+static LIST_HEAD(smp_alt_modules);
+static DEFINE_SPINLOCK(smp_alt);
+
+static int smp_alt_once = 0;
+static int __init bootonly(char *str)
+{
+        smp_alt_once = 1;
+        return 1;
+}
+__setup("smp-alt-boot", bootonly);
+
+void alternatives_smp_module_add(struct module *mod, char *name,
+                                 void *locks, void *locks_end,
+                                 void *text, void *text_end)
+{
+        struct smp_alt_module *smp;
+        unsigned long flags;
+
+        if (smp_alt_once) {
+                if (boot_cpu_has(X86_FEATURE_UP))
+                        alternatives_smp_unlock(locks, locks_end,
+                                                text, text_end);
+                return;
+        }
+
+        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+        if (NULL == smp)
+                return; /* we'll run the (safe but slow) SMP code then ... */
+
+        smp->mod = mod;
+        smp->name = name;
+        smp->locks = locks;
+        smp->locks_end = locks_end;
+        smp->text = text;
+        smp->text_end = text_end;
+        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
+                __FUNCTION__, smp->locks, smp->locks_end,
+                smp->text, smp->text_end, smp->name);
+
+        spin_lock_irqsave(&smp_alt, flags);
+        list_add_tail(&smp->next, &smp_alt_modules);
+        if (boot_cpu_has(X86_FEATURE_UP))
+                alternatives_smp_unlock(smp->locks, smp->locks_end,
+                                        smp->text, smp->text_end);
+        spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void alternatives_smp_module_del(struct module *mod)
+{
+        struct smp_alt_module *item;
+        unsigned long flags;
+
+        if (smp_alt_once)
+                return;
+
+        spin_lock_irqsave(&smp_alt, flags);
+        list_for_each_entry(item, &smp_alt_modules, next) {
+                if (mod != item->mod)
+                        continue;
+                list_del(&item->next);
+                spin_unlock_irqrestore(&smp_alt, flags);
+                DPRINTK("%s: %s\n", __FUNCTION__, item->name);
+                kfree(item);
+                return;
+        }
+        spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void alternatives_smp_switch(int smp)
+{
+        struct smp_alt_module *mod;
+        unsigned long flags;
+
+        if (smp_alt_once)
+                return;
+        BUG_ON(!smp && (num_online_cpus() > 1));
+
+        spin_lock_irqsave(&smp_alt, flags);
+        if (smp) {
+                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
+                clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+                clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+                alternatives_smp_apply(__smp_alt_instructions,
+                                       __smp_alt_instructions_end);
+                list_for_each_entry(mod, &smp_alt_modules, next)
+                        alternatives_smp_lock(mod->locks, mod->locks_end,
+                                              mod->text, mod->text_end);
+        } else {
+                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+                set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+                set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+                apply_alternatives(__smp_alt_instructions,
+                                   __smp_alt_instructions_end);
+                list_for_each_entry(mod, &smp_alt_modules, next)
+                        alternatives_smp_unlock(mod->locks, mod->locks_end,
+                                                mod->text, mod->text_end);
+        }
+        spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void __init alternative_instructions(void)
+{
+        apply_alternatives(__alt_instructions, __alt_instructions_end);
+
+        /* switch to patch-once-at-boottime-only mode and free the
+         * tables in case we know the number of CPUs will never ever
+         * change */
+#ifdef CONFIG_HOTPLUG_CPU
+        if (num_possible_cpus() < 2)
+                smp_alt_once = 1;
+#else
+        smp_alt_once = 1;
+#endif
+
+        if (smp_alt_once) {
+                if (1 == num_possible_cpus()) {
+                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+                        set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+                        set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+                        apply_alternatives(__smp_alt_instructions,
+                                           __smp_alt_instructions_end);
+                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
+                                                _text, _etext);
+                }
+                free_init_pages("SMP alternatives",
+                                (unsigned long)__smp_alt_begin,
+                                (unsigned long)__smp_alt_end);
+        } else {
+                alternatives_smp_save(__smp_alt_instructions,
+                                      __smp_alt_instructions_end);
+                alternatives_smp_module_add(NULL, "core kernel",
+                                            __smp_locks, __smp_locks_end,
+                                            _text, _etext);
+                alternatives_smp_switch(0);
+        }
+}
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 89a85af33d28..5cfbd8011698 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
         /* Other (Linux-defined) */
         "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
         NULL, NULL, NULL, NULL,
-        "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+        "constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL,
         NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
         NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c
index 5149c8a621f0..470cf97e7cd3 100644
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -104,26 +104,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
         return -ENOEXEC;
 }
 
-extern void apply_alternatives(void *start, void *end);
-
 int module_finalize(const Elf_Ehdr *hdr,
                     const Elf_Shdr *sechdrs,
                     struct module *me)
 {
-        const Elf_Shdr *s;
+        const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
         char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
-        /* look for .altinstructions to patch */
         for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
-                void *seg;
-                if (strcmp(".altinstructions", secstrings + s->sh_name))
-                        continue;
-                seg = (void *)s->sh_addr;
-                apply_alternatives(seg, seg + s->sh_size);
-        }
+                if (!strcmp(".text", secstrings + s->sh_name))
+                        text = s;
+                if (!strcmp(".altinstructions", secstrings + s->sh_name))
+                        alt = s;
+                if (!strcmp(".smp_locks", secstrings + s->sh_name))
+                        locks= s;
+        }
+
+        if (alt) {
+                /* patch .altinstructions */
+                void *aseg = (void *)alt->sh_addr;
+                apply_alternatives(aseg, aseg + alt->sh_size);
+        }
+        if (locks && text) {
+                void *lseg = (void *)locks->sh_addr;
+                void *tseg = (void *)text->sh_addr;
+                alternatives_smp_module_add(me, me->name,
+                                            lseg, lseg + locks->sh_size,
+                                            tseg, tseg + text->sh_size);
+        }
         return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
+        alternatives_smp_module_del(mod);
 }
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
index 7455ab643943..967dc74df9ee 100644
--- a/arch/i386/kernel/semaphore.c
+++ b/arch/i386/kernel/semaphore.c
@@ -110,11 +110,11 @@ asm(
110".align 4\n" 110".align 4\n"
111".globl __write_lock_failed\n" 111".globl __write_lock_failed\n"
112"__write_lock_failed:\n\t" 112"__write_lock_failed:\n\t"
113 LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" 113 LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
114"1: rep; nop\n\t" 114"1: rep; nop\n\t"
115 "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" 115 "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
116 "jne 1b\n\t" 116 "jne 1b\n\t"
117 LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" 117 LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
118 "jnz __write_lock_failed\n\t" 118 "jnz __write_lock_failed\n\t"
119 "ret" 119 "ret"
120); 120);
@@ -124,11 +124,11 @@ asm(
124".align 4\n" 124".align 4\n"
125".globl __read_lock_failed\n" 125".globl __read_lock_failed\n"
126"__read_lock_failed:\n\t" 126"__read_lock_failed:\n\t"
127 LOCK "incl (%eax)\n" 127 LOCK_PREFIX "incl (%eax)\n"
128"1: rep; nop\n\t" 128"1: rep; nop\n\t"
129 "cmpl $1,(%eax)\n\t" 129 "cmpl $1,(%eax)\n\t"
130 "js 1b\n\t" 130 "js 1b\n\t"
131 LOCK "decl (%eax)\n\t" 131 LOCK_PREFIX "decl (%eax)\n\t"
132 "js __read_lock_failed\n\t" 132 "js __read_lock_failed\n\t"
133 "ret" 133 "ret"
134); 134);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index ab62a9f4701e..5f58f8cb9836 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1377,101 +1377,6 @@ static void __init register_memory(void)
                         pci_mem_start, gapstart, gapsize);
 }
 
-/* Use inline assembly to define this because the nops are defined
-   as inline assembly strings in the include files and we cannot
-   get them easily into strings. */
-asm("\t.data\nintelnops: "
-        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
-        GENERIC_NOP7 GENERIC_NOP8);
-asm("\t.data\nk8nops: "
-        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-        K8_NOP7 K8_NOP8);
-asm("\t.data\nk7nops: "
-        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-        K7_NOP7 K7_NOP8);
-
-extern unsigned char intelnops[], k8nops[], k7nops[];
-static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
-        NULL,
-        intelnops,
-        intelnops + 1,
-        intelnops + 1 + 2,
-        intelnops + 1 + 2 + 3,
-        intelnops + 1 + 2 + 3 + 4,
-        intelnops + 1 + 2 + 3 + 4 + 5,
-        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
-        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
-        NULL,
-        k8nops,
-        k8nops + 1,
-        k8nops + 1 + 2,
-        k8nops + 1 + 2 + 3,
-        k8nops + 1 + 2 + 3 + 4,
-        k8nops + 1 + 2 + 3 + 4 + 5,
-        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
-        NULL,
-        k7nops,
-        k7nops + 1,
-        k7nops + 1 + 2,
-        k7nops + 1 + 2 + 3,
-        k7nops + 1 + 2 + 3 + 4,
-        k7nops + 1 + 2 + 3 + 4 + 5,
-        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
-        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static struct nop {
-        int cpuid;
-        unsigned char **noptable;
-} noptypes[] = {
-        { X86_FEATURE_K8, k8_nops },
-        { X86_FEATURE_K7, k7_nops },
-        { -1, NULL }
-};
-
-/* Replace instructions with better alternatives for this CPU type.
-
-   This runs before SMP is initialized to avoid SMP problems with
-   self modifying code. This implies that assymetric systems where
-   APs have less capabilities than the boot processor are not handled.
-   Tough. Make sure you disable such features by hand. */
-void apply_alternatives(void *start, void *end)
-{
-        struct alt_instr *a;
-        int diff, i, k;
-        unsigned char **noptable = intel_nops;
-        for (i = 0; noptypes[i].cpuid >= 0; i++) {
-                if (boot_cpu_has(noptypes[i].cpuid)) {
-                        noptable = noptypes[i].noptable;
-                        break;
-                }
-        }
-        for (a = start; (void *)a < end; a++) {
-                if (!boot_cpu_has(a->cpuid))
-                        continue;
-                BUG_ON(a->replacementlen > a->instrlen);
-                memcpy(a->instr, a->replacement, a->replacementlen);
-                diff = a->instrlen - a->replacementlen;
-                /* Pad the rest with nops */
-                for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-                        k = diff;
-                        if (k > ASM_NOP_MAX)
-                                k = ASM_NOP_MAX;
-                        memcpy(a->instr + i, noptable[k], k);
-                }
-        }
-}
-
-void __init alternative_instructions(void)
-{
-        extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-        apply_alternatives(__alt_instructions, __alt_instructions_end);
-}
-
 static char * __init machine_specific_memory_setup(void);
 
 #ifdef CONFIG_MCA
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 7007e1783797..4c470e99a742 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -899,6 +899,7 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
         unsigned short nmi_high = 0, nmi_low = 0;
 
         ++cpucount;
+        alternatives_smp_switch(1);
 
         /*
          * We can't use kernel_thread since we must avoid to
@@ -1368,6 +1369,8 @@ void __cpu_die(unsigned int cpu)
                 /* They ack this in play_dead by setting CPU_DEAD */
                 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                         printk ("CPU %d is now offline\n", cpu);
+                        if (1 == num_online_cpus())
+                                alternatives_smp_switch(0);
                         return;
                 }
                 msleep(100);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 4710195b6b74..3f21c6f6466d 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -68,6 +68,26 @@ SECTIONS
                 *(.data.init_task)
         }
 
+        /* might get freed after init */
+        . = ALIGN(4096);
+        __smp_alt_begin = .;
+        __smp_alt_instructions = .;
+        .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+                *(.smp_altinstructions)
+        }
+        __smp_alt_instructions_end = .;
+        . = ALIGN(4);
+        __smp_locks = .;
+        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+                *(.smp_locks)
+        }
+        __smp_locks_end = .;
+        .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
+                *(.smp_altinstr_replacement)
+        }
+        . = ALIGN(4096);
+        __smp_alt_end = .;
+
         /* will be freed after init */
         . = ALIGN(4096);                /* Init code and data */
         __init_begin = .;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7ba55a6e2dbc..9f66ac582a8b 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -720,21 +720,6 @@ static int noinline do_test_wp_bit(void)
         return flag;
 }
 
-void free_initmem(void)
-{
-        unsigned long addr;
-
-        addr = (unsigned long)(&__init_begin);
-        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-                ClearPageReserved(virt_to_page(addr));
-                init_page_count(virt_to_page(addr));
-                memset((void *)addr, 0xcc, PAGE_SIZE);
-                free_page(addr);
-                totalram_pages++;
-        }
-        printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 
 extern char __start_rodata, __end_rodata;
@@ -758,17 +743,31 @@ void mark_rodata_ro(void)
 }
 #endif
 
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
+{
+        unsigned long addr;
+
+        for (addr = begin; addr < end; addr += PAGE_SIZE) {
+                ClearPageReserved(virt_to_page(addr));
+                init_page_count(virt_to_page(addr));
+                memset((void *)addr, 0xcc, PAGE_SIZE);
+                free_page(addr);
+                totalram_pages++;
+        }
+        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+void free_initmem(void)
+{
+        free_init_pages("unused kernel memory",
+                        (unsigned long)(&__init_begin),
+                        (unsigned long)(&__init_end));
+}
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-        if (start < end)
-                printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-        for (; start < end; start += PAGE_SIZE) {
-                ClearPageReserved(virt_to_page(start));
-                init_page_count(virt_to_page(start));
-                free_page(start);
-                totalram_pages++;
-        }
+        free_init_pages("initrd memory", start, end);
 }
 #endif
+