aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@goop.org>2007-05-02 13:27:13 -0400
committerAndi Kleen <andi@basil.nowhere.org>2007-05-02 13:27:13 -0400
commitd0175ab64412aabc93da8682aaa99124d6815056 (patch)
tree46607da58133cb1d51883bf91e13b108f20bfb90
parent4bc5aa91fb1e544ad37805520030a0d9fc6e11d3 (diff)
[PATCH] i386: Remove smp_alt_instructions
The .smp_altinstructions section and its corresponding symbols are completely unused, so remove them. Also, remove stray #ifdef __KERNEL__ in asm-i386/alternative.h Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com> Signed-off-by: Andi Kleen <ak@suse.de> Cc: Andi Kleen <ak@suse.de>
-rw-r--r--arch/i386/kernel/alternative.c38
-rw-r--r--arch/i386/kernel/vmlinux.lds.S11
-rw-r--r--include/asm-i386/alternative.h6
3 files changed, 3 insertions, 52 deletions
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index a27c8d347364..f09635408049 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -132,11 +132,8 @@ static void nop_out(void *insns, unsigned int len)
132} 132}
133 133
134extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; 134extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
135extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
136extern u8 *__smp_locks[], *__smp_locks_end[]; 135extern u8 *__smp_locks[], *__smp_locks_end[];
137 136
138extern u8 __smp_alt_begin[], __smp_alt_end[];
139
140/* Replace instructions with better alternatives for this CPU type. 137/* Replace instructions with better alternatives for this CPU type.
141 This runs before SMP is initialized to avoid SMP problems with 138 This runs before SMP is initialized to avoid SMP problems with
142 self modifying code. This implies that assymetric systems where 139 self modifying code. This implies that assymetric systems where
@@ -171,29 +168,6 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
171 168
172#ifdef CONFIG_SMP 169#ifdef CONFIG_SMP
173 170
174static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
175{
176 struct alt_instr *a;
177
178 DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
179 for (a = start; a < end; a++) {
180 memcpy(a->replacement + a->replacementlen,
181 a->instr,
182 a->instrlen);
183 }
184}
185
186static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
187{
188 struct alt_instr *a;
189
190 for (a = start; a < end; a++) {
191 memcpy(a->instr,
192 a->replacement + a->replacementlen,
193 a->instrlen);
194 }
195}
196
197static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end) 171static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
198{ 172{
199 u8 **ptr; 173 u8 **ptr;
@@ -319,8 +293,6 @@ void alternatives_smp_switch(int smp)
319 printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); 293 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
320 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); 294 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
321 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); 295 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
322 alternatives_smp_apply(__smp_alt_instructions,
323 __smp_alt_instructions_end);
324 list_for_each_entry(mod, &smp_alt_modules, next) 296 list_for_each_entry(mod, &smp_alt_modules, next)
325 alternatives_smp_lock(mod->locks, mod->locks_end, 297 alternatives_smp_lock(mod->locks, mod->locks_end,
326 mod->text, mod->text_end); 298 mod->text, mod->text_end);
@@ -328,8 +300,6 @@ void alternatives_smp_switch(int smp)
328 printk(KERN_INFO "SMP alternatives: switching to UP code\n"); 300 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
329 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); 301 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
330 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); 302 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
331 apply_alternatives(__smp_alt_instructions,
332 __smp_alt_instructions_end);
333 list_for_each_entry(mod, &smp_alt_modules, next) 303 list_for_each_entry(mod, &smp_alt_modules, next)
334 alternatives_smp_unlock(mod->locks, mod->locks_end, 304 alternatives_smp_unlock(mod->locks, mod->locks_end,
335 mod->text, mod->text_end); 305 mod->text, mod->text_end);
@@ -396,17 +366,13 @@ void __init alternative_instructions(void)
396 printk(KERN_INFO "SMP alternatives: switching to UP code\n"); 366 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
397 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); 367 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
398 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); 368 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
399 apply_alternatives(__smp_alt_instructions,
400 __smp_alt_instructions_end);
401 alternatives_smp_unlock(__smp_locks, __smp_locks_end, 369 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
402 _text, _etext); 370 _text, _etext);
403 } 371 }
404 free_init_pages("SMP alternatives", 372 free_init_pages("SMP alternatives",
405 __pa_symbol(&__smp_alt_begin), 373 __pa_symbol(&__smp_locks),
406 __pa_symbol(&__smp_alt_end)); 374 __pa_symbol(&__smp_locks_end));
407 } else { 375 } else {
408 alternatives_smp_save(__smp_alt_instructions,
409 __smp_alt_instructions_end);
410 alternatives_smp_module_add(NULL, "core kernel", 376 alternatives_smp_module_add(NULL, "core kernel",
411 __smp_locks, __smp_locks_end, 377 __smp_locks, __smp_locks_end,
412 _text, _etext); 378 _text, _etext);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 97fe6eac47c9..2ce4aa185fc8 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -117,22 +117,11 @@ SECTIONS
117 117
118 /* might get freed after init */ 118 /* might get freed after init */
119 . = ALIGN(4096); 119 . = ALIGN(4096);
120 .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
121 __smp_alt_begin = .;
122 __smp_alt_instructions = .;
123 *(.smp_altinstructions)
124 __smp_alt_instructions_end = .;
125 }
126 . = ALIGN(4);
127 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { 120 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
128 __smp_locks = .; 121 __smp_locks = .;
129 *(.smp_locks) 122 *(.smp_locks)
130 __smp_locks_end = .; 123 __smp_locks_end = .;
131 } 124 }
132 .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
133 *(.smp_altinstr_replacement)
134 __smp_alt_end = .;
135 }
136 /* will be freed after init 125 /* will be freed after init
137 * Following ALIGN() is required to make sure no other data falls on the 126 * Following ALIGN() is required to make sure no other data falls on the
138 * same page where __smp_alt_end is pointing as that page might be freed 127 * same page where __smp_alt_end is pointing as that page might be freed
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index dbc1a29284f3..4d518eebe461 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -1,8 +1,6 @@
1#ifndef _I386_ALTERNATIVE_H 1#ifndef _I386_ALTERNATIVE_H
2#define _I386_ALTERNATIVE_H 2#define _I386_ALTERNATIVE_H
3 3
4#ifdef __KERNEL__
5
6#include <asm/types.h> 4#include <asm/types.h>
7#include <linux/stddef.h> 5#include <linux/stddef.h>
8#include <linux/types.h> 6#include <linux/types.h>
@@ -32,9 +30,7 @@ static inline void alternatives_smp_module_add(struct module *mod, char *name,
32 void *text, void *text_end) {} 30 void *text, void *text_end) {}
33static inline void alternatives_smp_module_del(struct module *mod) {} 31static inline void alternatives_smp_module_del(struct module *mod) {}
34static inline void alternatives_smp_switch(int smp) {} 32static inline void alternatives_smp_switch(int smp) {}
35#endif 33#endif /* CONFIG_SMP */
36
37#endif
38 34
39/* 35/*
40 * Alternative instructions for different CPU types or capabilities. 36 * Alternative instructions for different CPU types or capabilities.