author    Linus Torvalds <torvalds@linux-foundation.org>    2011-05-19 20:55:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-05-19 20:55:12 -0400
commit    016281880439a8665ecf37514865742da58131d4 (patch)
tree      2f6a757a581c71cfb8cd891e0583fbffb359830b /arch/x86/kernel
parent    17b141803c6c6e27fbade3f97c1c9d8d66c72866 (diff)
parent    865be7a81071a77014c83cd01536c989eed362b4 (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, cpu: Fix detection of Celeron Covington stepping A1 and B0
Documentation, ABI: Update L3 cache index disable text
x86, AMD, cacheinfo: Fix L3 cache index disable checks
x86, AMD, cacheinfo: Fix fallout caused by max3 conversion
x86, cpu: Change NOP selection for certain Intel CPUs
x86, cpu: Clean up and unify the NOP selection infrastructure
x86, percpu: Use ASM_NOP4 instead of hardcoding P6_NOP4
x86, cpu: Move AMD Elan Kconfig under "Processor family"
Fix up trivial conflicts in alternative handling (commit dc326fca2b64
"x86, cpu: Clean up and unify the NOP selection infrastructure" removed
some hacky 5-byte instruction stuff, while commit d430d3d7e646 "jump
label: Introduce static_branch() interface" renamed HAVE_JUMP_LABEL to
CONFIG_JUMP_LABEL in the code that went away)
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/alternative.c          | 194
-rw-r--r--  arch/x86/kernel/cpu/intel.c            |  10
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  |  20
-rw-r--r--  arch/x86/kernel/ftrace.c               |   4
-rw-r--r--  arch/x86/kernel/jump_label.c           |   5
-rw-r--r--  arch/x86/kernel/setup.c                |   6
6 files changed, 126 insertions(+), 113 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1eeeafcb4410..a81f2d52f869 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -67,17 +67,30 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt);
 #define DPRINTK(fmt, args...) if (debug_alternative) \
         printk(KERN_DEBUG fmt, args)
 
+/*
+ * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
+ * that correspond to that nop. Getting from one nop to the next, we
+ * add to the array the offset that is equal to the sum of all sizes of
+ * nops preceding the one we are after.
+ *
+ * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
+ * nice symmetry of sizes of the previous nops.
+ */
 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
-/* Use inline assembly to define this because the nops are defined
-   as inline assembly strings in the include files and we cannot
-   get them easily into strings. */
-asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
-        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
-        GENERIC_NOP7 GENERIC_NOP8
-        "\t.previous");
-extern const unsigned char intelnops[];
-static const unsigned char *const __initconst_or_module
-intel_nops[ASM_NOP_MAX+1] = {
+static const unsigned char intelnops[] =
+{
+        GENERIC_NOP1,
+        GENERIC_NOP2,
+        GENERIC_NOP3,
+        GENERIC_NOP4,
+        GENERIC_NOP5,
+        GENERIC_NOP6,
+        GENERIC_NOP7,
+        GENERIC_NOP8,
+        GENERIC_NOP5_ATOMIC
+};
+static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
+{
         NULL,
         intelnops,
         intelnops + 1,
@@ -87,17 +100,25 @@ intel_nops[ASM_NOP_MAX+1] = {
         intelnops + 1 + 2 + 3 + 4 + 5,
         intelnops + 1 + 2 + 3 + 4 + 5 + 6,
         intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 };
 #endif
 
 #ifdef K8_NOP1
-asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
-        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-        K8_NOP7 K8_NOP8
-        "\t.previous");
-extern const unsigned char k8nops[];
-static const unsigned char *const __initconst_or_module
-k8_nops[ASM_NOP_MAX+1] = {
+static const unsigned char k8nops[] =
+{
+        K8_NOP1,
+        K8_NOP2,
+        K8_NOP3,
+        K8_NOP4,
+        K8_NOP5,
+        K8_NOP6,
+        K8_NOP7,
+        K8_NOP8,
+        K8_NOP5_ATOMIC
+};
+static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
+{
         NULL,
         k8nops,
         k8nops + 1,
@@ -107,17 +128,25 @@ k8_nops[ASM_NOP_MAX+1] = {
         k8nops + 1 + 2 + 3 + 4 + 5,
         k8nops + 1 + 2 + 3 + 4 + 5 + 6,
         k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 };
 #endif
 
 #if defined(K7_NOP1) && !defined(CONFIG_X86_64)
-asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
-        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-        K7_NOP7 K7_NOP8
-        "\t.previous");
-extern const unsigned char k7nops[];
-static const unsigned char *const __initconst_or_module
-k7_nops[ASM_NOP_MAX+1] = {
+static const unsigned char k7nops[] =
+{
+        K7_NOP1,
+        K7_NOP2,
+        K7_NOP3,
+        K7_NOP4,
+        K7_NOP5,
+        K7_NOP6,
+        K7_NOP7,
+        K7_NOP8,
+        K7_NOP5_ATOMIC
+};
+static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
+{
         NULL,
         k7nops,
         k7nops + 1,
@@ -127,17 +156,25 @@ k7_nops[ASM_NOP_MAX+1] = {
         k7nops + 1 + 2 + 3 + 4 + 5,
         k7nops + 1 + 2 + 3 + 4 + 5 + 6,
         k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 };
 #endif
 
 #ifdef P6_NOP1
-asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
-        P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
-        P6_NOP7 P6_NOP8
-        "\t.previous");
-extern const unsigned char p6nops[];
-static const unsigned char *const __initconst_or_module
-p6_nops[ASM_NOP_MAX+1] = {
+static const unsigned char __initconst_or_module p6nops[] =
+{
+        P6_NOP1,
+        P6_NOP2,
+        P6_NOP3,
+        P6_NOP4,
+        P6_NOP5,
+        P6_NOP6,
+        P6_NOP7,
+        P6_NOP8,
+        P6_NOP5_ATOMIC
+};
+static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
+{
         NULL,
         p6nops,
         p6nops + 1,
@@ -147,47 +184,65 @@ p6_nops[ASM_NOP_MAX+1] = {
         p6nops + 1 + 2 + 3 + 4 + 5,
         p6nops + 1 + 2 + 3 + 4 + 5 + 6,
         p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 };
 #endif
 
+/* Initialize these to a safe default */
 #ifdef CONFIG_X86_64
+const unsigned char * const *ideal_nops = p6_nops;
+#else
+const unsigned char * const *ideal_nops = intel_nops;
+#endif
 
-extern char __vsyscall_0;
-static const unsigned char *const *__init_or_module find_nop_table(void)
+void __init arch_init_ideal_nops(void)
 {
-        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-            boot_cpu_has(X86_FEATURE_NOPL))
-                return p6_nops;
-        else
-                return k8_nops;
-}
-
-#else /* CONFIG_X86_64 */
+        switch (boot_cpu_data.x86_vendor) {
+        case X86_VENDOR_INTEL:
+                /*
+                 * Due to a decoder implementation quirk, some
+                 * specific Intel CPUs actually perform better with
+                 * the "k8_nops" than with the SDM-recommended NOPs.
+                 */
+                if (boot_cpu_data.x86 == 6 &&
+                    boot_cpu_data.x86_model >= 0x0f &&
+                    boot_cpu_data.x86_model != 0x1c &&
+                    boot_cpu_data.x86_model != 0x26 &&
+                    boot_cpu_data.x86_model != 0x27 &&
+                    boot_cpu_data.x86_model < 0x30) {
+                        ideal_nops = k8_nops;
+                } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
+                        ideal_nops = p6_nops;
+                } else {
+#ifdef CONFIG_X86_64
+                        ideal_nops = k8_nops;
+#else
+                        ideal_nops = intel_nops;
+#endif
+                }
 
-static const unsigned char *const *__init_or_module find_nop_table(void)
-{
-        if (boot_cpu_has(X86_FEATURE_K8))
-                return k8_nops;
-        else if (boot_cpu_has(X86_FEATURE_K7))
-                return k7_nops;
-        else if (boot_cpu_has(X86_FEATURE_NOPL))
-                return p6_nops;
-        else
-                return intel_nops;
+        default:
+#ifdef CONFIG_X86_64
+                ideal_nops = k8_nops;
+#else
+                if (boot_cpu_has(X86_FEATURE_K8))
+                        ideal_nops = k8_nops;
+                else if (boot_cpu_has(X86_FEATURE_K7))
+                        ideal_nops = k7_nops;
+                else
+                        ideal_nops = intel_nops;
+#endif
+        }
 }
 
-#endif /* CONFIG_X86_64 */
-
 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
 static void __init_or_module add_nops(void *insns, unsigned int len)
 {
-        const unsigned char *const *noptable = find_nop_table();
-
         while (len > 0) {
                 unsigned int noplen = len;
                 if (noplen > ASM_NOP_MAX)
                         noplen = ASM_NOP_MAX;
-                memcpy(insns, noptable[noplen], noplen);
+                memcpy(insns, ideal_nops[noplen], noplen);
                 insns += noplen;
                 len -= noplen;
         }
@@ -195,6 +250,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
+extern char __vsyscall_0;
 void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 /* Replace instructions with better alternatives for this CPU type.
@@ -687,29 +743,3 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
         wrote_text = 0;
         __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
 }
-
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL)
-
-#ifdef CONFIG_X86_64
-unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
-#else
-unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
-#endif
-
-void __init arch_init_ideal_nop5(void)
-{
-        /*
-         * There is no good nop for all x86 archs. This selection
-         * algorithm should be unified with the one in find_nop_table(),
-         * but this should be good enough for now.
-         *
-         * For cases other than the ones below, use the safe (as in
-         * always functional) defaults above.
-         */
-#ifdef CONFIG_X86_64
-        /* Don't use these on 32 bits due to broken virtualizers */
-        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-                memcpy(ideal_nop5, p6_nops[5], 5);
-#endif
-}
-#endif
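
The comment added at the top of alternative.c describes the layout trick these tables rely on: one flat byte array concatenates a nop of each length, so the pointer to an n-byte nop lives at offset 1 + 2 + ... + (n-1), and the pointer table indexes nops directly by length. Below is a minimal userspace sketch of that scheme together with add_nops()-style padding; the byte values, NOP_MAX, and names are illustrative stand-ins, not the kernel's GENERIC_NOP*/ASM_NOP_MAX macros.

```c
#include <stdio.h>
#include <string.h>

#define NOP_MAX 4	/* illustrative; the kernel's ASM_NOP_MAX is 8 */

/* One nop of each length, back to back (0x66 is an operand-size prefix). */
static const unsigned char nops[] = {
	0x90,				/* 1-byte nop */
	0x66, 0x90,			/* 2-byte nop */
	0x66, 0x66, 0x90,		/* 3-byte nop */
	0x66, 0x66, 0x66, 0x90,		/* 4-byte nop */
};

/* Entry n points at the n-byte nop: offset 1 + 2 + ... + (n-1). */
static const unsigned char * const nop_table[NOP_MAX + 1] = {
	NULL,
	nops,
	nops + 1,
	nops + 1 + 2,
	nops + 1 + 2 + 3,
};

/* Same shape as the kernel's add_nops(): fill a buffer in NOP_MAX chunks. */
static void add_nops(unsigned char *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len > NOP_MAX ? NOP_MAX : len;

		memcpy(insns, nop_table[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

int main(void)
{
	unsigned char buf[7];

	add_nops(buf, sizeof(buf));	/* a 4-byte nop, then a 3-byte nop */
	for (unsigned int i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);	/* 66 66 66 90 66 66 90 */
	printf("\n");
	return 0;
}
```

This also shows why the real arrays grew from ASM_NOP_MAX+1 to ASM_NOP_MAX+2 entries: index 0 stays NULL, indices 1..8 are the length-indexed nops, and the extra slot at the end holds the out-of-pattern 5-byte atomic nop that ftrace and jump labels fetch as ideal_nops[NOP_ATOMIC5].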
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fc73a34ba8c9..1edf5ba4fb2b 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -411,12 +411,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 
         switch (c->x86_model) {
         case 5:
-                if (c->x86_mask == 0) {
-                        if (l2 == 0)
-                                p = "Celeron (Covington)";
-                        else if (l2 == 256)
-                                p = "Mobile Pentium II (Dixon)";
-                }
+                if (l2 == 0)
+                        p = "Celeron (Covington)";
+                else if (l2 == 256)
+                        p = "Mobile Pentium II (Dixon)";
                 break;
 
         case 6:
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1ce1af2899df..c105c533ed94 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -327,7 +327,6 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
         l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
         l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
 
-        l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
         l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
@@ -454,27 +453,16 @@ int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
 {
         int ret = 0;
 
-#define SUBCACHE_MASK   (3UL << 20)
-#define SUBCACHE_INDEX  0xfff
-
-        /*
-         * check whether this slot is already used or
-         * the index is already disabled
-         */
+        /* check if @slot is already used or the index is already disabled */
         ret = amd_get_l3_disable_slot(l3, slot);
         if (ret >= 0)
                 return -EINVAL;
 
-        /*
-         * check whether the other slot has disabled the
-         * same index already
-         */
-        if (index == amd_get_l3_disable_slot(l3, !slot))
+        if (index > l3->indices)
                 return -EINVAL;
 
-        /* do not allow writes outside of allowed bits */
-        if ((index & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
-            ((index & SUBCACHE_INDEX) > l3->indices))
+        /* check whether the other slot has disabled the same index already */
+        if (index == amd_get_l3_disable_slot(l3, !slot))
                 return -EINVAL;
 
         amd_l3_disable_index(l3, cpu, slot, index);
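
The simplified bounds check works because amd_calc_l3_indices() already folds the subcache geometry into l3->indices: each !(val & BIT(n)) term counts an enabled subcache half, and the largest per-subcache count, shifted left by 10, gives the number of usable cache indices. A toy version of that arithmetic follows; the register value is invented, and the sc0/sc1 terms here simply mirror the sc2/sc3 pattern visible in the hunk above (the real function derives them from different bits).

```c
#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define max(a, b)	((a) > (b) ? (a) : (b))
#define max3(a, b, c)	max(max(a, b), c)

int main(void)
{
	unsigned long val = BIT(1) | BIT(12);	/* pretend L3 config value */

	/* each clear bit = one enabled subcache half */
	unsigned int sc0 = !(val & BIT(0))  + !(val & BIT(1));
	unsigned int sc1 = !(val & BIT(4))  + !(val & BIT(5));
	unsigned int sc2 = !(val & BIT(8))  + !(val & BIT(9));
	unsigned int sc3 = !(val & BIT(12)) + !(val & BIT(13));

	/* 1024 indices per enabled half; minus one for the 0-based maximum */
	unsigned int indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;

	printf("indices = %u\n", indices);	/* 2047 */
	return 0;
}
```

Any index written through the sysfs cache_disable_* files can then be validated with the single `index > l3->indices` comparison instead of the old SUBCACHE_MASK/SUBCACHE_INDEX bit masking.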
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index a93742a57468..0ba15a6cc57e 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -260,9 +260,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
         return mod_code_status;
 }
 
-static unsigned char *ftrace_nop_replace(void)
+static const unsigned char *ftrace_nop_replace(void)
 {
-        return ideal_nop5;
+        return ideal_nops[NOP_ATOMIC5];
 }
 
 static int
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 961b6b30ba90..3fee346ef545 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -34,7 +34,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
                 code.offset = entry->target -
                                 (entry->code + JUMP_LABEL_NOP_SIZE);
         } else
-                memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+                memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
         get_online_cpus();
         mutex_lock(&text_mutex);
         text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
@@ -44,7 +44,8 @@ void arch_jump_label_transform(struct jump_entry *entry,
 
 void arch_jump_label_text_poke_early(jump_label_t addr)
 {
-        text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+        text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5],
+                        JUMP_LABEL_NOP_SIZE);
 }
 
 #endif
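
ftrace and jump labels share the NOP_ATOMIC5 slot because both patch a fixed five-byte site that must always decode as a single instruction: either a `jmp rel32` (0xe9 plus a 4-byte displacement) or one 5-byte nop, never a run of shorter nops that another CPU could be executing halfway through. Here is a userspace model of the transform; the two byte sequences are the old hardcoded ideal_nop5 defaults from the block removed in alternative.c, and the little-endian memcpy of the displacement assumes x86.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define JUMP_LABEL_NOP_SIZE 5

/* old 64-bit default: 0x66 prefixes + 0x90 */
static const unsigned char nop5_64[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
/* old 32-bit default: ds-prefixed lea 0x0(%esi),%esi */
static const unsigned char nop5_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };

/* Rewrite all five bytes of a jump-label site (illustrative). */
static void transform(unsigned char *site, int enable, int32_t offset)
{
	if (enable) {
		site[0] = 0xe9;			/* jmp rel32 */
		memcpy(site + 1, &offset, sizeof(offset));
	} else {
		memcpy(site, nop5_64, JUMP_LABEL_NOP_SIZE);
	}
}

int main(void)
{
	unsigned char site[JUMP_LABEL_NOP_SIZE];

	transform(site, 1, 0x1234);	/* site: e9 34 12 00 00 */
	transform(site, 0, 0);		/* site: 66 66 66 66 90 */
	for (int i = 0; i < JUMP_LABEL_NOP_SIZE; i++)
		printf("%02x ", site[i]);
	printf("\n");

	(void)nop5_32;	/* 32-bit flavour, kept for reference */
	return 0;
}
```

After this series the kernel picks the best encoding per CPU at boot in arch_init_ideal_nops(), so both subsystems read ideal_nops[NOP_ATOMIC5] instead of each carrying its own default.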
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 4be9b398470e..c3050af9306d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -691,8 +691,6 @@ early_param("reservelow", parse_reservelow);
 
 void __init setup_arch(char **cmdline_p)
 {
-        unsigned long flags;
-
 #ifdef CONFIG_X86_32
         memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
         visws_early_detect();
@@ -1041,9 +1039,7 @@ void __init setup_arch(char **cmdline_p)
 
         mcheck_init();
 
-        local_irq_save(flags);
-        arch_init_ideal_nop5();
-        local_irq_restore(flags);
+        arch_init_ideal_nops();
 }
 
 #ifdef CONFIG_X86_32