 arch/i386/Kconfig.debug          |  10
 arch/i386/kernel/alternative.c   |  63
 arch/i386/kernel/entry.S         |  39
 arch/i386/kernel/module.c        |  11
 arch/i386/kernel/paravirt.c      |  44
 arch/i386/kernel/vmlinux.lds.S   |   6
 include/asm-i386/alternative.h   |  13
 include/asm-i386/desc.h          |  41
 include/asm-i386/irqflags.h      |   4
 include/asm-i386/paravirt.h      | 189
 include/asm-i386/processor.h     | 198
 include/asm-i386/spinlock.h      |  15
 include/asm-x86_64/alternative.h |  12
 scripts/mod/modpost.c            |   2
 14 files changed, 459 insertions(+), 188 deletions(-)
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index b31c0802e1cc..f68cc6f215f8 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -85,4 +85,14 @@ config DOUBLEFAULT
 	  option saves about 4k and might cause you much additional grey
 	  hair.
 
+config DEBUG_PARAVIRT
+	bool "Enable some paravirtualization debugging"
+	default y
+	depends on PARAVIRT && DEBUG_KERNEL
+	help
+	  Currently deliberately clobbers regs which are allowed to be
+	  clobbered in inlined paravirt hooks, even in native mode.
+	  If turning this off solves a problem, then DISABLE_INTERRUPTS() or
+	  ENABLE_INTERRUPTS() is lying about what registers can be clobbered.
+
 endmenu
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 535f9794fba1..9eca21b49f6b 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -124,6 +124,20 @@ static unsigned char** find_nop_table(void)
 
 #endif /* CONFIG_X86_64 */
 
+static void nop_out(void *insns, unsigned int len)
+{
+	unsigned char **noptable = find_nop_table();
+
+	while (len > 0) {
+		unsigned int noplen = len;
+		if (noplen > ASM_NOP_MAX)
+			noplen = ASM_NOP_MAX;
+		memcpy(insns, noptable[noplen], noplen);
+		insns += noplen;
+		len -= noplen;
+	}
+}
+
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
 extern u8 *__smp_locks[], *__smp_locks_end[];
@@ -138,10 +152,9 @@ extern u8 __smp_alt_begin[], __smp_alt_end[];
 
 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
-	unsigned char **noptable = find_nop_table();
 	struct alt_instr *a;
 	u8 *instr;
-	int diff, i, k;
+	int diff;
 
 	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
 	for (a = start; a < end; a++) {
@@ -159,13 +172,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 #endif
 		memcpy(instr, a->replacement, a->replacementlen);
 		diff = a->instrlen - a->replacementlen;
-		/* Pad the rest with nops */
-		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-			k = diff;
-			if (k > ASM_NOP_MAX)
-				k = ASM_NOP_MAX;
-			memcpy(a->instr + i, noptable[k], k);
-		}
+		nop_out(instr + a->replacementlen, diff);
 	}
 }
 
@@ -209,7 +216,6 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
-	unsigned char **noptable = find_nop_table();
 	u8 **ptr;
 
 	for (ptr = start; ptr < end; ptr++) {
@@ -217,7 +223,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
 			continue;
 		if (*ptr > text_end)
 			continue;
-		**ptr = noptable[1][0];
+		nop_out(*ptr, 1);
 	};
 }
 
@@ -343,6 +349,40 @@ void alternatives_smp_switch(int smp)
 
 #endif
 
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{
+	struct paravirt_patch *p;
+
+	for (p = start; p < end; p++) {
+		unsigned int used;
+
+		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
+					  p->len);
+#ifdef CONFIG_DEBUG_PARAVIRT
+		{
+			int i;
+			/* Deliberately clobber regs using "not %reg" to find bugs. */
+			for (i = 0; i < 3; i++) {
+				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
+					memcpy(p->instr + used, "\xf7\xd0", 2);
+					p->instr[used+1] |= i;
+					used += 2;
+				}
+			}
+		}
+#endif
+		/* Pad the rest with nops */
+		nop_out(p->instr + used, p->len - used);
+	}
+
+	/* Sync to be conservative, in case we patched following instructions */
+	sync_core();
+}
+extern struct paravirt_patch __start_parainstructions[],
+	__stop_parainstructions[];
+#endif	/* CONFIG_PARAVIRT */
+
 void __init alternative_instructions(void)
 {
 	unsigned long flags;
@@ -390,5 +430,6 @@ void __init alternative_instructions(void)
 		alternatives_smp_switch(0);
 	}
 #endif
+	apply_paravirt(__start_parainstructions, __stop_parainstructions);
 	local_irq_restore(flags);
 }
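
For reference, the two mechanisms this file now combines are easy to model in isolation. The following is a minimal user-space sketch, not kernel code: the nop table is a stand-in built only from the 1-byte 0x90 nop (find_nop_table() returns CPU-tuned multi-byte sequences), the 10-byte buffer stands in for a patch site where only a 1-byte cli was emitted, and CLBR_ANY is assumed so the DEBUG_PARAVIRT-style loop trashes all of eax/ecx/edx ("not %reg" encodes as 0xf7, 0xd0|reg):

	#include <stdio.h>
	#include <string.h>

	#define ASM_NOP_MAX 8

	/* Stand-in nop table: row n holds an n-byte run of 1-byte nops;
	 * the kernel picks multi-byte nops per CPU via find_nop_table(). */
	static unsigned char noptable[ASM_NOP_MAX + 1][ASM_NOP_MAX];

	static void nop_out(unsigned char *insns, unsigned int len)
	{
		while (len > 0) {
			unsigned int noplen = len > ASM_NOP_MAX ? ASM_NOP_MAX : len;
			memcpy(insns, noptable[noplen], noplen);
			insns += noplen;
			len -= noplen;
		}
	}

	int main(void)
	{
		unsigned char site[10] = { 0xfa };	/* patched-in "cli" */
		unsigned int used = 1, i;

		memset(noptable, 0x90, sizeof(noptable));

		/* DEBUG_PARAVIRT's trick: "not %eax/%ecx/%edx" is f7 d0/d1/d2,
		 * so trashing each clobberable register costs two bytes. */
		for (i = 0; i < 3; i++) {
			if (sizeof(site) - used >= 2) {
				site[used] = 0xf7;
				site[used + 1] = 0xd0 | i;
				used += 2;
			}
		}
		nop_out(site + used, sizeof(site) - used);	/* pad the tail */

		for (i = 0; i < sizeof(site); i++)
			printf("%02x ", site[i]);
		printf("\n");	/* fa f7 d0 f7 d1 f7 d2 90 90 90 */
		return 0;
	}

If turning a register inside-out like this breaks a caller, the call site's clobber annotation was wrong; that is exactly the bug class DEBUG_PARAVIRT exists to flush out.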
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index d274612e05cd..de34b7fed3c1 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -53,6 +53,19 @@
 #include <asm/dwarf2.h>
 #include "irq_vectors.h"
 
+/*
+ * We use macros for low-level operations which need to be overridden
+ * for paravirtualization.  The following will never clobber any registers:
+ *   INTERRUPT_RETURN (aka. "iret")
+ *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
+ *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *
+ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
+ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
+ * Allowing a register to be clobbered can shrink the paravirt replacement
+ * enough to patch inline, increasing performance.
+ */
+
 #define nr_syscalls ((syscall_table_size)/4)
 
 CF_MASK		= 0x00000001
@@ -63,9 +76,9 @@ NT_MASK = 0x00004000
 VM_MASK		= 0x00020000
 
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
+#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
-#define preempt_stop
+#define preempt_stop(clobbers)
 #define resume_kernel		restore_nocheck
 #endif
 
@@ -226,7 +239,7 @@ ENTRY(ret_from_fork)
 	ALIGN
 	RING0_PTREGS_FRAME
 ret_from_exception:
-	preempt_stop
+	preempt_stop(CLBR_ANY)
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
 check_userspace:
@@ -237,7 +250,7 @@ check_userspace:
 	jb resume_kernel		# not returning to v8086 or userspace
 
 ENTRY(resume_userspace)
-	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	movl TI_flags(%ebp), %ecx
@@ -248,7 +261,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	DISABLE_INTERRUPTS
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
 	jnz restore_nocheck
 need_resched:
@@ -277,7 +290,7 @@ sysenter_past_esp:
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
-	ENABLE_INTERRUPTS
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ss, 0*/
@@ -322,7 +335,7 @@ sysenter_past_esp:
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,PT_EAX(%esp)
-	DISABLE_INTERRUPTS
+	DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
@@ -364,7 +377,7 @@ syscall_call:
 	call *sys_call_table(,%eax,4)
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
-	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -393,7 +406,7 @@ restore_nocheck_notrace:
 .section .fixup,"ax"
 iret_exc:
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushl $0			# no error code
 	pushl $do_iret_error
 	jmp error_code
@@ -436,7 +449,7 @@ ldt_ss:
 	CFI_ADJUST_CFA_OFFSET 4
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
-	DISABLE_INTERRUPTS
+	DISABLE_INTERRUPTS(CLBR_EAX)
 	TRACE_IRQS_OFF
 	lss (%esp), %esp
 	CFI_ADJUST_CFA_OFFSET -8
@@ -451,7 +464,7 @@ work_pending:
 	jz work_notifysig
 work_resched:
 	call schedule
-	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -509,7 +522,7 @@ syscall_exit_work:
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
+	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
 					# schedule() instead
 	movl %esp, %eax
 	movl $1, %edx
@@ -693,7 +706,7 @@ ENTRY(device_not_available)
 	GET_CR0_INTO_EAX
 	testl $0x4, %eax		# EM (math emulation bit)
 	jne device_not_available_emulate
-	preempt_stop
+	preempt_stop(CLBR_ANY)
 	call math_state_restore
 	jmp ret_from_exception
 device_not_available_emulate:
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c
index 470cf97e7cd3..d7d9c8b23f72 100644
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -108,7 +108,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
-	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
+	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
+		*para = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
@@ -118,6 +119,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 			alt = s;
 		if (!strcmp(".smp_locks", secstrings + s->sh_name))
 			locks= s;
+		if (!strcmp(".parainstructions", secstrings + s->sh_name))
+			para = s;
 	}
 
 	if (alt) {
@@ -132,6 +135,12 @@ int module_finalize(const Elf_Ehdr *hdr,
 					    lseg, lseg + locks->sh_size,
 					    tseg, tseg + text->sh_size);
 	}
+
+	if (para) {
+		void *pseg = (void *)para->sh_addr;
+		apply_paravirt(pseg, pseg + para->sh_size);
+	}
+
 	return 0;
 }
 
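
The loop above is the usual module_finalize() section scan; as a hedged sketch of the shape it takes, the lookup can be factored as below (find_sec is a hypothetical helper, written against the user-space <elf.h> Elf32_* types where the kernel spells these Elf_Ehdr/Elf_Shdr). The new hunk then amounts to finding ".parainstructions" and handing [sh_addr, sh_addr + sh_size) to apply_paravirt():

	#include <elf.h>
	#include <string.h>
	#include <stddef.h>

	/* Hypothetical helper with the same shape as the scan in
	 * module_finalize(): return the header named 'name', or NULL. */
	static const Elf32_Shdr *find_sec(const Elf32_Ehdr *hdr,
					  const Elf32_Shdr *sechdrs,
					  const char *name)
	{
		/* Section-name string table, mapped along with the image. */
		const char *secstrings = (const char *)hdr
					 + sechdrs[hdr->e_shstrndx].sh_offset;
		const Elf32_Shdr *s;

		for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++)
			if (!strcmp(name, secstrings + s->sh_name))
				return s;
		return NULL;
	}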
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 478192cd4b90..d46460426446 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -45,6 +45,49 @@ char *memory_setup(void)
 	return paravirt_ops.memory_setup();
 }
 
+/* Simple instruction patching code. */
+#define DEF_NATIVE(name, code)					\
+	extern const char start_##name[], end_##name[];		\
+	asm("start_" #name ": " code "; end_" #name ":")
+DEF_NATIVE(cli, "cli");
+DEF_NATIVE(sti, "sti");
+DEF_NATIVE(popf, "push %eax; popf");
+DEF_NATIVE(pushf, "pushf; pop %eax");
+DEF_NATIVE(pushf_cli, "pushf; pop %eax; cli");
+DEF_NATIVE(iret, "iret");
+DEF_NATIVE(sti_sysexit, "sti; sysexit");
+
+static const struct native_insns
+{
+	const char *start, *end;
+} native_insns[] = {
+	[PARAVIRT_IRQ_DISABLE] = { start_cli, end_cli },
+	[PARAVIRT_IRQ_ENABLE] = { start_sti, end_sti },
+	[PARAVIRT_RESTORE_FLAGS] = { start_popf, end_popf },
+	[PARAVIRT_SAVE_FLAGS] = { start_pushf, end_pushf },
+	[PARAVIRT_SAVE_FLAGS_IRQ_DISABLE] = { start_pushf_cli, end_pushf_cli },
+	[PARAVIRT_INTERRUPT_RETURN] = { start_iret, end_iret },
+	[PARAVIRT_STI_SYSEXIT] = { start_sti_sysexit, end_sti_sysexit },
+};
+
+static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+{
+	unsigned int insn_len;
+
+	/* Don't touch it if we don't have a replacement */
+	if (type >= ARRAY_SIZE(native_insns) || !native_insns[type].start)
+		return len;
+
+	insn_len = native_insns[type].end - native_insns[type].start;
+
+	/* Similarly if we can't fit replacement. */
+	if (len < insn_len)
+		return len;
+
+	memcpy(insns, native_insns[type].start, insn_len);
+	return insn_len;
+}
+
 static fastcall unsigned long native_get_debugreg(int regno)
 {
 	unsigned long val = 0; 	/* Damn you, gcc! */
@@ -349,6 +392,7 @@ struct paravirt_ops paravirt_ops = {
 	.paravirt_enabled = 0,
 	.kernel_rpl = 0,
 
+	.patch = native_patch,
 	.banner = default_banner,
 	.arch_setup = native_nop,
 	.memory_setup = machine_specific_memory_setup,
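
The byte accounting behind native_patch() is worth spelling out: a DISABLE_INTERRUPTS(...) call site is several bytes of push/call/pop, while DEF_NATIVE captures a 1-byte cli for PARAVIRT_IRQ_DISABLE, so native_patch() copies that byte and returns 1 and apply_paravirt() nop-pads the remainder. A patch hook may also return len untouched to keep the indirect call. A user-space model of that contract (clobber argument dropped, table contents illustrative, 8-byte fake site):

	#include <stdio.h>
	#include <string.h>

	#define PARAVIRT_IRQ_DISABLE 0

	/* Illustrative table: native irq-disable is the 1-byte cli (0xfa). */
	static const unsigned char cli_insn[] = { 0xfa };
	static const struct { const unsigned char *insn; unsigned len; } native[] = {
		[PARAVIRT_IRQ_DISABLE] = { cli_insn, sizeof(cli_insn) },
	};

	/* Same contract as paravirt_ops.patch: write up to len bytes at site,
	 * return how many were emitted; returning len means "leave it alone". */
	static unsigned native_patch(unsigned type, unsigned char *site, unsigned len)
	{
		if (type >= sizeof(native) / sizeof(native[0]) || !native[type].insn)
			return len;		/* no replacement known */
		if (len < native[type].len)
			return len;		/* replacement doesn't fit */
		memcpy(site, native[type].insn, native[type].len);
		return native[type].len;	/* caller nop-pads the rest */
	}

	int main(void)
	{
		unsigned char site[8] = { 0 };	/* stand-in for the call site */
		unsigned used = native_patch(PARAVIRT_IRQ_DISABLE, site, sizeof(site));

		memset(site + used, 0x90, sizeof(site) - used);	/* nop_out() stand-in */
		printf("emitted %u byte(s), padded %zu\n", used, sizeof(site) - used);
		return 0;
	}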
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 6860f20aa579..5c69cf0e5944 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -165,6 +165,12 @@ SECTIONS
   .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
 	*(.altinstr_replacement)
   }
+  . = ALIGN(4);
+  __start_parainstructions = .;
+  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
+	*(.parainstructions)
+  }
+  __stop_parainstructions = .;
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
   .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index b01a7ec409ce..b8fa9557c532 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -4,7 +4,7 @@
 #ifdef __KERNEL__
 
 #include <asm/types.h>
-
+#include <linux/stddef.h>
 #include <linux/types.h>
 
 struct alt_instr {
@@ -118,4 +118,15 @@ static inline void alternatives_smp_switch(int smp) {}
 #define LOCK_PREFIX ""
 #endif
 
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
 #endif /* _I386_ALTERNATIVE_H */
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index f19820f0834f..f398cc456448 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -81,31 +81,15 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 #undef C
 }
 
-static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
-{
-	__u32 *lp = (__u32 *)((char *)dt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
-}
-
 #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 
-static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
-{
-	__u32 a, b;
-	pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
-	write_idt_entry(idt_table, gate, a, b);
-}
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
+static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
 {
-	__u32 a, b;
-	pack_descriptor(&a, &b, (unsigned long)addr,
-		offsetof(struct tss_struct, __cacheline_filler) - 1,
-		DESCTYPE_TSS, 0);
-	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
+	__u32 *lp = (__u32 *)((char *)dt + entry*8);
+	*lp = entry_a;
+	*(lp+1) = entry_b;
 }
 
 #define set_ldt native_set_ldt
@@ -128,6 +112,23 @@ static inline fastcall void native_set_ldt(const void *addr,
 	}
 }
 
+static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+{
+	__u32 a, b;
+	pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
+	write_idt_entry(idt_table, gate, a, b);
+}
+
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
+{
+	__u32 a, b;
+	pack_descriptor(&a, &b, (unsigned long)addr,
+		offsetof(struct tss_struct, __cacheline_filler) - 1,
+		DESCTYPE_TSS, 0);
+	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
+}
+
+
 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
 
 #define LDT_entry_a(info) \
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index 9ce01f3fb7bc..17b18cf4fe9d 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -79,8 +79,8 @@ static inline unsigned long __raw_local_irq_save(void)
 }
 
 #else
-#define DISABLE_INTERRUPTS		cli
-#define ENABLE_INTERRUPTS		sti
+#define DISABLE_INTERRUPTS(clobbers)	cli
+#define ENABLE_INTERRUPTS(clobbers)	sti
 #define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
 #define INTERRUPT_RETURN		iret
 #define GET_CR0_INTO_EAX		movl %cr0, %eax
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index a7551a44686f..081194751ade 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -3,8 +3,26 @@
 /* Various instructions on x86 need to be replaced for
  * para-virtualization: those hooks are defined here. */
 #include <linux/linkage.h>
+#include <linux/stringify.h>
 
 #ifdef CONFIG_PARAVIRT
+/* These are the most performance critical ops, so we want to be able to patch
+ * callers */
+#define PARAVIRT_IRQ_DISABLE 0
+#define PARAVIRT_IRQ_ENABLE 1
+#define PARAVIRT_RESTORE_FLAGS 2
+#define PARAVIRT_SAVE_FLAGS 3
+#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
+#define PARAVIRT_INTERRUPT_RETURN 5
+#define PARAVIRT_STI_SYSEXIT 6
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0x0
+#define CLBR_EAX 0x1
+#define CLBR_ECX 0x2
+#define CLBR_EDX 0x4
+#define CLBR_ANY 0x7
+
 #ifndef __ASSEMBLY__
 struct thread_struct;
 struct Xgt_desc_struct;
@@ -15,6 +33,15 @@ struct paravirt_ops
 	int paravirt_enabled;
 	const char *name;
 
+	/*
+	 * Patch may replace one of the defined code sequences with arbitrary
+	 * code, subject to the same register constraints.  This generally
+	 * means the code is not free to clobber any registers other than EAX.
+	 * The patch function should return the number of bytes of code
+	 * generated, as we nop pad the rest in generic code.
+	 */
+	unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
+
 	void (*arch_setup)(void);
 	char *(*memory_setup)(void);
 	void (*init_IRQ)(void);
@@ -147,35 +174,6 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
 #define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
 #define write_cr4(x) paravirt_ops.write_cr4(x)
 
-static inline unsigned long __raw_local_save_flags(void)
-{
-	return paravirt_ops.save_fl();
-}
-
-static inline void raw_local_irq_restore(unsigned long flags)
-{
-	return paravirt_ops.restore_fl(flags);
-}
-
-static inline void raw_local_irq_disable(void)
-{
-	paravirt_ops.irq_disable();
-}
-
-static inline void raw_local_irq_enable(void)
-{
-	paravirt_ops.irq_enable();
-}
-
-static inline unsigned long __raw_local_irq_save(void)
-{
-	unsigned long flags = paravirt_ops.save_fl();
-
-	paravirt_ops.irq_disable();
-
-	return flags;
-}
-
 static inline void raw_safe_halt(void)
 {
 	paravirt_ops.safe_halt();
@@ -267,15 +265,134 @@ static inline void slow_down_io(void) {
 #endif
 }
 
-#define CLI_STRING "pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_disable; popl %edx; popl %ecx; popl %eax"
-#define STI_STRING "pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_enable; popl %edx; popl %ecx; popl %eax"
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch {
+	u8 *instr;		/* original instructions */
+	u8 instrtype;		/* type of this instruction */
+	u8 len;			/* length of original instruction */
+	u16 clobbers;		/* what registers you may clobber */
+};
+
+#define paravirt_alt(insn_string, typenum, clobber)	\
+	"771:\n\t" insn_string "\n" "772:\n"		\
+	".pushsection .parainstructions,\"a\"\n"	\
+	"  .long 771b\n"				\
+	"  .byte " __stringify(typenum) "\n"		\
+	"  .byte 772b-771b\n"				\
+	"  .short " __stringify(clobber) "\n"		\
+	".popsection"
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long f;
+
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_SAVE_FLAGS, CLBR_NONE)
+			     : "=a"(f): "m"(paravirt_ops.save_fl)
+			     : "memory", "cc");
+	return f;
+}
+
+static inline void raw_local_irq_restore(unsigned long f)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_RESTORE_FLAGS, CLBR_EAX)
+			     : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f)
+			     : "memory", "cc");
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%0;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+			     : : "m" (paravirt_ops.irq_disable)
+			     : "memory", "eax", "cc");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%0;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+			     : : "m" (paravirt_ops.irq_enable)
+			     : "memory", "eax", "cc");
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long f;
+
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1; pushl %%eax;"
+					   "call *%2; popl %%eax;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
+					  CLBR_NONE)
+			     : "=a"(f)
+			     : "m" (paravirt_ops.save_fl),
+			       "m" (paravirt_ops.irq_disable)
+			     : "memory", "cc");
+	return f;
+}
+
+#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
+		     "call *paravirt_ops+%c[irq_disable];"		\
+		     "popl %%edx; popl %%ecx",				\
+		     PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+
+#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
+		     "call *paravirt_ops+%c[irq_enable];"		\
+		     "popl %%edx; popl %%ecx",				\
+		     PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+#define CLI_STI_CLOBBERS , "%eax"
+#define CLI_STI_INPUT_ARGS \
+	,							\
+	[irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)),	\
+	[irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
+
 #else  /* __ASSEMBLY__ */
 
-#define INTERRUPT_RETURN		jmp *%cs:paravirt_ops+PARAVIRT_iret
-#define DISABLE_INTERRUPTS		pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_disable; popl %edx; popl %ecx; popl %eax
-#define ENABLE_INTERRUPTS		pushl %eax; pushl %ecx; pushl %edx; call *%cs:paravirt_ops+PARAVIRT_irq_enable; popl %edx; popl %ecx; popl %eax
-#define ENABLE_INTERRUPTS_SYSEXIT	jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit
-#define GET_CR0_INTO_EAX		call *paravirt_ops+PARAVIRT_read_cr0
+#define PARA_PATCH(ptype, clobbers, ops)	\
+771:;						\
+	ops;					\
+772:;						\
+	.pushsection .parainstructions,"a";	\
+	 .long 771b;				\
+	 .byte ptype;				\
+	 .byte 772b-771b;			\
+	 .short clobbers;			\
+	.popsection
+
+#define INTERRUPT_RETURN				\
+	PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY,	\
+	jmp *%cs:paravirt_ops+PARAVIRT_iret)
+
+#define DISABLE_INTERRUPTS(clobbers)			\
+	PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers,	\
+	pushl %ecx; pushl %edx;				\
+	call *paravirt_ops+PARAVIRT_irq_disable;	\
+	popl %edx; popl %ecx)				\
+
+#define ENABLE_INTERRUPTS(clobbers)			\
+	PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers,	\
+	pushl %ecx; pushl %edx;				\
+	call *%cs:paravirt_ops+PARAVIRT_irq_enable;	\
+	popl %edx; popl %ecx)
+
+#define ENABLE_INTERRUPTS_SYSEXIT			\
+	PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY,	\
+	jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
+
+#define GET_CR0_INTO_EAX			\
+	call *paravirt_ops+PARAVIRT_read_cr0
+
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_PARAVIRT */
 #endif	/* __ASM_PARAVIRT_H */
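
Layout note, with a small verifiable sketch: every paravirt_alt()/PARA_PATCH expansion leaves the original sequence in .text between local labels 771 and 772 and emits one record into .parainstructions (.long site address, .byte type, .byte length, .short clobber mask), which is why the linker script above aligns the section to 4 bytes. The user-space mirror below swaps the kernel's u8 */u8/u16 members for fixed-width types (pointer as uint32_t, matching the 32-bit target) purely to show the 8-byte record shape:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Mirror of struct paravirt_patch as laid out by .parainstructions
	 * records on i386; uint32_t stands in for the 4-byte kernel pointer. */
	struct paravirt_patch_rec {
		uint32_t instr;		/* .long 771b: address of original code */
		uint8_t  instrtype;	/* .byte: PARAVIRT_* type number */
		uint8_t  len;		/* .byte 772b-771b: site length */
		uint16_t clobbers;	/* .short: CLBR_* mask */
	};

	int main(void)
	{
		printf("record size %zu, clobbers at offset %zu\n",
		       sizeof(struct paravirt_patch_rec),
		       offsetof(struct paravirt_patch_rec, clobbers)); /* 8, 6 */
		return 0;
	}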
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 6c2c4457be0a..5f0418d0078c 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -156,59 +156,6 @@ static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
 		: "0" (*eax), "2" (*ecx));
 }
 
-/*
- * Generic CPUID function
- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
- * resulting in stale register contents being returned.
- */
-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-{
-	*eax = op;
-	*ecx = 0;
-	__cpuid(eax, ebx, ecx, edx);
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-	 	int *edx)
-{
-	*eax = op;
-	*ecx = count;
-	__cpuid(eax, ebx, ecx, edx);
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return edx;
-}
-
 #define load_cr3(pgdir) write_cr3(__pa(pgdir))
 
 /*
@@ -491,22 +438,6 @@ struct thread_struct {
 	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },	\
 }
 
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define paravirt_enabled() 0
-#define __cpuid native_cpuid
-
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-	tss->esp0 = thread->esp0;
-	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
-		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-	}
-}
-
 #define start_thread(regs, new_eip, new_esp) do {		\
 	__asm__("movl %0,%%fs": :"r" (0));			\
 	regs->xgs = 0;						\
@@ -519,36 +450,6 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
 	regs->esp = new_esp;					\
 } while (0)
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register)				\
-		__asm__("movl %%db" #register ", %0"		\
-			:"=r" (var))
-#define set_debugreg(value, register)			\
-		__asm__("movl %0,%%db" #register		\
-			: /* no output */			\
-			:"r" (value))
-
-#define set_iopl_mask native_set_iopl_mask
-#endif /* CONFIG_PARAVIRT */
-
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static fastcall inline void native_set_iopl_mask(unsigned mask)
-{
-	unsigned int reg;
-	__asm__ __volatile__ ("pushfl;"
-			      "popl %0;"
-			      "andl %1, %0;"
-			      "orl %2, %0;"
-			      "pushl %0;"
-			      "popfl"
-				: "=&r" (reg)
-				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
-}
-
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
@@ -640,6 +541,105 @@ static inline void rep_nop(void)
 
 #define cpu_relax()	rep_nop()
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+	tss->esp0 = thread->esp0;
+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+		tss->ss1 = thread->sysenter_cs;
+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+	}
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register)				\
+		__asm__("movl %%db" #register ", %0"		\
+			:"=r" (var))
+#define set_debugreg(value, register)			\
+		__asm__("movl %0,%%db" #register		\
+			: /* no output */			\
+			:"r" (value))
+
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static fastcall inline void native_set_iopl_mask(unsigned mask)
+{
+	unsigned int reg;
+	__asm__ __volatile__ ("pushfl;"
+			      "popl %0;"
+			      "andl %1, %0;"
+			      "orl %2, %0;"
+			      "pushl %0;"
+			      "popfl"
+				: "=&r" (reg)
+				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
+
+/*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+	*eax = op;
+	*ecx = 0;
+	__cpuid(eax, ebx, ecx, edx);
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+	 	int *edx)
+{
+	*eax = op;
+	*ecx = count;
+	__cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return edx;
+}
+
 /* generic versions from gas */
 #define GENERIC_NOP1	".byte 0x90\n"
 #define GENERIC_NOP2	".byte 0x89,0xf6\n"
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index dea60709db29..d3bcebed60ca 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -12,6 +12,8 @@
 #else
 #define CLI_STRING	"cli"
 #define STI_STRING	"sti"
+#define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
 #endif /* CONFIG_PARAVIRT */
 
 /*
@@ -57,25 +59,28 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 {
 	asm volatile(
 		"\n1:\t"
-		LOCK_PREFIX " ; decb %0\n\t"
+		LOCK_PREFIX " ; decb %[slock]\n\t"
 		"jns 5f\n"
 		"2:\t"
-		"testl $0x200, %1\n\t"
+		"testl $0x200, %[flags]\n\t"
 		"jz 4f\n\t"
 		STI_STRING "\n"
 		"3:\t"
 		"rep;nop\n\t"
-		"cmpb $0, %0\n\t"
+		"cmpb $0, %[slock]\n\t"
 		"jle 3b\n\t"
 		CLI_STRING "\n\t"
 		"jmp 1b\n"
 		"4:\t"
 		"rep;nop\n\t"
-		"cmpb $0, %0\n\t"
+		"cmpb $0, %[slock]\n\t"
 		"jg 1b\n\t"
 		"jmp 4b\n"
 		"5:\n\t"
-		: "+m" (lock->slock) : "r" (flags) : "memory");
+		: [slock] "+m" (lock->slock)
+		: [flags] "r" (flags)
+		  CLI_STI_INPUT_ARGS
+		: "memory" CLI_STI_CLOBBERS);
 }
 #endif
 
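
The move from positional %0/%1 to named [slock]/[flags] operands is what makes CLI_STI_INPUT_ARGS workable: paravirt builds can append the two offsetof() inputs without renumbering any reference in the asm template, and non-paravirt builds define the extra macros empty. A self-contained illustration of GCC's named asm operands (x86 only; the function name and values are arbitrary):

	#include <stdio.h>

	/* Named operands keep the template stable when operands are appended:
	 * %[a] and %[b] stay valid no matter how many inputs follow them. */
	static int add_named(int x, int y)
	{
		asm("addl %[b], %[a]"
		    : [a] "+r" (x)
		    : [b] "r" (y)
		    : "cc");
		return x;
	}

	int main(void)
	{
		printf("%d\n", add_named(40, 2));	/* 42 */
		return 0;
	}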
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index a584826cc570..a6657b4f3e0e 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/stddef.h>
 #include <asm/cpufeature.h>
 
 struct alt_instr {
@@ -133,4 +134,15 @@ static inline void alternatives_smp_switch(int smp) {}
 #define LOCK_PREFIX ""
 #endif
 
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
 #endif /* _X86_64_ALTERNATIVE_H */
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 2e1141623147..ac0a58222992 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -911,6 +911,7 @@ static int init_section_ref_ok(const char *name)
 		".toc1",  /* used by ppc64 */
 		".stab",
 		".rodata",
+		".parainstructions",
 		".text.lock",
 		"__bug_table", /* used by powerpc for BUG() */
 		".pci_fixup_header",
@@ -931,6 +932,7 @@ static int init_section_ref_ok(const char *name)
 		".altinstructions",
 		".eh_frame",
 		".debug",
+		".parainstructions",
 		NULL
 	};
 	/* part of section name */