Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/asm-offsets_32.c |  2 +-
 arch/x86/kernel/entry_32.S       |  8 ++++----
 arch/x86/kernel/paravirt_32.c    | 10 +++++-----
 arch/x86/kernel/vmi_32.c         |  4 ++--
 4 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 0e45981b2dd7..c1ccfabb4a9e 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -123,7 +123,7 @@ void foo(void)
 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
 	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
 	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+	OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops, irq_enable_syscall_ret);
 	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 #endif
 
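
For context: asm-offsets_32.c is compiled only so that kbuild can harvest C struct offsets into the generated asm-offsets.h, which lets entry_32.S refer to pv_cpu_ops members symbolically. A minimal sketch of the mechanism follows (the DEFINE/OFFSET shape matches the kernel's, reproduced from memory; the pv_cpu_ops stand-in is abbreviated for illustration):

#include <stddef.h>

/* kbuild scans the compiler's .s output for "->SYM value" markers
 * and turns each one into a #define in the generated asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

struct pv_cpu_ops {			/* abbreviated stand-in */
	void (*iret)(void);
	void (*irq_enable_syscall_ret)(void);
};

void foo(void)
{
	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
	/* the renamed assembler-visible constant: */
	OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops,
	       irq_enable_syscall_ret);
}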
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dc7f938e5015..d63609dd64b9 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -58,7 +58,7 @@
  * for paravirtualization.  The following will never clobber any registers:
  *   INTERRUPT_RETURN (aka. "iret")
  *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
- *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *   ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
  *
  * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
  * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
@@ -351,7 +351,7 @@ sysenter_past_esp:
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
 1:	mov PT_FS(%esp), %fs
-	ENABLE_INTERRUPTS_SYSEXIT
+	ENABLE_INTERRUPTS_SYSCALL_RET
 	CFI_ENDPROC
 .pushsection .fixup,"ax"
 2:	movl $0,PT_FS(%esp)
@@ -882,10 +882,10 @@ ENTRY(native_iret)
 .previous
 END(native_iret)
 
-ENTRY(native_irq_enable_sysexit)
+ENTRY(native_irq_enable_syscall_ret)
 	sti
 	sysexit
-END(native_irq_enable_sysexit)
+END(native_irq_enable_syscall_ret)
 #endif
 
 KPROBE_ENTRY(int3)
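
native_irq_enable_syscall_ret is the bare-metal implementation that pv_cpu_ops points at by default. Rendered as C it is just the two instructions below; note that it never returns, since sysexit jumps to the user EIP in %edx with the user stack in %ecx, which is why the patcher can plant a jmp rather than a call for this op (see the paravirt_patch_default hunk further down). Illustrative only; the kernel keeps the stub in entry_32.S so it can be reached without a stack frame:

/* C rendering of the stub above (illustration, not the real code) */
void native_irq_enable_syscall_ret(void)
{
	asm volatile("sti; sysexit");
	/* not reached: sysexit has already dropped to user mode */
}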
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
index f5000799f8ef..706b0562ea40 100644
--- a/arch/x86/kernel/paravirt_32.c
+++ b/arch/x86/kernel/paravirt_32.c
@@ -60,7 +60,7 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
 DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
+DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "sti; sysexit");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
@@ -88,7 +88,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		SITE(pv_irq_ops, restore_fl);
 		SITE(pv_irq_ops, save_fl);
 		SITE(pv_cpu_ops, iret);
-		SITE(pv_cpu_ops, irq_enable_sysexit);
+		SITE(pv_cpu_ops, irq_enable_syscall_ret);
 		SITE(pv_mmu_ops, read_cr2);
 		SITE(pv_mmu_ops, read_cr3);
 		SITE(pv_mmu_ops, write_cr3);
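
The DEF_NATIVE/SITE pair is the heart of native-side patching: DEF_NATIVE plants the literal instruction bytes in the kernel image between start/end labels, and each SITE case copies that template over the indirect call site at boot. A standalone sketch of the idea; patch_insns is a simplification of the kernel's paravirt_patch_insns, and details are approximate:

#include <string.h>

/* Emit instruction bytes bracketed by labels so their length is
 * known at patch time (same shape as the kernel macro). */
#define DEF_NATIVE(ops, name, code)					 \
	extern const char start_##ops##_##name[], end_##ops##_##name[]; \
	asm("start_" #ops "_" #name ": " code "; "			 \
	    "end_" #ops "_" #name ":")

DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "sti; sysexit");

/* Copy a template over a patch site when it fits (the real
 * paravirt_patch_insns falls back to an indirect call otherwise). */
static unsigned patch_insns(void *site, unsigned len,
			    const char *start, const char *end)
{
	unsigned insn_len = end - start;

	if (insn_len > len)
		return len;
	memcpy(site, start, insn_len);
	return insn_len;
}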
@@ -186,7 +186,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		/* If the operation is a nop, then nop the callsite */
 		ret = paravirt_patch_nop();
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
+		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret))
 		/* If operation requires a jmp, then jmp */
 		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
 	else
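
Why jmp rather than call for these two ops: both iret and sti; sysexit leave the kernel, so control never comes back to the patched site. A direct 5-byte jmp is therefore sufficient and avoids leaving a dead return address on the stack. A sketch of what paravirt_patch_jmp has to emit (illustrative, not the kernel's exact code):

#include <string.h>

static unsigned patch_jmp(void *site, const void *target)
{
	unsigned char *p = site;
	/* rel32 is counted from the end of the 5-byte instruction */
	int delta = (int)((const char *)target - ((const char *)site + 5));

	p[0] = 0xe9;				/* jmp rel32 */
	memcpy(p + 1, &delta, sizeof(delta));
	return 5;				/* bytes written */
}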
@@ -237,7 +237,7 @@ static void native_flush_tlb_single(unsigned long addr)
 
 /* These are in entry.S */
 extern void native_iret(void);
-extern void native_irq_enable_sysexit(void);
+extern void native_irq_enable_syscall_ret(void);
 
 static int __init print_banner(void)
 {
@@ -384,7 +384,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.write_idt_entry = write_dt_entry,
 	.load_esp0 = native_load_esp0,
 
-	.irq_enable_sysexit = native_irq_enable_sysexit,
+	.irq_enable_syscall_ret = native_irq_enable_syscall_ret,
 	.iret = native_iret,
 
 	.set_iopl_mask = native_set_iopl_mask,
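
pv_cpu_ops is the boot-time dispatch table: on bare metal every slot keeps its native_* default, and a hypervisor backend overwrites only the slots it needs, exactly as vmi_32.c does below. A hypothetical backend following the same pattern (the my_hv_* names are invented for illustration):

static void my_hv_iret(void)
{
	/* hypercall returning to the interrupted context */
}

static void my_hv_syscall_ret(void)
{
	/* hypercall enabling interrupts and returning to user mode */
}

static void __init my_hv_activate(void)
{
	pv_cpu_ops.iret = my_hv_iret;
	pv_cpu_ops.irq_enable_syscall_ret = my_hv_syscall_ret;
}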
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index f02bad68abaa..aacce426cbd0 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -148,7 +148,7 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
 				      insns, eip);
 	case PARAVIRT_PATCH(pv_cpu_ops.iret):
 		return patch_internal(VMI_CALL_IRET, len, insns, eip);
-	case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
+	case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
 		return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
 	default:
 		break;
@@ -870,7 +870,7 @@ static inline int __init activate_vmi(void)
 	 * the backend.  They are performance critical anyway, so requiring
 	 * a patch is not a big problem.
 	 */
-	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
+	pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
 	pv_cpu_ops.iret = (void *)0xbadbab0;
 
 #ifdef CONFIG_SMP
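
The 0xfeedbab0 and 0xbadbab0 values look like poison pointers: VMI cannot run the native sti; sysexit or iret sequences, so pointing the slots at unmapped addresses makes any call site that the patcher failed to rewrite fault immediately and visibly, rather than silently executing native exit code under the hypervisor. Distilled into a hypothetical helper (the rationale is inferred from the surrounding comment, not stated in the patch):

/* Poison the slots so an unpatched call site oopses loudly instead
 * of running native exit code behind the hypervisor's back. */
static void __init vmi_poison_exit_ops(void)	/* hypothetical helper */
{
	pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
	pv_cpu_ops.iret = (void *)0xbadbab0;
}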