author     Rusty Russell <rusty@rustcorp.com.au>    2006-09-26 04:52:39 -0400
committer  Andi Kleen <andi@basil.nowhere.org>      2006-09-26 04:52:39 -0400
commit     0da5db313317e3195482d3e660a1074857374a89
tree       9322a8eb60da97ae5f3a199cec13afd9b34c202d
parent     7b0bda74f7e77f362eaeee837e7911238acf4c76
[PATCH] i386: Abstract sensitive instructions
Abstract sensitive instructions in assembler code, replacing them with macros
(currently #defined to the native versions). We use long names deliberately:
the assembler is case-insensitive, so if something went wrong and a short-named
macro failed to expand, it would silently assemble as the native instruction
anyway; a long name guarantees an assembly error instead.
Resulting object files are exactly the same as before.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Andi Kleen <ak@suse.de>
 arch/i386/kernel/entry.S    | 38
 include/asm-i386/spinlock.h |  7
 2 files changed, 27 insertions(+), 18 deletions(-)
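The point of the indirection is that a paravirtualized kernel can later redefine these macros without touching any call sites. A minimal sketch of what such an override could look like (the CONFIG_PARAVIRT option and the stub names below are hypothetical, not part of this patch):

#ifdef CONFIG_PARAVIRT
/* hypothetical: route the sensitive instructions through hypervisor-aware
 * stubs instead of executing cli/sti directly */
#define DISABLE_INTERRUPTS		call paravirt_irq_disable
#define ENABLE_INTERRUPTS		call paravirt_irq_enable
#else
/* native versions, as introduced by this patch */
#define DISABLE_INTERRUPTS		cli
#define ENABLE_INTERRUPTS		sti
#endif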
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 4b0845249222..3872fca5c74a 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -76,8 +76,15 @@ DF_MASK = 0x00000400
 NT_MASK = 0x00004000
 VM_MASK = 0x00020000
 
+/* These are replacements for paravirtualization */
+#define DISABLE_INTERRUPTS		cli
+#define ENABLE_INTERRUPTS		sti
+#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
+#define INTERRUPT_RETURN		iret
+#define GET_CR0_INTO_EAX		movl %cr0, %eax
+
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		cli; TRACE_IRQS_OFF
+#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
 #else
 #define preempt_stop
 #define resume_kernel		restore_nocheck
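Why the long macro names matter: GAS treats instruction mnemonics case-insensitively, so a short uppercase macro that silently failed to expand would still assemble as the native instruction. A hypothetical illustration (not from the patch):

/* If this #define were missing, the "CLI" line below would still
 * assemble: GAS happily accepts CLI as the cli instruction. */
#define CLI	cli
	CLI			/* assembles whether or not the macro expanded: unsafe */

/* If this #define were missing, DISABLE_INTERRUPTS is not a valid
 * mnemonic, so the build fails loudly at the first unexpanded use. */
#define DISABLE_INTERRUPTS	cli
	DISABLE_INTERRUPTS	/* cannot assemble unexpanded: safe */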
@@ -236,7 +243,7 @@ check_userspace:
 	testl $(VM_MASK | 3), %eax
 	jz resume_kernel
 ENTRY(resume_userspace)
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	movl TI_flags(%ebp), %ecx
@@ -247,7 +254,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	cli
+	DISABLE_INTERRUPTS
 	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
 	jnz restore_nocheck
 need_resched:
@@ -275,7 +282,7 @@ sysenter_past_esp:
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
-	sti
+	ENABLE_INTERRUPTS
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ss, 0*/
@@ -320,7 +327,7 @@ sysenter_past_esp:
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)
-	cli
+	DISABLE_INTERRUPTS
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
@@ -330,8 +337,7 @@ sysenter_past_esp:
 	movl OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
-	sti
-	sysexit
+	ENABLE_INTERRUPTS_SYSEXIT
 	CFI_ENDPROC
 
 
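Note that sti; sysexit becomes one macro rather than a composition of two: sti delays interrupt delivery until after the following instruction, so keeping the pair adjacent guarantees no interrupt is taken on the kernel stack between re-enabling interrupts and returning to userspace. It also lets a future paravirt port substitute the pair atomically, e.g. (hypothetical, not part of this patch):

#define ENABLE_INTERRUPTS_SYSEXIT	jmp hypervisor_sysexit_stub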
@@ -356,7 +362,7 @@ syscall_call:
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)		# store the return value
 syscall_exit:
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -381,11 +387,11 @@ restore_nocheck_notrace:
 	RESTORE_REGS
 	addl $4, %esp
 	CFI_ADJUST_CFA_OFFSET -4
-1:	iret
+1:	INTERRUPT_RETURN
 .section .fixup,"ax"
 iret_exc:
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS
 	pushl $0			# no error code
 	pushl $do_iret_error
 	jmp error_code
@@ -409,7 +415,7 @@ ldt_ss:
 	 * dosemu and wine happy. */
 	subl $8, %esp			# reserve space for switch16 pointer
 	CFI_ADJUST_CFA_OFFSET 8
-	cli
+	DISABLE_INTERRUPTS
 	TRACE_IRQS_OFF
 	movl %esp, %eax
 	/* Set up the 16bit stack frame with switch32 pointer on top,
@@ -419,7 +425,7 @@ ldt_ss:
 	TRACE_IRQS_IRET
 	RESTORE_REGS
 	lss 20+4(%esp), %esp		# switch to 16bit stack
-1:	iret
+1:	INTERRUPT_RETURN
 .section __ex_table,"a"
 	.align 4
 	.long 1b,iret_exc
@@ -434,7 +440,7 @@ work_pending:
 	jz work_notifysig
 work_resched:
 	call schedule
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -490,7 +496,7 @@ syscall_exit_work:
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
 	TRACE_IRQS_ON
-	sti				# could let do_syscall_trace() call
+	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
 					# schedule() instead
 	movl %esp, %eax
 	movl $1, %edx
@@ -668,7 +674,7 @@ ENTRY(device_not_available)
 	pushl $-1			# mark this as an int
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
-	movl %cr0, %eax
+	GET_CR0_INTO_EAX
 	testl $0x4, %eax		# EM (math emulation bit)
 	jne device_not_available_emulate
 	preempt_stop
@@ -811,7 +817,7 @@ nmi_16bit_stack:
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to 16bit stack
-1:	iret
+1:	INTERRUPT_RETURN
 	CFI_ENDPROC
 .section __ex_table,"a"
 	.align 4
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 324329313af8..b0b3043f05e1 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,6 +7,9 @@
 #include <asm/processor.h>
 #include <linux/compiler.h>
 
+#define CLI_STRING	"cli"
+#define STI_STRING	"sti"
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -55,12 +58,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 		"2:\t"
 		"testl $0x200, %1\n\t"
 		"jz 4f\n\t"
-		"sti\n"
+		STI_STRING "\n"
 		"3:\t"
 		"rep;nop\n\t"
 		"cmpb $0, %0\n\t"
 		"jle 3b\n\t"
-		"cli\n\t"
+		CLI_STRING "\n\t"
 		"jmp 1b\n"
 		"4:\t"
 		"rep;nop\n\t"