Diffstat (limited to 'arch/x86/kernel/entry_32.S')
-rw-r--r-- | arch/x86/kernel/entry_32.S | 79
1 file changed, 61 insertions, 18 deletions
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 6bc07f0f1202..109792bc7cfa 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -54,6 +54,16 @@
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
 
+/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
+#include <linux/elf-em.h>
+#define AUDIT_ARCH_I386	(EM_386|__AUDIT_ARCH_LE)
+#define __AUDIT_ARCH_LE	0x40000000
+
+#ifndef CONFIG_AUDITSYSCALL
+#define sysenter_audit	syscall_trace_entry
+#define sysexit_audit	syscall_exit_work
+#endif
+
 /*
  * We use macros for low-level operations which need to be overridden
  * for paravirtualization.  The following will never clobber any registers:
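The two added #defines restate what <linux/audit.h> provides, since that header cannot be pulled into assembly. EM_386 is 3 in <linux/elf-em.h> and __AUDIT_ARCH_LE marks the ABI as little-endian, so the token later loaded into %eax works out to 0x40000003. A minimal standalone C sketch of the same arithmetic, with the constants written out by hand:

#include <stdio.h>

#define EM_386          3           /* from <linux/elf-em.h> */
#define __AUDIT_ARCH_LE 0x40000000  /* "ABI is little-endian" flag bit */
#define AUDIT_ARCH_I386 (EM_386 | __AUDIT_ARCH_LE)

int main(void)
{
	/* Prints 0x40000003, the arch token audit_syscall_entry() receives. */
	printf("AUDIT_ARCH_I386 = 0x%08x\n", AUDIT_ARCH_I386);
	return 0;
}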
@@ -332,8 +342,9 @@ sysenter_past_esp:
 	GET_THREAD_INFO(%ebp)
 
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-	jnz syscall_trace_entry
+	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+	jnz sysenter_audit
+sysenter_do_call:
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
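_TIF_WORK_SYSCALL_ENTRY folds the four flags the removed testw spelled out into one symbol from the thread_info header, so the fast path stays a single test-and-branch while new entry-work flags can be added in one place. A hedged sketch of the grouped mask; only _TIF_SECCOMP = bit 8 is stated by the diff itself, the other bit positions are illustrative, and the authoritative definition lives in the x86 thread_info header:

/* Illustrative bit values; see the x86 thread_info header for the real
 * positions.  The diff's own comment fixes _TIF_SECCOMP at bit 8.
 */
#define _TIF_SYSCALL_TRACE	(1 << 0)
#define _TIF_SYSCALL_EMU	(1 << 6)
#define _TIF_SYSCALL_AUDIT	(1 << 7)
#define _TIF_SECCOMP		(1 << 8)

/* Grouped entry-work mask, assumed to cover at least the flags the
 * removed testw listed explicitly.
 */
#define _TIF_WORK_SYSCALL_ENTRY	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \
				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)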
@@ -343,7 +354,8 @@ sysenter_past_esp:
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
-	jne syscall_exit_work
+	jne sysexit_audit
+sysenter_exit:
	/* if something modifies registers it must also disable sysexit */
 	movl PT_EIP(%esp), %edx
 	movl PT_OLDESP(%esp), %ecx
@@ -351,6 +363,45 @@ sysenter_past_esp:
 	TRACE_IRQS_ON
 1:	mov  PT_FS(%esp), %fs
 	ENABLE_INTERRUPTS_SYSEXIT
+
+#ifdef CONFIG_AUDITSYSCALL
+sysenter_audit:
+	testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	jnz syscall_trace_entry
+	addl $4,%esp
+	CFI_ADJUST_CFA_OFFSET -4
+	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
+	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
+	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
+	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
+	movl %eax,%edx			/* 2nd arg: syscall number */
+	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
+	call audit_syscall_entry
+	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	movl PT_EAX(%esp),%eax		/* reload syscall number */
+	jmp sysenter_do_call
+
+sysexit_audit:
+	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
+	jne syscall_exit_work
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_ANY)
+	movl %eax,%edx		/* second arg, syscall return value */
+	cmpl $0,%eax		/* is it < 0? */
+	setl %al		/* 1 if so, 0 if not */
+	movzbl %al,%eax		/* zero-extend that */
+	inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
+	call audit_syscall_exit
+	DISABLE_INTERRUPTS(CLBR_ANY)
+	TRACE_IRQS_OFF
+	movl TI_flags(%ebp), %ecx
+	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
+	jne syscall_exit_work
+	movl PT_EAX(%esp),%eax	/* reload syscall return value */
+	jmp sysenter_exit
+#endif
+
 	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
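Two things keep the audit fast path above cheap. First, per the hunk's own argument comments, the first three C arguments travel in %eax/%edx/%ecx (the 32-bit kernel's regparm(3) convention) and the remaining three are read straight from the words already saved at 0/4/8(%esp), so only three loads are needed. Second, the exit side encodes success or failure branch-free with cmpl/setl/movzbl/inc: a negative %eax becomes 2 (AUDITSC_FAILURE), anything else 1 (AUDITSC_SUCCESS). A hedged C restatement of both calls, with prototypes approximately as the audit code of this era declares them (the arch argument was dropped from audit_syscall_entry() only in much later kernels):

/* Sketch of what sysenter_audit/sysexit_audit amount to in C; the
 * prototypes are quoted approximately, and AUDIT_ARCH_I386 is the
 * constant defined at the top of this diff.
 */
extern void audit_syscall_entry(int arch, long major,
				unsigned long a1, unsigned long a2,
				unsigned long a3, unsigned long a4);
extern void audit_syscall_exit(int valid, long return_code);

#define AUDITSC_SUCCESS	1
#define AUDITSC_FAILURE	2

static void sysenter_audit_in_c(long nr, unsigned long a1, unsigned long a2,
				unsigned long a3, unsigned long a4)
{
	/* %eax, %edx, %ecx carry the first three args under regparm(3);
	 * a2..a4 sit at 0/4/8(%esp) already, as the asm comments note. */
	audit_syscall_entry(AUDIT_ARCH_I386, nr, a1, a2, a3, a4);
}

static void sysexit_audit_in_c(long eax)
{
	/* cmpl $0 / setl / movzbl / inc: eax < 0 -> 2, otherwise 1 */
	audit_syscall_exit(eax < 0 ? AUDITSC_FAILURE : AUDITSC_SUCCESS, eax);
}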
@@ -370,7 +421,7 @@ ENTRY(system_call)
 	GET_THREAD_INFO(%ebp)
 					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz syscall_trace_entry
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
@@ -383,10 +434,6 @@ syscall_exit:
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
-	testl $X86_EFLAGS_TF,PT_EFLAGS(%esp)	# If tracing set singlestep flag on exit
-	jz no_singlestep
-	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
-no_singlestep:
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
@@ -514,12 +561,8 @@ END(work_pending)
 syscall_trace_entry:
 	movl $-ENOSYS,PT_EAX(%esp)
 	movl %esp, %eax
-	xorl %edx,%edx
-	call do_syscall_trace
-	cmpl $0, %eax
-	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
-					# so must skip actual syscall
-	movl PT_ORIG_EAX(%esp), %eax
+	call syscall_trace_enter
+	/* What it returned is what we'll actually use.  */
 	cmpl $(nr_syscalls), %eax
 	jnae syscall_call
 	jmp syscall_exit
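The old sequence passed %edx = 0 to do_syscall_trace() and treated a nonzero return as "running under PTRACE_SYSEMU, skip the syscall". The new C helper absorbs that decision and simply hands back the syscall number to execute; an out-of-range return such as -1 fails the cmpl $(nr_syscalls) check and falls through to syscall_exit with no extra branch in assembly. A hedged, standalone sketch of that contract; the helper name and return convention come from this diff, while the skip predicate is hypothetical, standing in for the real SYSEMU/tracer logic in arch/x86/kernel/ptrace.c:

/* Minimal stand-in for the saved-register frame; the real struct pt_regs
 * holds all saved registers, orig_ax being the entry %eax (syscall nr).
 */
struct pt_regs {
	long orig_ax;
	/* ... remaining saved registers ... */
};

static int tracer_wants_syscall_skipped(struct pt_regs *regs)
{
	(void)regs;
	return 0;	/* hypothetical predicate, e.g. PTRACE_SYSEMU */
}

/* Returns the (possibly tracer-rewritten) syscall number to run, or -1
 * to skip it; -1 fails the range check back in the assembly.
 */
long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = regs->orig_ax;

	if (tracer_wants_syscall_skipped(regs))
		ret = -1L;
	return ret;
}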
@@ -528,14 +571,13 @@ END(syscall_trace_entry)
	# perform syscall exit tracing
 	ALIGN
 syscall_exit_work:
-	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
+	testb $_TIF_WORK_SYSCALL_EXIT, %cl
 	jz work_pending
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
+	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
 					# schedule() instead
 	movl %esp, %eax
-	movl $1, %edx
-	call do_syscall_trace
+	call syscall_trace_leave
 	jmp resume_userspace
 END(syscall_exit_work)
 	CFI_ENDPROC
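The exit side mirrors the entry side: the do_syscall_trace(regs, 1) pair collapses into syscall_trace_leave(regs), and the open-coded flag list becomes _TIF_WORK_SYSCALL_EXIT. The TF/single-step fixup deleted from syscall_exit earlier in this diff is presumably now handled inside the C helper as well. A hedged sketch of the exit-side mask, reusing the illustrative bit values from the entry-mask sketch above and showing only the flags the replaced testb named:

/* Illustrative value; see the x86 thread_info header for the real bit. */
#define _TIF_SINGLESTEP		(1 << 4)

/* Grouped exit-work mask, assumed to cover at least the flags the
 * removed testb listed; defined next to _TIF_WORK_SYSCALL_ENTRY.
 */
#define _TIF_WORK_SYSCALL_EXIT	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
				 _TIF_SINGLESTEP)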
@@ -1024,6 +1066,7 @@ ENDPROC(kernel_thread_helper)
 ENTRY(xen_sysenter_target)
 	RING0_INT_FRAME
 	addl $5*4, %esp		/* remove xen-provided frame */
+	CFI_ADJUST_CFA_OFFSET -5*4
 	jmp sysenter_past_esp
 	CFI_ENDPROC
 