author		Jeremy Fitzhardinge <jeremy@goop.org>	2006-12-06 20:14:02 -0500
committer	Andi Kleen <andi@basil.nowhere.org>	2006-12-06 20:14:02 -0500
commit		eb5b7b9d86f46b45ba1f986302fdf7df84fb8297 (patch)
tree		9b29d3d5fae21e33a7f4cff72a340d1661e5023f /arch/i386/kernel/entry.S
parent		bcddc0155f351ab3f06c6ede6d91fd399ef9e18f (diff)
[PATCH] i386: Use asm-offsets for the offsets of registers into the pt_regs struct
Use asm-offsets for the offsets of registers into the pt_regs struct, rather
than having hard-coded constants.

I left the constants in the comments of entry.S because they're useful for
reference; the code in entry.S is very dependent on the layout of pt_regs,
even when using asm-offsets.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Keith Owens <kaos@ocs.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Diffstat (limited to 'arch/i386/kernel/entry.S')
-rw-r--r--	arch/i386/kernel/entry.S	120
1 file changed, 52 insertions(+), 68 deletions(-)
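Note: the PT_* symbols used throughout the diff below are not defined in entry.S itself; they come from the kernel's asm-offsets machinery, which evaluates offsetof() on struct pt_regs at build time and turns the results into assembler-visible constants. Since this diffstat is limited to entry.S, the generating side is not shown here. The following is only a sketch of what the corresponding additions to arch/i386/kernel/asm-offsets.c would look like: the OFFSET()/DEFINE() helpers follow the kernel's usual asm-offsets pattern, the xds/xes/xcs/xss field names assume the i386 struct pt_regs layout of this era, and the numeric comments mirror the hard-coded constants this patch removes.

/* Sketch only: how the PT_* constants would be generated. */
#include <linux/stddef.h>		/* offsetof() */
#include <asm/ptrace.h>			/* struct pt_regs */

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

void foo(void)
{
	OFFSET(PT_EBX,      pt_regs, ebx);	/* 0x00 */
	OFFSET(PT_ECX,      pt_regs, ecx);	/* 0x04 */
	OFFSET(PT_EDX,      pt_regs, edx);	/* 0x08 */
	OFFSET(PT_ESI,      pt_regs, esi);	/* 0x0C */
	OFFSET(PT_EDI,      pt_regs, edi);	/* 0x10 */
	OFFSET(PT_EBP,      pt_regs, ebp);	/* 0x14 */
	OFFSET(PT_EAX,      pt_regs, eax);	/* 0x18 */
	OFFSET(PT_DS,       pt_regs, xds);	/* 0x1C */
	OFFSET(PT_ES,       pt_regs, xes);	/* 0x20 */
	OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);	/* 0x24 */
	OFFSET(PT_EIP,      pt_regs, eip);	/* 0x28 */
	OFFSET(PT_CS,       pt_regs, xcs);	/* 0x2C */
	OFFSET(PT_EFLAGS,   pt_regs, eflags);	/* 0x30 */
	OFFSET(PT_OLDESP,   pt_regs, esp);	/* 0x34 */
	OFFSET(PT_OLDSS,    pt_regs, xss);	/* 0x38 */
}

At build time such a file is compiled to assembly and the "->" markers are extracted into a generated asm-offsets.h of plain #defines, so PT_EAX(%esp) in entry.S keeps tracking struct pt_regs even if the layout changes.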
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index c38d801ba0bb..0069bf01603e 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -54,22 +54,6 @@
 
 #define nr_syscalls ((syscall_table_size)/4)
 
-EBX = 0x00
-ECX = 0x04
-EDX = 0x08
-ESI = 0x0C
-EDI = 0x10
-EBP = 0x14
-EAX = 0x18
-DS = 0x1C
-ES = 0x20
-ORIG_EAX = 0x24
-EIP = 0x28
-CS = 0x2C
-EFLAGS = 0x30
-OLDESP = 0x34
-OLDSS = 0x38
-
 CF_MASK = 0x00000001
 TF_MASK = 0x00000100
 IF_MASK = 0x00000200
@@ -93,7 +77,7 @@ VM_MASK = 0x00020000
 
 .macro TRACE_IRQS_IRET
 #ifdef CONFIG_TRACE_IRQFLAGS
-	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
+	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off?
 	jz 1f
 	TRACE_IRQS_ON
 1:
@@ -199,18 +183,18 @@ VM_MASK = 0x00020000
 #define RING0_PTREGS_FRAME \
 	CFI_STARTPROC simple;\
 	CFI_SIGNAL_FRAME;\
-	CFI_DEF_CFA esp, OLDESP-EBX;\
-	/*CFI_OFFSET cs, CS-OLDESP;*/\
-	CFI_OFFSET eip, EIP-OLDESP;\
-	/*CFI_OFFSET es, ES-OLDESP;*/\
-	/*CFI_OFFSET ds, DS-OLDESP;*/\
-	CFI_OFFSET eax, EAX-OLDESP;\
-	CFI_OFFSET ebp, EBP-OLDESP;\
-	CFI_OFFSET edi, EDI-OLDESP;\
-	CFI_OFFSET esi, ESI-OLDESP;\
-	CFI_OFFSET edx, EDX-OLDESP;\
-	CFI_OFFSET ecx, ECX-OLDESP;\
-	CFI_OFFSET ebx, EBX-OLDESP
+	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
+	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
+	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
+	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
+	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
+	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
+	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
+	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
+	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
+	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
+	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
+	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
 
 ENTRY(ret_from_fork)
 	CFI_STARTPROC
@@ -242,8 +226,8 @@ ret_from_exception:
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
 check_userspace:
-	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
-	movb CS(%esp), %al
+	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
+	movb PT_CS(%esp), %al
 	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
 	cmpl $USER_RPL, %eax
 	jb resume_kernel		# not returning to v8086 or userspace
@@ -266,7 +250,7 @@ need_resched:
 	movl TI_flags(%ebp), %ecx	# need_resched set ?
 	testb $_TIF_NEED_RESCHED, %cl
 	jz restore_all
-	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
+	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
 	jmp need_resched
@@ -332,15 +316,15 @@ sysenter_past_esp:
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
-	movl %eax,EAX(%esp)
+	movl %eax,PT_EAX(%esp)
 	DISABLE_INTERRUPTS
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
 	jne syscall_exit_work
 /* if something modifies registers it must also disable sysexit */
-	movl EIP(%esp), %edx
-	movl OLDESP(%esp), %ecx
+	movl PT_EIP(%esp), %edx
+	movl PT_OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS_SYSEXIT
@@ -354,7 +338,7 @@ ENTRY(system_call)
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
-	testl $TF_MASK,EFLAGS(%esp)
+	testl $TF_MASK,PT_EFLAGS(%esp)
 	jz no_singlestep
 	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
 no_singlestep:
@@ -366,7 +350,7 @@ no_singlestep:
 	jae syscall_badsys
 syscall_call:
 	call *sys_call_table(,%eax,4)
-	movl %eax,EAX(%esp)		# store the return value
+	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
 	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
@@ -377,12 +361,12 @@ syscall_exit:
 	jne syscall_exit_work
 
 restore_all:
-	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
-	# Warning: OLDSS(%esp) contains the wrong/random values if we
+	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
+	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 	# are returning to the kernel.
 	# See comments in process.c:copy_thread() for details.
-	movb OLDSS(%esp), %ah
-	movb CS(%esp), %al
+	movb PT_OLDSS(%esp), %ah
+	movb PT_CS(%esp), %al
 	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
 	CFI_REMEMBER_STATE
@@ -409,7 +393,7 @@ iret_exc:
 
 	CFI_RESTORE_STATE
 ldt_ss:
-	larl OLDSS(%esp), %eax
+	larl PT_OLDSS(%esp), %eax
 	jnz restore_nocheck
 	testl $0x00400000, %eax		# returning to 32bit stack?
 	jnz restore_nocheck		# allright, normal return
@@ -419,7 +403,7 @@ ldt_ss:
 	 * This is an "official" bug of all the x86-compatible
 	 * CPUs, which we can try to work around to make
 	 * dosemu and wine happy. */
-	movl OLDESP(%esp), %eax
+	movl PT_OLDESP(%esp), %eax
 	movl %esp, %edx
 	call patch_espfix_desc
 	pushl $__ESPFIX_SS
@@ -454,7 +438,7 @@ work_resched:
 
 work_notifysig:				# deal with pending signals and
 					# notify-resume requests
-	testl $VM_MASK, EFLAGS(%esp)
+	testl $VM_MASK, PT_EFLAGS(%esp)
 	movl %esp, %eax
 	jne work_notifysig_v86		# returning to kernel-space or
 					# vm86-space
@@ -479,14 +463,14 @@ work_notifysig_v86:
 	# perform syscall exit tracing
 	ALIGN
 syscall_trace_entry:
-	movl $-ENOSYS,EAX(%esp)
+	movl $-ENOSYS,PT_EAX(%esp)
 	movl %esp, %eax
 	xorl %edx,%edx
 	call do_syscall_trace
 	cmpl $0, %eax
 	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
 					# so must skip actual syscall
-	movl ORIG_EAX(%esp), %eax
+	movl PT_ORIG_EAX(%esp), %eax
 	cmpl $(nr_syscalls), %eax
 	jnae syscall_call
 	jmp syscall_exit
@@ -511,11 +495,11 @@ syscall_fault:
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
-	movl $-EFAULT,EAX(%esp)
+	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace
 
 syscall_badsys:
-	movl $-ENOSYS,EAX(%esp)
+	movl $-ENOSYS,PT_EAX(%esp)
 	jmp resume_userspace
 	CFI_ENDPROC
 
@@ -636,10 +620,10 @@ error_code:
 	popl %ecx
 	CFI_ADJUST_CFA_OFFSET -4
 	/*CFI_REGISTER es, ecx*/
-	movl ES(%esp), %edi		# get the function address
-	movl ORIG_EAX(%esp), %edx	# get the error code
-	movl $-1, ORIG_EAX(%esp)
-	movl %ecx, ES(%esp)
+	movl PT_ES(%esp), %edi		# get the function address
+	movl PT_ORIG_EAX(%esp), %edx	# get the error code
+	movl $-1, PT_ORIG_EAX(%esp)
+	movl %ecx, PT_ES(%esp)
 	/*CFI_REL_OFFSET es, ES*/
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
@@ -942,26 +926,26 @@ ENTRY(arch_unwind_init_running)
 	movl 4(%esp), %edx
 	movl (%esp), %ecx
 	leal 4(%esp), %eax
-	movl %ebx, EBX(%edx)
+	movl %ebx, PT_EBX(%edx)
 	xorl %ebx, %ebx
-	movl %ebx, ECX(%edx)
-	movl %ebx, EDX(%edx)
-	movl %esi, ESI(%edx)
-	movl %edi, EDI(%edx)
-	movl %ebp, EBP(%edx)
-	movl %ebx, EAX(%edx)
-	movl $__USER_DS, DS(%edx)
-	movl $__USER_DS, ES(%edx)
-	movl %ebx, ORIG_EAX(%edx)
-	movl %ecx, EIP(%edx)
+	movl %ebx, PT_ECX(%edx)
+	movl %ebx, PT_EDX(%edx)
+	movl %esi, PT_ESI(%edx)
+	movl %edi, PT_EDI(%edx)
+	movl %ebp, PT_EBP(%edx)
+	movl %ebx, PT_EAX(%edx)
+	movl $__USER_DS, PT_DS(%edx)
+	movl $__USER_DS, PT_ES(%edx)
+	movl %ebx, PT_ORIG_EAX(%edx)
+	movl %ecx, PT_EIP(%edx)
 	movl 12(%esp), %ecx
-	movl $__KERNEL_CS, CS(%edx)
-	movl %ebx, EFLAGS(%edx)
-	movl %eax, OLDESP(%edx)
+	movl $__KERNEL_CS, PT_CS(%edx)
+	movl %ebx, PT_EFLAGS(%edx)
+	movl %eax, PT_OLDESP(%edx)
 	movl 8(%esp), %eax
 	movl %ecx, 8(%esp)
-	movl EBX(%edx), %ebx
-	movl $__KERNEL_DS, OLDSS(%edx)
+	movl PT_EBX(%edx), %ebx
+	movl $__KERNEL_DS, PT_OLDSS(%edx)
 	jmpl *%eax
 	CFI_ENDPROC
 ENDPROC(arch_unwind_init_running)