author		H. Peter Anvin <hpa@zytor.com>	2008-01-30 07:30:56 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:56 -0500
commit		65ea5b0349903585bfed9720fa06f5edb4f1cd25 (patch)
tree		6c252228c34416b7e2077f23475de34500c2ab8a /arch/x86/kernel
parent		53756d3722172815f52272b28c6d5d5e9639adde (diff)
x86: rename the struct pt_regs members for 32/64-bit consistency
We have a lot of code which differs only by the naming of specific members of structures that contain registers. In order to enable additional unifications, this patch drops the e- or r- size prefix from the register names in struct pt_regs, and drops the x- prefixes for segment registers on the 32-bit side. This patch also performs the equivalent renames in some additional places that might be candidates for unification in the future.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
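The net effect on the 32-bit register structure can be sketched as follows (reconstructed from the asm-offsets_32.c hunk below; field types and ordering are abbreviated and the gs member is omitted since it is not touched here — an illustration, not the full definition from the ptrace headers):

struct pt_regs {
	long bx;	/* was ebx */
	long cx;	/* was ecx */
	long dx;	/* was edx */
	long si;	/* was esi */
	long di;	/* was edi */
	long bp;	/* was ebp */
	long ax;	/* was eax */
	int  ds;	/* was xds */
	int  es;	/* was xes */
	int  fs;	/* was xfs */
	long orig_ax;	/* was orig_eax */
	long ip;	/* was eip */
	int  cs;	/* was xcs */
	long flags;	/* was eflags */
	long sp;	/* was esp */
	int  ss;	/* was xss */
};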
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/acpi/wakeup_64.S	32
-rw-r--r--	arch/x86/kernel/asm-offsets_32.c	32
-rw-r--r--	arch/x86/kernel/asm-offsets_64.c	18
-rw-r--r--	arch/x86/kernel/cpu/common.c	2
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_64.c	14
-rw-r--r--	arch/x86/kernel/i8259_32.c	2
-rw-r--r--	arch/x86/kernel/io_apic_64.c	2
-rw-r--r--	arch/x86/kernel/ioport_32.c	8
-rw-r--r--	arch/x86/kernel/ioport_64.c	6
-rw-r--r--	arch/x86/kernel/irq_32.c	20
-rw-r--r--	arch/x86/kernel/irq_64.c	10
-rw-r--r--	arch/x86/kernel/kprobes_32.c	92
-rw-r--r--	arch/x86/kernel/kprobes_64.c	69
-rw-r--r--	arch/x86/kernel/process_32.c	140
-rw-r--r--	arch/x86/kernel/process_64.c	44
-rw-r--r--	arch/x86/kernel/ptrace_32.c	26
-rw-r--r--	arch/x86/kernel/ptrace_64.c	24
-rw-r--r--	arch/x86/kernel/signal_32.c	192
-rw-r--r--	arch/x86/kernel/signal_64.c	115
-rw-r--r--	arch/x86/kernel/smp_64.c	2
-rw-r--r--	arch/x86/kernel/smpboot_32.c	10
-rw-r--r--	arch/x86/kernel/step.c	15
-rw-r--r--	arch/x86/kernel/suspend_asm_64.S	32
-rw-r--r--	arch/x86/kernel/time_32.c	8
-rw-r--r--	arch/x86/kernel/time_64.c	4
-rw-r--r--	arch/x86/kernel/traps_32.c	114
-rw-r--r--	arch/x86/kernel/traps_64.c	84
-rw-r--r--	arch/x86/kernel/vm86_32.c	94
-rw-r--r--	arch/x86/kernel/vmi_32.c	50
-rw-r--r--	arch/x86/kernel/vsyscall_64.c	2
30 files changed, 630 insertions, 633 deletions
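To see the kind of unification this enables: with size-neutral member names, a helper that reads a register no longer needs an eip/rip spelling per word size. A hypothetical sketch (instruction_pointer_of is an illustrative name, not code from this patch):

static inline unsigned long instruction_pointer_of(struct pt_regs *regs)
{
	/* identical source on 32- and 64-bit x86 after this rename;
	 * previously regs->eip (32-bit) vs. regs->rip (64-bit) */
	return regs->ip;
}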
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 5ed3bc5c61d7..2e1b9e0d0767 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -344,13 +344,13 @@ do_suspend_lowlevel:
 	call	save_processor_state

 	movq	$saved_context, %rax
-	movq	%rsp, pt_regs_rsp(%rax)
-	movq	%rbp, pt_regs_rbp(%rax)
-	movq	%rsi, pt_regs_rsi(%rax)
-	movq	%rdi, pt_regs_rdi(%rax)
-	movq	%rbx, pt_regs_rbx(%rax)
-	movq	%rcx, pt_regs_rcx(%rax)
-	movq	%rdx, pt_regs_rdx(%rax)
+	movq	%rsp, pt_regs_sp(%rax)
+	movq	%rbp, pt_regs_bp(%rax)
+	movq	%rsi, pt_regs_si(%rax)
+	movq	%rdi, pt_regs_di(%rax)
+	movq	%rbx, pt_regs_bx(%rax)
+	movq	%rcx, pt_regs_cx(%rax)
+	movq	%rdx, pt_regs_dx(%rax)
 	movq	%r8, pt_regs_r8(%rax)
 	movq	%r9, pt_regs_r9(%rax)
 	movq	%r10, pt_regs_r10(%rax)
@@ -360,7 +360,7 @@ do_suspend_lowlevel:
 	movq	%r14, pt_regs_r14(%rax)
 	movq	%r15, pt_regs_r15(%rax)
 	pushfq
-	popq	pt_regs_eflags(%rax)
+	popq	pt_regs_flags(%rax)

 	movq	$.L97, saved_rip(%rip)

@@ -391,15 +391,15 @@ do_suspend_lowlevel:
 	movq	%rbx, %cr2
 	movq	saved_context_cr0(%rax), %rbx
 	movq	%rbx, %cr0
-	pushq	pt_regs_eflags(%rax)
+	pushq	pt_regs_flags(%rax)
 	popfq
-	movq	pt_regs_rsp(%rax), %rsp
-	movq	pt_regs_rbp(%rax), %rbp
-	movq	pt_regs_rsi(%rax), %rsi
-	movq	pt_regs_rdi(%rax), %rdi
-	movq	pt_regs_rbx(%rax), %rbx
-	movq	pt_regs_rcx(%rax), %rcx
-	movq	pt_regs_rdx(%rax), %rdx
+	movq	pt_regs_sp(%rax), %rsp
+	movq	pt_regs_bp(%rax), %rbp
+	movq	pt_regs_si(%rax), %rsi
+	movq	pt_regs_di(%rax), %rdi
+	movq	pt_regs_bx(%rax), %rbx
+	movq	pt_regs_cx(%rax), %rcx
+	movq	pt_regs_dx(%rax), %rdx
 	movq	pt_regs_r8(%rax), %r8
 	movq	pt_regs_r9(%rax), %r9
 	movq	pt_regs_r10(%rax), %r10
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index fd7464d23339..a3a8be7618d1 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -75,22 +75,22 @@ void foo(void)
 	OFFSET(GDS_pad, Xgt_desc_struct, pad);
 	BLANK();

-	OFFSET(PT_EBX, pt_regs, ebx);
-	OFFSET(PT_ECX, pt_regs, ecx);
-	OFFSET(PT_EDX, pt_regs, edx);
-	OFFSET(PT_ESI, pt_regs, esi);
-	OFFSET(PT_EDI, pt_regs, edi);
-	OFFSET(PT_EBP, pt_regs, ebp);
-	OFFSET(PT_EAX, pt_regs, eax);
-	OFFSET(PT_DS, pt_regs, xds);
-	OFFSET(PT_ES, pt_regs, xes);
-	OFFSET(PT_FS, pt_regs, xfs);
-	OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
-	OFFSET(PT_EIP, pt_regs, eip);
-	OFFSET(PT_CS, pt_regs, xcs);
-	OFFSET(PT_EFLAGS, pt_regs, eflags);
-	OFFSET(PT_OLDESP, pt_regs, esp);
-	OFFSET(PT_OLDSS, pt_regs, xss);
+	OFFSET(PT_EBX, pt_regs, bx);
+	OFFSET(PT_ECX, pt_regs, cx);
+	OFFSET(PT_EDX, pt_regs, dx);
+	OFFSET(PT_ESI, pt_regs, si);
+	OFFSET(PT_EDI, pt_regs, di);
+	OFFSET(PT_EBP, pt_regs, bp);
+	OFFSET(PT_EAX, pt_regs, ax);
+	OFFSET(PT_DS, pt_regs, ds);
+	OFFSET(PT_ES, pt_regs, es);
+	OFFSET(PT_FS, pt_regs, fs);
+	OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
+	OFFSET(PT_EIP, pt_regs, ip);
+	OFFSET(PT_CS, pt_regs, cs);
+	OFFSET(PT_EFLAGS, pt_regs, flags);
+	OFFSET(PT_OLDESP, pt_regs, sp);
+	OFFSET(PT_OLDSS, pt_regs, ss);
 	BLANK();

 	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index c27c646214f4..2e918ebf21d3 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -83,14 +83,14 @@ int main(void)
 	DEFINE(pbe_next, offsetof(struct pbe, next));
 	BLANK();
 #define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
-	ENTRY(rbx);
-	ENTRY(rbx);
-	ENTRY(rcx);
-	ENTRY(rdx);
-	ENTRY(rsp);
-	ENTRY(rbp);
-	ENTRY(rsi);
-	ENTRY(rdi);
+	ENTRY(bx);
+	ENTRY(bx);
+	ENTRY(cx);
+	ENTRY(dx);
+	ENTRY(sp);
+	ENTRY(bp);
+	ENTRY(si);
+	ENTRY(di);
 	ENTRY(r8);
 	ENTRY(r9);
 	ENTRY(r10);
@@ -99,7 +99,7 @@ int main(void)
 	ENTRY(r13);
 	ENTRY(r14);
 	ENTRY(r15);
-	ENTRY(eflags);
+	ENTRY(flags);
 	BLANK();
 #undef ENTRY
 #define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e2fcf2051bdb..5db2a163bf4b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -634,7 +634,7 @@ void __init early_cpu_init(void)
 struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
-	regs->xfs = __KERNEL_PERCPU;
+	regs->fs = __KERNEL_PERCPU;
 	return regs;
 }

diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 3c7672c40cf4..0adad772d0da 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -110,12 +110,12 @@ static void print_mce(struct mce *m)
 	       KERN_EMERG
 	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
 	       m->cpu, m->mcgstatus, m->bank, m->status);
-	if (m->rip) {
+	if (m->ip) {
 		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
 		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
-		       m->cs, m->rip);
+		       m->cs, m->ip);
 		if (m->cs == __KERNEL_CS)
-			print_symbol("{%s}", m->rip);
+			print_symbol("{%s}", m->ip);
 		printk("\n");
 	}
 	printk(KERN_EMERG "TSC %Lx ", m->tsc);
@@ -156,16 +156,16 @@ static int mce_available(struct cpuinfo_x86 *c)
 static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 {
 	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
-		m->rip = regs->rip;
+		m->ip = regs->ip;
 		m->cs = regs->cs;
 	} else {
-		m->rip = 0;
+		m->ip = 0;
 		m->cs = 0;
 	}
 	if (rip_msr) {
 		/* Assume the RIP in the MSR is exact. Is this true? */
 		m->mcgstatus |= MCG_STATUS_EIPV;
-		rdmsrl(rip_msr, m->rip);
+		rdmsrl(rip_msr, m->ip);
 		m->cs = 0;
 	}
 }
@@ -288,7 +288,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	 * instruction which caused the MCE.
 	 */
 	if (m.mcgstatus & MCG_STATUS_EIPV)
-		user_space = panicm.rip && (panicm.cs & 3);
+		user_space = panicm.ip && (panicm.cs & 3);

 	/*
 	 * If we know that the error was in user space, send a
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index 3321ce669295..f201e7da1bbc 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -339,7 +339,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
 	outb(0,0xF0);
 	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
 		return IRQ_NONE;
-	math_error((void __user *)get_irq_regs()->eip);
+	math_error((void __user *)get_irq_regs()->ip);
 	return IRQ_HANDLED;
 }

diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 4ef85a3b3f9f..fa70005be5e8 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1412,7 +1412,7 @@ static void irq_complete_move(unsigned int irq)
 	if (likely(!cfg->move_in_progress))
 		return;

-	vector = ~get_irq_regs()->orig_rax;
+	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
 	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
 		cpumask_t cleanup_mask;
diff --git a/arch/x86/kernel/ioport_32.c b/arch/x86/kernel/ioport_32.c
index c281ffa18259..9295e01ff49c 100644
--- a/arch/x86/kernel/ioport_32.c
+++ b/arch/x86/kernel/ioport_32.c
@@ -100,7 +100,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
  * beyond the 0x3ff range: to get the full 65536 ports bitmapped
  * you'd need 8kB of bitmaps/process, which is a bit excessive.
  *
- * Here we just change the eflags value on the stack: we allow
+ * Here we just change the flags value on the stack: we allow
  * only the super-user to do it. This depends on the stack-layout
  * on system-call entry - see also fork() and the signal handling
  * code.
@@ -109,8 +109,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 asmlinkage long sys_iopl(unsigned long regsp)
 {
 	volatile struct pt_regs *regs = (struct pt_regs *)&regsp;
-	unsigned int level = regs->ebx;
-	unsigned int old = (regs->eflags >> 12) & 3;
+	unsigned int level = regs->bx;
+	unsigned int old = (regs->flags >> 12) & 3;
 	struct thread_struct *t = &current->thread;

 	if (level > 3)
@@ -122,7 +122,7 @@ asmlinkage long sys_iopl(unsigned long regsp)
 	}

 	t->iopl = level << 12;
-	regs->eflags = (regs->eflags & ~X86_EFLAGS_IOPL) | t->iopl;
+	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | t->iopl;
 	set_iopl_mask(t->iopl);

 	return 0;
diff --git a/arch/x86/kernel/ioport_64.c b/arch/x86/kernel/ioport_64.c
index 5f62fad64dab..ff7514b757e5 100644
--- a/arch/x86/kernel/ioport_64.c
+++ b/arch/x86/kernel/ioport_64.c
@@ -95,7 +95,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
  * beyond the 0x3ff range: to get the full 65536 ports bitmapped
  * you'd need 8kB of bitmaps/process, which is a bit excessive.
  *
- * Here we just change the eflags value on the stack: we allow
+ * Here we just change the flags value on the stack: we allow
  * only the super-user to do it. This depends on the stack-layout
  * on system-call entry - see also fork() and the signal handling
  * code.
@@ -103,7 +103,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)

 asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
 {
-	unsigned int old = (regs->eflags >> 12) & 3;
+	unsigned int old = (regs->flags >> 12) & 3;

 	if (level > 3)
 		return -EINVAL;
@@ -112,6 +112,6 @@ asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
 	}
-	regs->eflags = (regs->eflags &~ X86_EFLAGS_IOPL) | (level << 12);
+	regs->flags = (regs->flags &~ X86_EFLAGS_IOPL) | (level << 12);
 	return 0;
 }
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index d3fde94f7345..b49616bcc16b 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -70,7 +70,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 	/* high bit used in ret_from_ code */
-	int irq = ~regs->orig_eax;
+	int irq = ~regs->orig_ax;
 	struct irq_desc *desc = irq_desc + irq;
 #ifdef CONFIG_4KSTACKS
 	union irq_ctx *curctx, *irqctx;
@@ -88,13 +88,13 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{
-		long esp;
+		long sp;

 		__asm__ __volatile__("andl %%esp,%0" :
-					"=r" (esp) : "0" (THREAD_SIZE - 1));
-		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
+					"=r" (sp) : "0" (THREAD_SIZE - 1));
+		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
 			printk("do_IRQ: stack overflow: %ld\n",
-				esp - sizeof(struct thread_info));
+				sp - sizeof(struct thread_info));
 			dump_stack();
 		}
 	}
@@ -112,7 +112,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 	 * current stack (which is the irq stack already after all)
 	 */
 	if (curctx != irqctx) {
-		int arg1, arg2, ebx;
+		int arg1, arg2, bx;

 		/* build the stack frame on the IRQ stack */
 		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
@@ -128,10 +128,10 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 		       (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

 		asm volatile(
 			"       xchgl  %%ebx,%%esp      \n"
 			"       call   *%%edi           \n"
 			"       movl   %%ebx,%%esp      \n"
-			: "=a" (arg1), "=d" (arg2), "=b" (ebx)
+			: "=a" (arg1), "=d" (arg2), "=b" (bx)
 			: "0" (irq),   "1" (desc),  "2" (isp),
 			  "D" (desc->handle_irq)
 			: "memory", "cc"
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 6c3a3b6e5cf4..3aac15466a91 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -53,11 +53,11 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	u64 curbase = (u64)task_stack_page(current);
 	static unsigned long warned = -60*HZ;

-	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
-	    regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
+	if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
+	    regs->sp < curbase + sizeof(struct thread_info) + 128 &&
 	    time_after(jiffies, warned + 60*HZ)) {
-		printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
-			current->comm, curbase, regs->rsp);
+		printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
+			current->comm, curbase, regs->sp);
 		show_stack(NULL,NULL);
 		warned = jiffies;
 	}
@@ -162,7 +162,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);

 	/* high bit used in ret_from_ code */
-	unsigned vector = ~regs->orig_rax;
+	unsigned vector = ~regs->orig_ax;
 	unsigned irq;

 	exit_idle();
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index bc4a68367cd0..d708cd4f956f 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -212,7 +212,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 {
 	__get_cpu_var(current_kprobe) = p;
 	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
-		= (regs->eflags & (TF_MASK | IF_MASK));
+		= (regs->flags & (TF_MASK | IF_MASK));
 	if (is_IF_modifier(p->opcode))
 		kcb->kprobe_saved_eflags &= ~IF_MASK;
 }
@@ -232,20 +232,20 @@ static __always_inline void restore_btf(void)
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	clear_btf();
-	regs->eflags |= TF_MASK;
-	regs->eflags &= ~IF_MASK;
+	regs->flags |= TF_MASK;
+	regs->flags &= ~IF_MASK;
 	/*single step inline if the instruction is an int3*/
 	if (p->opcode == BREAKPOINT_INSTRUCTION)
-		regs->eip = (unsigned long)p->addr;
+		regs->ip = (unsigned long)p->addr;
 	else
-		regs->eip = (unsigned long)p->ainsn.insn;
+		regs->ip = (unsigned long)p->ainsn.insn;
 }

 /* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				      struct pt_regs *regs)
 {
-	unsigned long *sara = (unsigned long *)&regs->esp;
+	unsigned long *sara = (unsigned long *)&regs->sp;

 	ri->ret_addr = (kprobe_opcode_t *) *sara;

@@ -264,7 +264,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	kprobe_opcode_t *addr;
 	struct kprobe_ctlblk *kcb;

-	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
+	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));

 	/*
 	 * We don't want to be preempted for the entire
@@ -279,8 +279,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	if (p) {
 		if (kcb->kprobe_status == KPROBE_HIT_SS &&
 			*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
-			regs->eflags &= ~TF_MASK;
-			regs->eflags |= kcb->kprobe_saved_eflags;
+			regs->flags &= ~TF_MASK;
+			regs->flags |= kcb->kprobe_saved_eflags;
 			goto no_kprobe;
 		}
 		/* We have reentered the kprobe_handler(), since
@@ -301,7 +301,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 			 * another cpu right after we hit, no further
 			 * handling of this interrupt is appropriate
 			 */
-			regs->eip -= sizeof(kprobe_opcode_t);
+			regs->ip -= sizeof(kprobe_opcode_t);
 			ret = 1;
 			goto no_kprobe;
 		}
@@ -325,7 +325,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 			 * Back up over the (now missing) int3 and run
 			 * the original instruction.
 			 */
-			regs->eip -= sizeof(kprobe_opcode_t);
+			regs->ip -= sizeof(kprobe_opcode_t);
 			ret = 1;
 		}
 		/* Not one of ours: let kernel handle it */
@@ -344,7 +344,7 @@ ss_probe:
 	if (p->ainsn.boostable == 1 && !p->post_handler){
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
-		regs->eip = (unsigned long)p->ainsn.insn;
+		regs->ip = (unsigned long)p->ainsn.insn;
 		preempt_enable_no_resched();
 		return 1;
 	}
@@ -368,7 +368,7 @@ no_kprobe:
 	asm volatile ( ".global kretprobe_trampoline\n"
 			"kretprobe_trampoline: \n"
 			"	pushf\n"
-			/* skip cs, eip, orig_eax */
+			/* skip cs, ip, orig_ax */
 			"	subl $12, %esp\n"
 			"	pushl %fs\n"
 			"	pushl %ds\n"
@@ -382,10 +382,10 @@ no_kprobe:
 			"	pushl %ebx\n"
 			"	movl %esp, %eax\n"
 			"	call trampoline_handler\n"
-			/* move eflags to cs */
+			/* move flags to cs */
 			"	movl 52(%esp), %edx\n"
 			"	movl %edx, 48(%esp)\n"
-			/* save true return address on eflags */
+			/* save true return address on flags */
 			"	movl %eax, 52(%esp)\n"
 			"	popl %ebx\n"
 			"	popl %ecx\n"
@@ -394,7 +394,7 @@ no_kprobe:
 			"	popl %edi\n"
 			"	popl %ebp\n"
 			"	popl %eax\n"
-			/* skip eip, orig_eax, es, ds, fs */
+			/* skip ip, orig_ax, es, ds, fs */
 			"	addl $20, %esp\n"
 			"	popf\n"
 			"	ret\n");
@@ -415,9 +415,9 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 	/* fixup registers */
-	regs->xcs = __KERNEL_CS | get_kernel_rpl();
-	regs->eip = trampoline_address;
-	regs->orig_eax = 0xffffffff;
+	regs->cs = __KERNEL_CS | get_kernel_rpl();
+	regs->ip = trampoline_address;
+	regs->orig_ax = 0xffffffff;

 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -478,11 +478,11 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
  * interrupt. We have to fix up the stack as follows:
  *
  * 0) Except in the case of absolute or indirect jump or call instructions,
- * the new eip is relative to the copied instruction. We need to make
+ * the new ip is relative to the copied instruction. We need to make
  * it relative to the original instruction.
  *
  * 1) If the single-stepped instruction was pushfl, then the TF and IF
- * flags are set in the just-pushed eflags, and may need to be cleared.
+ * flags are set in the just-pushed flags, and may need to be cleared.
  *
  * 2) If the single-stepped instruction was a call, the return address
  * that is atop the stack is the address following the copied instruction.
@@ -493,11 +493,11 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 static void __kprobes resume_execution(struct kprobe *p,
 		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
 {
-	unsigned long *tos = (unsigned long *)&regs->esp;
+	unsigned long *tos = (unsigned long *)&regs->sp;
 	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
 	unsigned long orig_eip = (unsigned long)p->addr;

-	regs->eflags &= ~TF_MASK;
+	regs->flags &= ~TF_MASK;
 	switch (p->ainsn.insn[0]) {
 	case 0x9c:		/* pushfl */
 		*tos &= ~(TF_MASK | IF_MASK);
@@ -508,8 +508,8 @@ static void __kprobes resume_execution(struct kprobe *p,
 	case 0xca:
 	case 0xcb:
 	case 0xcf:
-	case 0xea:		/* jmp absolute -- eip is correct */
-		/* eip is already adjusted, no more changes required */
+	case 0xea:		/* jmp absolute -- ip is correct */
+		/* ip is already adjusted, no more changes required */
 		p->ainsn.boostable = 1;
 		goto no_change;
 	case 0xe8:		/* call relative - Fix return addr */
@@ -522,14 +522,14 @@ static void __kprobes resume_execution(struct kprobe *p,
 		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
 			/*
 			 * call absolute, indirect
-			 * Fix return addr; eip is correct.
+			 * Fix return addr; ip is correct.
 			 * But this is not boostable
 			 */
 			*tos = orig_eip + (*tos - copy_eip);
 			goto no_change;
 		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
 			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
-			/* eip is correct. And this is boostable */
+			/* ip is correct. And this is boostable */
 			p->ainsn.boostable = 1;
 			goto no_change;
 		}
@@ -538,21 +538,21 @@ static void __kprobes resume_execution(struct kprobe *p,
 	}

 	if (p->ainsn.boostable == 0) {
-		if ((regs->eip > copy_eip) &&
-		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
+		if ((regs->ip > copy_eip) &&
+		    (regs->ip - copy_eip) + 5 < MAX_INSN_SIZE) {
 			/*
 			 * These instructions can be executed directly if it
 			 * jumps back to correct address.
 			 */
-			set_jmp_op((void *)regs->eip,
-				   (void *)orig_eip + (regs->eip - copy_eip));
+			set_jmp_op((void *)regs->ip,
+				   (void *)orig_eip + (regs->ip - copy_eip));
 			p->ainsn.boostable = 1;
 		} else {
 			p->ainsn.boostable = -1;
 		}
 	}

-	regs->eip = orig_eip + (regs->eip - copy_eip);
+	regs->ip = orig_eip + (regs->ip - copy_eip);

 no_change:
 	restore_btf();
@@ -578,8 +578,8 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	}

 	resume_execution(cur, regs, kcb);
-	regs->eflags |= kcb->kprobe_saved_eflags;
-	trace_hardirqs_fixup_flags(regs->eflags);
+	regs->flags |= kcb->kprobe_saved_eflags;
+	trace_hardirqs_fixup_flags(regs->flags);

 	/*Restore back the original saved kprobes variables and continue. */
 	if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -591,11 +591,11 @@ out:
 	preempt_enable_no_resched();

 	/*
-	 * if somebody else is singlestepping across a probe point, eflags
+	 * if somebody else is singlestepping across a probe point, flags
 	 * will have TF set, in which case, continue the remaining processing
 	 * of do_debug, as if this is not a probe hit.
 	 */
-	if (regs->eflags & TF_MASK)
+	if (regs->flags & TF_MASK)
 		return 0;

 	return 1;
@@ -612,12 +612,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		/*
 		 * We are here because the instruction being single
 		 * stepped caused a page fault. We reset the current
-		 * kprobe and the eip points back to the probe address
+		 * kprobe and the ip points back to the probe address
 		 * and allow the page fault handler to continue as a
 		 * normal page fault.
 		 */
-		regs->eip = (unsigned long)cur->addr;
-		regs->eflags |= kcb->kprobe_old_eflags;
+		regs->ip = (unsigned long)cur->addr;
+		regs->flags |= kcb->kprobe_old_eflags;
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
 		else
@@ -703,7 +703,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

 	kcb->jprobe_saved_regs = *regs;
-	kcb->jprobe_saved_esp = &regs->esp;
+	kcb->jprobe_saved_esp = &regs->sp;
 	addr = (unsigned long)(kcb->jprobe_saved_esp);

 	/*
@@ -715,9 +715,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	 */
 	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
 			MIN_STACK_SIZE(addr));
-	regs->eflags &= ~IF_MASK;
+	regs->flags &= ~IF_MASK;
 	trace_hardirqs_off();
-	regs->eip = (unsigned long)(jp->entry);
+	regs->ip = (unsigned long)(jp->entry);
 	return 1;
 }

@@ -736,15 +736,15 @@ void __kprobes jprobe_return(void)
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	u8 *addr = (u8 *) (regs->eip - 1);
+	u8 *addr = (u8 *) (regs->ip - 1);
 	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
 	struct jprobe *jp = container_of(p, struct jprobe, kp);

 	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
-		if (&regs->esp != kcb->jprobe_saved_esp) {
+		if (&regs->sp != kcb->jprobe_saved_esp) {
 			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
-			printk("current esp %p does not match saved esp %p\n",
-			       &regs->esp, kcb->jprobe_saved_esp);
+			printk("current sp %p does not match saved sp %p\n",
+			       &regs->sp, kcb->jprobe_saved_esp);
 			printk("Saved registers for jprobe %p\n", jp);
 			show_registers(saved_regs);
 			printk("Current registers\n");
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 10d66e323c7d..f6837cd3bed5 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -251,7 +251,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 {
 	__get_cpu_var(current_kprobe) = p;
 	kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
-		= (regs->eflags & (TF_MASK | IF_MASK));
+		= (regs->flags & (TF_MASK | IF_MASK));
 	if (is_IF_modifier(p->ainsn.insn))
 		kcb->kprobe_saved_rflags &= ~IF_MASK;
 }
@@ -271,20 +271,20 @@ static __always_inline void restore_btf(void)
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	clear_btf();
-	regs->eflags |= TF_MASK;
-	regs->eflags &= ~IF_MASK;
+	regs->flags |= TF_MASK;
+	regs->flags &= ~IF_MASK;
 	/*single step inline if the instruction is an int3*/
 	if (p->opcode == BREAKPOINT_INSTRUCTION)
-		regs->rip = (unsigned long)p->addr;
+		regs->ip = (unsigned long)p->addr;
 	else
-		regs->rip = (unsigned long)p->ainsn.insn;
+		regs->ip = (unsigned long)p->ainsn.insn;
 }

 /* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				      struct pt_regs *regs)
 {
-	unsigned long *sara = (unsigned long *)regs->rsp;
+	unsigned long *sara = (unsigned long *)regs->sp;

 	ri->ret_addr = (kprobe_opcode_t *) *sara;
 	/* Replace the return addr with trampoline addr */
@@ -295,7 +295,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
 	int ret = 0;
-	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
+	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
 	struct kprobe_ctlblk *kcb;

 	/*
@@ -311,8 +311,8 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 	if (p) {
 		if (kcb->kprobe_status == KPROBE_HIT_SS &&
 			*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
-			regs->eflags &= ~TF_MASK;
-			regs->eflags |= kcb->kprobe_saved_rflags;
+			regs->flags &= ~TF_MASK;
+			regs->flags |= kcb->kprobe_saved_rflags;
 			goto no_kprobe;
 		} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
 			/* TODO: Provide re-entrancy from
@@ -321,7 +321,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 			 * the instruction of the new probe.
 			 */
 			arch_disarm_kprobe(p);
-			regs->rip = (unsigned long)p->addr;
+			regs->ip = (unsigned long)p->addr;
 			reset_current_kprobe();
 			ret = 1;
 		} else {
@@ -345,7 +345,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 			 * another cpu right after we hit, no further
 			 * handling of this interrupt is appropriate
 			 */
-			regs->rip = (unsigned long)addr;
+			regs->ip = (unsigned long)addr;
 			ret = 1;
 			goto no_kprobe;
 		}
@@ -369,7 +369,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 			 * Back up over the (now missing) int3 and run
 			 * the original instruction.
 			 */
-			regs->rip = (unsigned long)addr;
+			regs->ip = (unsigned long)addr;
 			ret = 1;
 		}
 		/* Not one of ours: let kernel handle it */
@@ -454,7 +454,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	}

 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
-	regs->rip = orig_ret_address;
+	regs->ip = orig_ret_address;

 	reset_current_kprobe();
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
@@ -484,11 +484,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
  * interrupt. We have to fix up the stack as follows:
  *
  * 0) Except in the case of absolute or indirect jump or call instructions,
- * the new rip is relative to the copied instruction. We need to make
+ * the new ip is relative to the copied instruction. We need to make
  * it relative to the original instruction.
  *
  * 1) If the single-stepped instruction was pushfl, then the TF and IF
- * flags are set in the just-pushed eflags, and may need to be cleared.
+ * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
@@ -497,7 +497,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 static void __kprobes resume_execution(struct kprobe *p,
 		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
 {
-	unsigned long *tos = (unsigned long *)regs->rsp;
+	unsigned long *tos = (unsigned long *)regs->sp;
 	unsigned long copy_rip = (unsigned long)p->ainsn.insn;
 	unsigned long orig_rip = (unsigned long)p->addr;
 	kprobe_opcode_t *insn = p->ainsn.insn;
@@ -506,7 +506,7 @@ static void __kprobes resume_execution(struct kprobe *p,
 	if (*insn >= 0x40 && *insn <= 0x4f)
 		insn++;

-	regs->eflags &= ~TF_MASK;
+	regs->flags &= ~TF_MASK;
 	switch (*insn) {
 	case 0x9c:		/* pushfl */
 		*tos &= ~(TF_MASK | IF_MASK);
@@ -538,7 +538,8 @@ static void __kprobes resume_execution(struct kprobe *p,
 		break;
 	}

-	regs->rip = orig_rip + (regs->rip - copy_rip);
+	regs->ip = orig_rip + (regs->ip - copy_rip);
+
 no_change:
 	restore_btf();

@@ -559,8 +560,8 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	}

 	resume_execution(cur, regs, kcb);
-	regs->eflags |= kcb->kprobe_saved_rflags;
-	trace_hardirqs_fixup_flags(regs->eflags);
+	regs->flags |= kcb->kprobe_saved_rflags;
+	trace_hardirqs_fixup_flags(regs->flags);

 	/* Restore the original saved kprobes variables and continue. */
 	if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -572,11 +573,11 @@ out:
 	preempt_enable_no_resched();

 	/*
-	 * if somebody else is singlestepping across a probe point, eflags
+	 * if somebody else is singlestepping across a probe point, flags
 	 * will have TF set, in which case, continue the remaining processing
 	 * of do_debug, as if this is not a probe hit.
 	 */
-	if (regs->eflags & TF_MASK)
+	if (regs->flags & TF_MASK)
 		return 0;

 	return 1;
@@ -594,12 +595,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		/*
 		 * We are here because the instruction being single
 		 * stepped caused a page fault. We reset the current
-		 * kprobe and the rip points back to the probe address
+		 * kprobe and the ip points back to the probe address
 		 * and allow the page fault handler to continue as a
 		 * normal page fault.
 		 */
-		regs->rip = (unsigned long)cur->addr;
-		regs->eflags |= kcb->kprobe_old_rflags;
+		regs->ip = (unsigned long)cur->addr;
+		regs->flags |= kcb->kprobe_old_rflags;
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
 		else
@@ -629,9 +630,9 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * In case the user-specified fault handler returned
 		 * zero, try to fix up.
 		 */
-		fixup = search_exception_tables(regs->rip);
+		fixup = search_exception_tables(regs->ip);
 		if (fixup) {
-			regs->rip = fixup->fixup;
+			regs->ip = fixup->fixup;
 			return 1;
 		}

@@ -688,7 +689,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

 	kcb->jprobe_saved_regs = *regs;
-	kcb->jprobe_saved_rsp = (long *) regs->rsp;
+	kcb->jprobe_saved_rsp = (long *) regs->sp;
 	addr = (unsigned long)(kcb->jprobe_saved_rsp);
 	/*
 	 * As Linus pointed out, gcc assumes that the callee
@@ -699,9 +700,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	 */
 	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
 			MIN_STACK_SIZE(addr));
-	regs->eflags &= ~IF_MASK;
+	regs->flags &= ~IF_MASK;
 	trace_hardirqs_off();
-	regs->rip = (unsigned long)(jp->entry);
+	regs->ip = (unsigned long)(jp->entry);
 	return 1;
 }

@@ -720,15 +721,15 @@ void __kprobes jprobe_return(void)
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	u8 *addr = (u8 *) (regs->rip - 1);
+	u8 *addr = (u8 *) (regs->ip - 1);
 	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
 	struct jprobe *jp = container_of(p, struct jprobe, kp);

 	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
-		if ((unsigned long *)regs->rsp != kcb->jprobe_saved_rsp) {
+		if ((unsigned long *)regs->sp != kcb->jprobe_saved_rsp) {
 			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
-			printk("current rsp %p does not match saved rsp %p\n",
-			       (long *)regs->rsp, kcb->jprobe_saved_rsp);
+			printk("current sp %p does not match saved sp %p\n",
+			       (long *)regs->sp, kcb->jprobe_saved_rsp);
 			printk("Saved registers for jprobe %p\n", jp);
 			show_registers(saved_regs);
 			printk("Current registers\n");
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index d5462f228daf..c9f28e02e86d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -265,13 +265,13 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  * New with Core Duo processors, MWAIT can take some hints based on CPU
  * capability.
  */
-void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
 	if (!need_resched()) {
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
-			__mwait(eax, ecx);
+			__mwait(ax, cx);
 	}
 }

@@ -320,15 +320,15 @@ void __show_registers(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
 	unsigned long d0, d1, d2, d3, d6, d7;
-	unsigned long esp;
+	unsigned long sp;
 	unsigned short ss, gs;

 	if (user_mode_vm(regs)) {
-		esp = regs->esp;
-		ss = regs->xss & 0xffff;
+		sp = regs->sp;
+		ss = regs->ss & 0xffff;
 		savesegment(gs, gs);
 	} else {
-		esp = (unsigned long) (&regs->esp);
+		sp = (unsigned long) (&regs->sp);
 		savesegment(ss, ss);
 		savesegment(gs, gs);
 	}
@@ -341,17 +341,17 @@ void __show_registers(struct pt_regs *regs, int all)
 		init_utsname()->version);

 	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
-			0xffff & regs->xcs, regs->eip, regs->eflags,
+			0xffff & regs->cs, regs->ip, regs->flags,
 			smp_processor_id());
-	print_symbol("EIP is at %s\n", regs->eip);
+	print_symbol("EIP is at %s\n", regs->ip);

 	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-		regs->eax, regs->ebx, regs->ecx, regs->edx);
+		regs->ax, regs->bx, regs->cx, regs->dx);
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
-		regs->esi, regs->edi, regs->ebp, esp);
+		regs->si, regs->di, regs->bp, sp);
 	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
-	       regs->xds & 0xffff, regs->xes & 0xffff,
-	       regs->xfs & 0xffff, gs, ss);
+	       regs->ds & 0xffff, regs->es & 0xffff,
+	       regs->fs & 0xffff, gs, ss);

 	if (!all)
 		return;
@@ -379,12 +379,12 @@ void __show_registers(struct pt_regs *regs, int all)
 void show_regs(struct pt_regs *regs)
 {
 	__show_registers(regs, 1);
-	show_trace(NULL, regs, &regs->esp);
+	show_trace(NULL, regs, &regs->sp);
 }

 /*
- * This gets run with %ebx containing the
- * function to call, and %edx containing
+ * This gets run with %bx containing the
+ * function to call, and %dx containing
  * the "args".
  */
 extern void kernel_thread_helper(void);
@@ -398,16 +398,16 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)

 	memset(&regs, 0, sizeof(regs));

-	regs.ebx = (unsigned long) fn;
-	regs.edx = (unsigned long) arg;
+	regs.bx = (unsigned long) fn;
+	regs.dx = (unsigned long) arg;

-	regs.xds = __USER_DS;
-	regs.xes = __USER_DS;
-	regs.xfs = __KERNEL_PERCPU;
-	regs.orig_eax = -1;
-	regs.eip = (unsigned long) kernel_thread_helper;
-	regs.xcs = __KERNEL_CS | get_kernel_rpl();
-	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
+	regs.ds = __USER_DS;
+	regs.es = __USER_DS;
+	regs.fs = __KERNEL_PERCPU;
+	regs.orig_ax = -1;
+	regs.ip = (unsigned long) kernel_thread_helper;
+	regs.cs = __KERNEL_CS | get_kernel_rpl();
+	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

 	/* Ok, create the new process.. */
 	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
@@ -470,7 +470,7 @@ void prepare_to_copy(struct task_struct *tsk)
 	unlazy_fpu(tsk);
 }

-int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	unsigned long unused,
 	struct task_struct * p, struct pt_regs * regs)
 {
@@ -480,8 +480,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,

 	childregs = task_pt_regs(p);
 	*childregs = *regs;
-	childregs->eax = 0;
-	childregs->esp = esp;
+	childregs->ax = 0;
+	childregs->sp = sp;

 	p->thread.esp = (unsigned long) childregs;
 	p->thread.esp0 = (unsigned long) (childregs+1);
@@ -508,7 +508,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	 */
 	if (clone_flags & CLONE_SETTLS)
 		err = do_set_thread_area(p, -1,
-			(struct user_desc __user *)childregs->esi, 0);
+			(struct user_desc __user *)childregs->si, 0);

 	if (err && p->thread.io_bitmap_ptr) {
 		kfree(p->thread.io_bitmap_ptr);
@@ -527,7 +527,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 /* changed the size calculations - should hopefully work better. lbt */
 	dump->magic = CMAGIC;
 	dump->start_code = 0;
-	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
+	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
 	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
 	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
 	dump->u_dsize -= dump->u_tsize;
@@ -538,23 +538,23 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 	if (dump->start_stack < TASK_SIZE)
 		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

-	dump->regs.ebx = regs->ebx;
-	dump->regs.ecx = regs->ecx;
-	dump->regs.edx = regs->edx;
-	dump->regs.esi = regs->esi;
-	dump->regs.edi = regs->edi;
-	dump->regs.ebp = regs->ebp;
-	dump->regs.eax = regs->eax;
-	dump->regs.ds = regs->xds;
-	dump->regs.es = regs->xes;
-	dump->regs.fs = regs->xfs;
+	dump->regs.ebx = regs->bx;
+	dump->regs.ecx = regs->cx;
+	dump->regs.edx = regs->dx;
+	dump->regs.esi = regs->si;
+	dump->regs.edi = regs->di;
+	dump->regs.ebp = regs->bp;
+	dump->regs.eax = regs->ax;
+	dump->regs.ds = regs->ds;
+	dump->regs.es = regs->es;
+	dump->regs.fs = regs->fs;
 	savesegment(gs,dump->regs.gs);
-	dump->regs.orig_eax = regs->orig_eax;
-	dump->regs.eip = regs->eip;
-	dump->regs.cs = regs->xcs;
-	dump->regs.eflags = regs->eflags;
-	dump->regs.esp = regs->esp;
-	dump->regs.ss = regs->xss;
+	dump->regs.orig_eax = regs->orig_ax;
+	dump->regs.eip = regs->ip;
+	dump->regs.cs = regs->cs;
+	dump->regs.eflags = regs->flags;
+	dump->regs.esp = regs->sp;
+	dump->regs.ss = regs->ss;

 	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
 }
@@ -566,10 +566,10 @@ EXPORT_SYMBOL(dump_thread);
 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
 	struct pt_regs ptregs = *task_pt_regs(tsk);
-	ptregs.xcs &= 0xffff;
-	ptregs.xds &= 0xffff;
-	ptregs.xes &= 0xffff;
-	ptregs.xss &= 0xffff;
+	ptregs.cs &= 0xffff;
+	ptregs.ds &= 0xffff;
+	ptregs.es &= 0xffff;
+	ptregs.ss &= 0xffff;

 	elf_core_copy_regs(regs, &ptregs);

@@ -684,7 +684,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
  * More important, however, is the fact that this allows us much
  * more flexibility.
  *
- * The return value (in %eax) will be the "prev" task after
+ * The return value (in %ax) will be the "prev" task after
  * the task-switch, and shows up in ret_from_fork in entry.S,
  * for example.
  */
@@ -771,7 +771,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas

 asmlinkage int sys_fork(struct pt_regs regs)
 {
-	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
 }

 asmlinkage int sys_clone(struct pt_regs regs)
@@ -780,12 +780,12 @@ asmlinkage int sys_clone(struct pt_regs regs)
 	unsigned long newsp;
 	int __user *parent_tidptr, *child_tidptr;

-	clone_flags = regs.ebx;
-	newsp = regs.ecx;
-	parent_tidptr = (int __user *)regs.edx;
-	child_tidptr = (int __user *)regs.edi;
+	clone_flags = regs.bx;
+	newsp = regs.cx;
+	parent_tidptr = (int __user *)regs.dx;
+	child_tidptr = (int __user *)regs.di;
 	if (!newsp)
-		newsp = regs.esp;
+		newsp = regs.sp;
 	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
 }

@@ -801,7 +801,7 @@ asmlinkage int sys_clone(struct pt_regs regs)
  */
 asmlinkage int sys_vfork(struct pt_regs regs)
 {
-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
 }

 /*
@@ -812,13 +812,13 @@ asmlinkage int sys_execve(struct pt_regs regs)
 	int error;
 	char * filename;

-	filename = getname((char __user *) regs.ebx);
+	filename = getname((char __user *) regs.bx);
 	error = PTR_ERR(filename);
817 if (IS_ERR(filename)) 817 if (IS_ERR(filename))
818 goto out; 818 goto out;
819 error = do_execve(filename, 819 error = do_execve(filename,
820 (char __user * __user *) regs.ecx, 820 (char __user * __user *) regs.cx,
821 (char __user * __user *) regs.edx, 821 (char __user * __user *) regs.dx,
822 &regs); 822 &regs);
823 if (error == 0) { 823 if (error == 0) {
824 /* Make sure we don't return using sysenter.. */ 824 /* Make sure we don't return using sysenter.. */
@@ -834,24 +834,24 @@ out:
834 834
835unsigned long get_wchan(struct task_struct *p) 835unsigned long get_wchan(struct task_struct *p)
836{ 836{
837 unsigned long ebp, esp, eip; 837 unsigned long bp, sp, ip;
838 unsigned long stack_page; 838 unsigned long stack_page;
839 int count = 0; 839 int count = 0;
840 if (!p || p == current || p->state == TASK_RUNNING) 840 if (!p || p == current || p->state == TASK_RUNNING)
841 return 0; 841 return 0;
842 stack_page = (unsigned long)task_stack_page(p); 842 stack_page = (unsigned long)task_stack_page(p);
843 esp = p->thread.esp; 843 sp = p->thread.esp;
844 if (!stack_page || esp < stack_page || esp > top_esp+stack_page) 844 if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
845 return 0; 845 return 0;
846 /* include/asm-i386/system.h:switch_to() pushes ebp last. */ 846 /* include/asm-i386/system.h:switch_to() pushes bp last. */
847 ebp = *(unsigned long *) esp; 847 bp = *(unsigned long *) sp;
848 do { 848 do {
849 if (ebp < stack_page || ebp > top_ebp+stack_page) 849 if (bp < stack_page || bp > top_ebp+stack_page)
850 return 0; 850 return 0;
851 eip = *(unsigned long *) (ebp+4); 851 ip = *(unsigned long *) (bp+4);
852 if (!in_sched_functions(eip)) 852 if (!in_sched_functions(ip))
853 return eip; 853 return ip;
854 ebp = *(unsigned long *) ebp; 854 bp = *(unsigned long *) bp;
855 } while (count++ < 16); 855 } while (count++ < 16);
856 return 0; 856 return 0;
857} 857}
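
A note on the get_wchan() hunk just above: the change is a pure rename, and the walk itself still follows saved frame pointers, reading the caller's bp from *bp and the return address from the next word up. Below is a minimal user-space sketch of that walk over a fabricated stack, assuming frame-pointer-enabled frames (saved bp at *bp, return address at bp + sizeof(long), which covers both the +4 here and the +8 in the 64-bit variant); the addresses are illustrative, not kernel API.

	#include <stdio.h>

	int main(void)
	{
		unsigned long fake_stack[8] = { 0 };

		/* Outermost frame: saved bp of 0 terminates the walk. */
		fake_stack[4] = 0;
		fake_stack[5] = 0xc0100000UL;	/* its return address */
		/* Inner frame: saved bp points at the outer frame. */
		fake_stack[0] = (unsigned long)&fake_stack[4];
		fake_stack[1] = 0xc0200000UL;

		unsigned long bp = (unsigned long)&fake_stack[0];
		int count = 0;

		do {
			/* The return address sits one word above the saved bp. */
			unsigned long ip = *(unsigned long *)(bp + sizeof(long));
			printf("return ip = %#lx\n", ip);
			bp = *(unsigned long *)bp;	/* follow the saved-bp chain */
		} while (bp && count++ < 16);	/* same 16-frame cutoff as above */

		return 0;
	}

The kernel version additionally range-checks bp against the task's stack page before each dereference; the sketch omits that since its stack is local.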
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ae5eca17aa3c..efbb1a2eab97 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -257,13 +257,13 @@ void cpu_idle(void)
257 * New with Core Duo processors, MWAIT can take some hints based on CPU 257 * New with Core Duo processors, MWAIT can take some hints based on CPU
258 * capability. 258 * capability.
259 */ 259 */
260void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) 260void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
261{ 261{
262 if (!need_resched()) { 262 if (!need_resched()) {
263 __monitor((void *)&current_thread_info()->flags, 0, 0); 263 __monitor((void *)&current_thread_info()->flags, 0, 0);
264 smp_mb(); 264 smp_mb();
265 if (!need_resched()) 265 if (!need_resched())
266 __mwait(eax, ecx); 266 __mwait(ax, cx);
267 } 267 }
268} 268}
269 269
@@ -330,16 +330,16 @@ void __show_regs(struct pt_regs * regs)
330 init_utsname()->release, 330 init_utsname()->release,
331 (int)strcspn(init_utsname()->version, " "), 331 (int)strcspn(init_utsname()->version, " "),
332 init_utsname()->version); 332 init_utsname()->version);
333 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip); 333 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
334 printk_address(regs->rip); 334 printk_address(regs->ip);
335 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, 335 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
336 regs->eflags); 336 regs->flags);
337 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", 337 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
338 regs->rax, regs->rbx, regs->rcx); 338 regs->ax, regs->bx, regs->cx);
339 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", 339 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
340 regs->rdx, regs->rsi, regs->rdi); 340 regs->dx, regs->si, regs->di);
341 printk("RBP: %016lx R08: %016lx R09: %016lx\n", 341 printk("RBP: %016lx R08: %016lx R09: %016lx\n",
342 regs->rbp, regs->r8, regs->r9); 342 regs->bp, regs->r8, regs->r9);
343 printk("R10: %016lx R11: %016lx R12: %016lx\n", 343 printk("R10: %016lx R11: %016lx R12: %016lx\n",
344 regs->r10, regs->r11, regs->r12); 344 regs->r10, regs->r11, regs->r12);
345 printk("R13: %016lx R14: %016lx R15: %016lx\n", 345 printk("R13: %016lx R14: %016lx R15: %016lx\n",
@@ -476,7 +476,7 @@ void prepare_to_copy(struct task_struct *tsk)
476 unlazy_fpu(tsk); 476 unlazy_fpu(tsk);
477} 477}
478 478
479int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 479int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
480 unsigned long unused, 480 unsigned long unused,
481 struct task_struct * p, struct pt_regs * regs) 481 struct task_struct * p, struct pt_regs * regs)
482{ 482{
@@ -488,10 +488,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
488 (THREAD_SIZE + task_stack_page(p))) - 1; 488 (THREAD_SIZE + task_stack_page(p))) - 1;
489 *childregs = *regs; 489 *childregs = *regs;
490 490
491 childregs->rax = 0; 491 childregs->ax = 0;
492 childregs->rsp = rsp; 492 childregs->sp = sp;
493 if (rsp == ~0UL) 493 if (sp == ~0UL)
494 childregs->rsp = (unsigned long)childregs; 494 childregs->sp = (unsigned long)childregs;
495 495
496 p->thread.rsp = (unsigned long) childregs; 496 p->thread.rsp = (unsigned long) childregs;
497 p->thread.rsp0 = (unsigned long) (childregs+1); 497 p->thread.rsp0 = (unsigned long) (childregs+1);
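
One behavioral detail survives the renames in this hunk: sp == ~0UL is a sentinel meaning the child is a kernel thread with no user stack, in which case its stack pointer is aimed at the register save area itself. A tiny sketch of the sentinel pattern, with hypothetical names:

	#include <stdio.h>

	#define NO_USER_STACK (~0UL)	/* sentinel, as tested in copy_thread() above */

	static unsigned long child_sp(unsigned long requested, unsigned long regs_area)
	{
		/* Kernel threads pass ~0UL: start the stack at the saved regs. */
		return requested == NO_USER_STACK ? regs_area : requested;
	}

	int main(void)
	{
		printf("%#lx\n", child_sp(NO_USER_STACK, 0xffff810000001f58UL));
		printf("%#lx\n", child_sp(0x7fffffffe000UL, 0xffff810000001f58UL));
		return 0;
	}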
@@ -525,7 +525,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
525#ifdef CONFIG_IA32_EMULATION 525#ifdef CONFIG_IA32_EMULATION
526 if (test_thread_flag(TIF_IA32)) 526 if (test_thread_flag(TIF_IA32))
527 err = do_set_thread_area(p, -1, 527 err = do_set_thread_area(p, -1,
528 (struct user_desc __user *)childregs->rsi, 0); 528 (struct user_desc __user *)childregs->si, 0);
529 else 529 else
530#endif 530#endif
531 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 531 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
@@ -732,7 +732,7 @@ void set_personality_64bit(void)
732 732
733asmlinkage long sys_fork(struct pt_regs *regs) 733asmlinkage long sys_fork(struct pt_regs *regs)
734{ 734{
735 return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL); 735 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
736} 736}
737 737
738asmlinkage long 738asmlinkage long
@@ -740,7 +740,7 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
740 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) 740 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
741{ 741{
742 if (!newsp) 742 if (!newsp)
743 newsp = regs->rsp; 743 newsp = regs->sp;
744 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); 744 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
745} 745}
746 746
@@ -756,14 +756,14 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
756 */ 756 */
757asmlinkage long sys_vfork(struct pt_regs *regs) 757asmlinkage long sys_vfork(struct pt_regs *regs)
758{ 758{
759 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0, 759 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
760 NULL, NULL); 760 NULL, NULL);
761} 761}
762 762
763unsigned long get_wchan(struct task_struct *p) 763unsigned long get_wchan(struct task_struct *p)
764{ 764{
765 unsigned long stack; 765 unsigned long stack;
766 u64 fp,rip; 766 u64 fp,ip;
767 int count = 0; 767 int count = 0;
768 768
769 if (!p || p == current || p->state==TASK_RUNNING) 769 if (!p || p == current || p->state==TASK_RUNNING)
@@ -776,9 +776,9 @@ unsigned long get_wchan(struct task_struct *p)
776 if (fp < (unsigned long)stack || 776 if (fp < (unsigned long)stack ||
777 fp > (unsigned long)stack+THREAD_SIZE) 777 fp > (unsigned long)stack+THREAD_SIZE)
778 return 0; 778 return 0;
779 rip = *(u64 *)(fp+8); 779 ip = *(u64 *)(fp+8);
780 if (!in_sched_functions(rip)) 780 if (!in_sched_functions(ip))
781 return rip; 781 return ip;
782 fp = *(u64 *)fp; 782 fp = *(u64 *)fp;
783 } while (count++ < 16); 783 } while (count++ < 16);
784 return 0; 784 return 0;
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
index 512f8412b799..f81e2f1827d4 100644
--- a/arch/x86/kernel/ptrace_32.c
+++ b/arch/x86/kernel/ptrace_32.c
@@ -39,10 +39,10 @@
39 39
40static long *pt_regs_access(struct pt_regs *regs, unsigned long regno) 40static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
41{ 41{
42 BUILD_BUG_ON(offsetof(struct pt_regs, ebx) != 0); 42 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
43 if (regno > FS) 43 if (regno > FS)
44 --regno; 44 --regno;
45 return &regs->ebx + regno; 45 return &regs->bx + regno;
46} 46}
47 47
48static int putreg(struct task_struct *child, 48static int putreg(struct task_struct *child,
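
The pt_regs_access() helper above is the one place in this file where layout matters rather than naming: because every slot it serves is a long and bx now sits at offset 0 (hence the BUILD_BUG_ON), a pointer to bx plus the ptrace register number indexes struct pt_regs like an array, with --regno skipping a hole in the ptrace numbering above FS. A user-space sketch of the same trick over a toy struct standing in for pt_regs:

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	struct toy_regs {
		long bx, cx, dx, si, di, bp, ax;	/* illustrative order */
	};

	static long *reg_slot(struct toy_regs *r, unsigned int regno)
	{
		/* BUILD_BUG_ON in the kernel; a runtime assert here. */
		assert(offsetof(struct toy_regs, bx) == 0);
		return &r->bx + regno;
	}

	int main(void)
	{
		struct toy_regs r = { .bx = 1, .cx = 2, .dx = 3 };

		printf("slot 0 = %ld, slot 2 = %ld\n",
		       *reg_slot(&r, 0), *reg_slot(&r, 2));
		*reg_slot(&r, 1) = 42;		/* writes r.cx */
		printf("cx = %ld\n", r.cx);
		return 0;
	}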
@@ -80,7 +80,7 @@ static int putreg(struct task_struct *child,
80 clear_tsk_thread_flag(child, TIF_FORCED_TF); 80 clear_tsk_thread_flag(child, TIF_FORCED_TF);
81 else if (test_tsk_thread_flag(child, TIF_FORCED_TF)) 81 else if (test_tsk_thread_flag(child, TIF_FORCED_TF))
82 value |= X86_EFLAGS_TF; 82 value |= X86_EFLAGS_TF;
83 value |= regs->eflags & ~FLAG_MASK; 83 value |= regs->flags & ~FLAG_MASK;
84 break; 84 break;
85 } 85 }
86 *pt_regs_access(regs, regno) = value; 86 *pt_regs_access(regs, regno) = value;
@@ -98,7 +98,7 @@ static unsigned long getreg(struct task_struct *child, unsigned long regno)
98 /* 98 /*
99 * If the debugger set TF, hide it from the readout. 99 * If the debugger set TF, hide it from the readout.
100 */ 100 */
101 retval = regs->eflags; 101 retval = regs->flags;
102 if (test_tsk_thread_flag(child, TIF_FORCED_TF)) 102 if (test_tsk_thread_flag(child, TIF_FORCED_TF))
103 retval &= ~X86_EFLAGS_TF; 103 retval &= ~X86_EFLAGS_TF;
104 break; 104 break;
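
putreg() and getreg() above also launder the flags word around the rename: on writes, only the bits in FLAG_MASK may come from the debugger, the rest being refilled from the live regs->flags, and on reads TF is hidden again if the kernel set it for single-stepping. A worked sketch of the write-side merge (the mask value is illustrative, not the kernel's):

	#include <stdio.h>

	#define FLAG_MASK 0x00050dd5UL	/* illustrative: user-writable flag bits */

	int main(void)
	{
		unsigned long flags = 0x00000246UL;	/* current regs->flags  */
		unsigned long value = 0xffffffffUL;	/* hostile ptrace write */

		value &= FLAG_MASK;		/* keep only writable bits  */
		value |= flags & ~FLAG_MASK;	/* preserve everything else */
		printf("stored flags = %#lx\n", value);
		return 0;
	}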
@@ -369,8 +369,8 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
369 info.si_signo = SIGTRAP; 369 info.si_signo = SIGTRAP;
370 info.si_code = TRAP_BRKPT; 370 info.si_code = TRAP_BRKPT;
371 371
372 /* User-mode eip? */ 372 /* User-mode ip? */
373 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL; 373 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
374 374
375 /* Send us the fake SIGTRAP */ 375 /* Send us the fake SIGTRAP */
376 force_sig_info(SIGTRAP, &info, tsk); 376 force_sig_info(SIGTRAP, &info, tsk);
@@ -392,12 +392,12 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
392 392
393 /* do the secure computing check first */ 393 /* do the secure computing check first */
394 if (!entryexit) 394 if (!entryexit)
395 secure_computing(regs->orig_eax); 395 secure_computing(regs->orig_ax);
396 396
397 if (unlikely(current->audit_context)) { 397 if (unlikely(current->audit_context)) {
398 if (entryexit) 398 if (entryexit)
399 audit_syscall_exit(AUDITSC_RESULT(regs->eax), 399 audit_syscall_exit(AUDITSC_RESULT(regs->ax),
400 regs->eax); 400 regs->ax);
401 /* Debug traps, when using PTRACE_SINGLESTEP, must be sent only 401 /* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
402 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is 402 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
403 * not used, entry.S will call us only on syscall exit, not 403 * not used, entry.S will call us only on syscall exit, not
@@ -445,13 +445,13 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
445 ret = is_sysemu; 445 ret = is_sysemu;
446out: 446out:
447 if (unlikely(current->audit_context) && !entryexit) 447 if (unlikely(current->audit_context) && !entryexit)
448 audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax, 448 audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
449 regs->ebx, regs->ecx, regs->edx, regs->esi); 449 regs->bx, regs->cx, regs->dx, regs->si);
450 if (ret == 0) 450 if (ret == 0)
451 return 0; 451 return 0;
452 452
453 regs->orig_eax = -1; /* force skip of syscall restarting */ 453 regs->orig_ax = -1; /* force skip of syscall restarting */
454 if (unlikely(current->audit_context)) 454 if (unlikely(current->audit_context))
455 audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax); 455 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
456 return 1; 456 return 1;
457} 457}
diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
index 4ba66d8af717..bee20bb1a6c0 100644
--- a/arch/x86/kernel/ptrace_64.c
+++ b/arch/x86/kernel/ptrace_64.c
@@ -119,7 +119,7 @@ static int putreg(struct task_struct *child,
119 clear_tsk_thread_flag(child, TIF_FORCED_TF); 119 clear_tsk_thread_flag(child, TIF_FORCED_TF);
120 else if (test_tsk_thread_flag(child, TIF_FORCED_TF)) 120 else if (test_tsk_thread_flag(child, TIF_FORCED_TF))
121 value |= X86_EFLAGS_TF; 121 value |= X86_EFLAGS_TF;
122 value |= regs->eflags & ~FLAG_MASK; 122 value |= regs->flags & ~FLAG_MASK;
123 break; 123 break;
124 case offsetof(struct user_regs_struct,cs): 124 case offsetof(struct user_regs_struct,cs):
125 if ((value & 3) != 3) 125 if ((value & 3) != 3)
@@ -168,7 +168,7 @@ static unsigned long getreg(struct task_struct *child, unsigned long regno)
168 /* 168 /*
169 * If the debugger set TF, hide it from the readout. 169 * If the debugger set TF, hide it from the readout.
170 */ 170 */
171 val = regs->eflags; 171 val = regs->flags;
172 if (test_tsk_thread_flag(child, TIF_IA32)) 172 if (test_tsk_thread_flag(child, TIF_IA32))
173 val &= 0xffffffff; 173 val &= 0xffffffff;
174 if (test_tsk_thread_flag(child, TIF_FORCED_TF)) 174 if (test_tsk_thread_flag(child, TIF_FORCED_TF))
@@ -383,9 +383,9 @@ static void syscall_trace(struct pt_regs *regs)
383{ 383{
384 384
385#if 0 385#if 0
386 printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n", 386 printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
387 current->comm, 387 current->comm,
388 regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0), 388 regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
389 current_thread_info()->flags, current->ptrace); 389 current_thread_info()->flags, current->ptrace);
390#endif 390#endif
391 391
@@ -405,7 +405,7 @@ static void syscall_trace(struct pt_regs *regs)
405asmlinkage void syscall_trace_enter(struct pt_regs *regs) 405asmlinkage void syscall_trace_enter(struct pt_regs *regs)
406{ 406{
407 /* do the secure computing check first */ 407 /* do the secure computing check first */
408 secure_computing(regs->orig_rax); 408 secure_computing(regs->orig_ax);
409 409
410 if (test_thread_flag(TIF_SYSCALL_TRACE) 410 if (test_thread_flag(TIF_SYSCALL_TRACE)
411 && (current->ptrace & PT_PTRACED)) 411 && (current->ptrace & PT_PTRACED))
@@ -414,14 +414,14 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
414 if (unlikely(current->audit_context)) { 414 if (unlikely(current->audit_context)) {
415 if (test_thread_flag(TIF_IA32)) { 415 if (test_thread_flag(TIF_IA32)) {
416 audit_syscall_entry(AUDIT_ARCH_I386, 416 audit_syscall_entry(AUDIT_ARCH_I386,
417 regs->orig_rax, 417 regs->orig_ax,
418 regs->rbx, regs->rcx, 418 regs->bx, regs->cx,
419 regs->rdx, regs->rsi); 419 regs->dx, regs->si);
420 } else { 420 } else {
421 audit_syscall_entry(AUDIT_ARCH_X86_64, 421 audit_syscall_entry(AUDIT_ARCH_X86_64,
422 regs->orig_rax, 422 regs->orig_ax,
423 regs->rdi, regs->rsi, 423 regs->di, regs->si,
424 regs->rdx, regs->r10); 424 regs->dx, regs->r10);
425 } 425 }
426 } 426 }
427} 427}
@@ -429,7 +429,7 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
429asmlinkage void syscall_trace_leave(struct pt_regs *regs) 429asmlinkage void syscall_trace_leave(struct pt_regs *regs)
430{ 430{
431 if (unlikely(current->audit_context)) 431 if (unlikely(current->audit_context))
432 audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax); 432 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
433 433
434 if ((test_thread_flag(TIF_SYSCALL_TRACE) 434 if ((test_thread_flag(TIF_SYSCALL_TRACE)
435 || test_thread_flag(TIF_SINGLESTEP)) 435 || test_thread_flag(TIF_SINGLESTEP))
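
The two audit_syscall_entry() calls above encode the argument conventions this file must bridge: a 32-bit task passes its first four syscall arguments in bx, cx, dx, si, while the native 64-bit convention uses di, si, dx, r10 (r10 standing in for cx, which the syscall instruction clobbers). A trivial tabulation of the two, using the renamed struct pt_regs member names:

	#include <stdio.h>

	int main(void)
	{
		static const char *i386_args[]  = { "bx", "cx", "dx", "si" };
		static const char *amd64_args[] = { "di", "si", "dx", "r10" };

		for (int i = 0; i < 4; i++)
			printf("arg%d: i386=%-3s x86-64=%-3s\n",
			       i + 1, i386_args[i], amd64_args[i]);
		return 0;
	}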
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 0a7c812212c9..40fd3515ccf1 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -82,14 +82,14 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
82} 82}
83 83
84asmlinkage int 84asmlinkage int
85sys_sigaltstack(unsigned long ebx) 85sys_sigaltstack(unsigned long bx)
86{ 86{
87 /* This is needed to make gcc realize it doesn't own the "struct pt_regs" */ 87 /* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
88 struct pt_regs *regs = (struct pt_regs *)&ebx; 88 struct pt_regs *regs = (struct pt_regs *)&bx;
89 const stack_t __user *uss = (const stack_t __user *)ebx; 89 const stack_t __user *uss = (const stack_t __user *)bx;
90 stack_t __user *uoss = (stack_t __user *)regs->ecx; 90 stack_t __user *uoss = (stack_t __user *)regs->cx;
91 91
92 return do_sigaltstack(uss, uoss, regs->esp); 92 return do_sigaltstack(uss, uoss, regs->sp);
93} 93}
94 94
95 95
@@ -105,17 +105,17 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
105 /* Always make any pending restarted system calls return -EINTR */ 105 /* Always make any pending restarted system calls return -EINTR */
106 current_thread_info()->restart_block.fn = do_no_restart_syscall; 106 current_thread_info()->restart_block.fn = do_no_restart_syscall;
107 107
108#define COPY(x) err |= __get_user(regs->x, &sc->x) 108#define COPY(x) err |= __get_user(regs->x, &sc->e ## x)
109 109
110#define COPY_SEG(seg) \ 110#define COPY_SEG(seg) \
111 { unsigned short tmp; \ 111 { unsigned short tmp; \
112 err |= __get_user(tmp, &sc->seg); \ 112 err |= __get_user(tmp, &sc->seg); \
113 regs->x##seg = tmp; } 113 regs->seg = tmp; }
114 114
115#define COPY_SEG_STRICT(seg) \ 115#define COPY_SEG_STRICT(seg) \
116 { unsigned short tmp; \ 116 { unsigned short tmp; \
117 err |= __get_user(tmp, &sc->seg); \ 117 err |= __get_user(tmp, &sc->seg); \
118 regs->x##seg = tmp|3; } 118 regs->seg = tmp|3; }
119 119
120#define GET_SEG(seg) \ 120#define GET_SEG(seg) \
121 { unsigned short tmp; \ 121 { unsigned short tmp; \
@@ -131,22 +131,22 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
131 COPY_SEG(fs); 131 COPY_SEG(fs);
132 COPY_SEG(es); 132 COPY_SEG(es);
133 COPY_SEG(ds); 133 COPY_SEG(ds);
134 COPY(edi); 134 COPY(di);
135 COPY(esi); 135 COPY(si);
136 COPY(ebp); 136 COPY(bp);
137 COPY(esp); 137 COPY(sp);
138 COPY(ebx); 138 COPY(bx);
139 COPY(edx); 139 COPY(dx);
140 COPY(ecx); 140 COPY(cx);
141 COPY(eip); 141 COPY(ip);
142 COPY_SEG_STRICT(cs); 142 COPY_SEG_STRICT(cs);
143 COPY_SEG_STRICT(ss); 143 COPY_SEG_STRICT(ss);
144 144
145 { 145 {
146 unsigned int tmpflags; 146 unsigned int tmpflags;
147 err |= __get_user(tmpflags, &sc->eflags); 147 err |= __get_user(tmpflags, &sc->eflags);
148 regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 148 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
149 regs->orig_eax = -1; /* disable syscall checks */ 149 regs->orig_ax = -1; /* disable syscall checks */
150 } 150 }
151 151
152 { 152 {
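
The COPY() change at the top of this hunk pair is the subtle one: struct pt_regs members lost their e prefix, but the user-visible struct sigcontext keeps eax, ebx and friends for ABI reasons, so the macro now pastes the prefix back on the sigcontext side with the preprocessor ## operator (the 64-bit file adds an analogous COPYR() pasting an r prefix). A standalone sketch with toy structs, not the kernel definitions:

	#include <stdio.h>

	struct toy_pt_regs    { long ax, bx; };
	struct toy_sigcontext { long eax, ebx; };

	/* COPY(ax) expands to regs->ax = sc->eax */
	#define COPY(x)	(regs->x = sc->e ## x)

	int main(void)
	{
		struct toy_sigcontext s = { 111, 222 };
		struct toy_pt_regs r = { 0, 0 };
		struct toy_pt_regs *regs = &r;
		struct toy_sigcontext *sc = &s;

		COPY(ax);
		COPY(bx);
		printf("ax=%ld bx=%ld\n", regs->ax, regs->bx);	/* 111 222 */
		return 0;
	}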
@@ -175,9 +175,9 @@ badframe:
175asmlinkage int sys_sigreturn(unsigned long __unused) 175asmlinkage int sys_sigreturn(unsigned long __unused)
176{ 176{
177 struct pt_regs *regs = (struct pt_regs *) &__unused; 177 struct pt_regs *regs = (struct pt_regs *) &__unused;
178 struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8); 178 struct sigframe __user *frame = (struct sigframe __user *)(regs->sp - 8);
179 sigset_t set; 179 sigset_t set;
180 int eax; 180 int ax;
181 181
182 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 182 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
183 goto badframe; 183 goto badframe;
@@ -193,17 +193,17 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
193 recalc_sigpending(); 193 recalc_sigpending();
194 spin_unlock_irq(&current->sighand->siglock); 194 spin_unlock_irq(&current->sighand->siglock);
195 195
196 if (restore_sigcontext(regs, &frame->sc, &eax)) 196 if (restore_sigcontext(regs, &frame->sc, &ax))
197 goto badframe; 197 goto badframe;
198 return eax; 198 return ax;
199 199
200badframe: 200badframe:
201 if (show_unhandled_signals && printk_ratelimit()) 201 if (show_unhandled_signals && printk_ratelimit())
202 printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx" 202 printk("%s%s[%d] bad frame in sigreturn frame:%p ip:%lx"
203 " esp:%lx oeax:%lx\n", 203 " sp:%lx oeax:%lx\n",
204 task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, 204 task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
205 current->comm, task_pid_nr(current), frame, regs->eip, 205 current->comm, task_pid_nr(current), frame, regs->ip,
206 regs->esp, regs->orig_eax); 206 regs->sp, regs->orig_ax);
207 207
208 force_sig(SIGSEGV, current); 208 force_sig(SIGSEGV, current);
209 return 0; 209 return 0;
@@ -212,9 +212,9 @@ badframe:
212asmlinkage int sys_rt_sigreturn(unsigned long __unused) 212asmlinkage int sys_rt_sigreturn(unsigned long __unused)
213{ 213{
214 struct pt_regs *regs = (struct pt_regs *) &__unused; 214 struct pt_regs *regs = (struct pt_regs *) &__unused;
215 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4); 215 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->sp - 4);
216 sigset_t set; 216 sigset_t set;
217 int eax; 217 int ax;
218 218
219 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 219 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
220 goto badframe; 220 goto badframe;
@@ -227,13 +227,13 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
227 recalc_sigpending(); 227 recalc_sigpending();
228 spin_unlock_irq(&current->sighand->siglock); 228 spin_unlock_irq(&current->sighand->siglock);
229 229
230 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax)) 230 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
231 goto badframe; 231 goto badframe;
232 232
233 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT) 233 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
234 goto badframe; 234 goto badframe;
235 235
236 return eax; 236 return ax;
237 237
238badframe: 238badframe:
239 force_sig(SIGSEGV, current); 239 force_sig(SIGSEGV, current);
@@ -250,27 +250,27 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
250{ 250{
251 int tmp, err = 0; 251 int tmp, err = 0;
252 252
253 err |= __put_user(regs->xfs, (unsigned int __user *)&sc->fs); 253 err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
254 savesegment(gs, tmp); 254 savesegment(gs, tmp);
255 err |= __put_user(tmp, (unsigned int __user *)&sc->gs); 255 err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
256 256
257 err |= __put_user(regs->xes, (unsigned int __user *)&sc->es); 257 err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
258 err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds); 258 err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
259 err |= __put_user(regs->edi, &sc->edi); 259 err |= __put_user(regs->di, &sc->edi);
260 err |= __put_user(regs->esi, &sc->esi); 260 err |= __put_user(regs->si, &sc->esi);
261 err |= __put_user(regs->ebp, &sc->ebp); 261 err |= __put_user(regs->bp, &sc->ebp);
262 err |= __put_user(regs->esp, &sc->esp); 262 err |= __put_user(regs->sp, &sc->esp);
263 err |= __put_user(regs->ebx, &sc->ebx); 263 err |= __put_user(regs->bx, &sc->ebx);
264 err |= __put_user(regs->edx, &sc->edx); 264 err |= __put_user(regs->dx, &sc->edx);
265 err |= __put_user(regs->ecx, &sc->ecx); 265 err |= __put_user(regs->cx, &sc->ecx);
266 err |= __put_user(regs->eax, &sc->eax); 266 err |= __put_user(regs->ax, &sc->eax);
267 err |= __put_user(current->thread.trap_no, &sc->trapno); 267 err |= __put_user(current->thread.trap_no, &sc->trapno);
268 err |= __put_user(current->thread.error_code, &sc->err); 268 err |= __put_user(current->thread.error_code, &sc->err);
269 err |= __put_user(regs->eip, &sc->eip); 269 err |= __put_user(regs->ip, &sc->eip);
270 err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs); 270 err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
271 err |= __put_user(regs->eflags, &sc->eflags); 271 err |= __put_user(regs->flags, &sc->eflags);
272 err |= __put_user(regs->esp, &sc->esp_at_signal); 272 err |= __put_user(regs->sp, &sc->esp_at_signal);
273 err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss); 273 err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
274 274
275 tmp = save_i387(fpstate); 275 tmp = save_i387(fpstate);
276 if (tmp < 0) 276 if (tmp < 0)
@@ -291,36 +291,36 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
291static inline void __user * 291static inline void __user *
292get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) 292get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
293{ 293{
294 unsigned long esp; 294 unsigned long sp;
295 295
296 /* Default to using normal stack */ 296 /* Default to using normal stack */
297 esp = regs->esp; 297 sp = regs->sp;
298 298
299 /* 299 /*
300 * If we are on the alternate signal stack and would overflow it, don't. 300 * If we are on the alternate signal stack and would overflow it, don't.
301 * Return an always-bogus address instead so we will die with SIGSEGV. 301 * Return an always-bogus address instead so we will die with SIGSEGV.
302 */ 302 */
303 if (on_sig_stack(esp) && !likely(on_sig_stack(esp - frame_size))) 303 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
304 return (void __user *) -1L; 304 return (void __user *) -1L;
305 305
306 /* This is the X/Open sanctioned signal stack switching. */ 306 /* This is the X/Open sanctioned signal stack switching. */
307 if (ka->sa.sa_flags & SA_ONSTACK) { 307 if (ka->sa.sa_flags & SA_ONSTACK) {
308 if (sas_ss_flags(esp) == 0) 308 if (sas_ss_flags(sp) == 0)
309 esp = current->sas_ss_sp + current->sas_ss_size; 309 sp = current->sas_ss_sp + current->sas_ss_size;
310 } 310 }
311 311
312 /* This is the legacy signal stack switching. */ 312 /* This is the legacy signal stack switching. */
313 else if ((regs->xss & 0xffff) != __USER_DS && 313 else if ((regs->ss & 0xffff) != __USER_DS &&
314 !(ka->sa.sa_flags & SA_RESTORER) && 314 !(ka->sa.sa_flags & SA_RESTORER) &&
315 ka->sa.sa_restorer) { 315 ka->sa.sa_restorer) {
316 esp = (unsigned long) ka->sa.sa_restorer; 316 sp = (unsigned long) ka->sa.sa_restorer;
317 } 317 }
318 318
319 esp -= frame_size; 319 sp -= frame_size;
320 /* Align the stack pointer according to the i386 ABI, 320 /* Align the stack pointer according to the i386 ABI,
321 * i.e. so that on function entry ((sp + 4) & 15) == 0. */ 321 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
322 esp = ((esp + 4) & -16ul) - 4; 322 sp = ((sp + 4) & -16ul) - 4;
323 return (void __user *) esp; 323 return (void __user *) sp;
324} 324}
325 325
326/* These symbols are defined with the addresses in the vsyscall page. 326/* These symbols are defined with the addresses in the vsyscall page.
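
The last two lines of get_sigframe() above implement the contract stated in the comment: after subtracting the frame, sp is adjusted so that ((sp + 4) & 15) == 0, i.e. it looks like the stack immediately after a call into a 16-byte-aligned function. A brute-force check of the formula:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		for (unsigned long sp = 0xbfff0000UL; sp < 0xbfff0020UL; sp++) {
			unsigned long aligned = ((sp + 4) & -16UL) - 4;

			assert(((aligned + 4) & 15) == 0);	/* ABI condition */
			assert(aligned <= sp);			/* never grows   */
		}
		printf("((sp + 4) & -16) - 4 always yields ((sp + 4) & 15) == 0\n");
		return 0;
	}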
@@ -387,16 +387,16 @@ static int setup_frame(int sig, struct k_sigaction *ka,
387 goto give_sigsegv; 387 goto give_sigsegv;
388 388
389 /* Set up registers for signal handler */ 389 /* Set up registers for signal handler */
390 regs->esp = (unsigned long) frame; 390 regs->sp = (unsigned long) frame;
391 regs->eip = (unsigned long) ka->sa.sa_handler; 391 regs->ip = (unsigned long) ka->sa.sa_handler;
392 regs->eax = (unsigned long) sig; 392 regs->ax = (unsigned long) sig;
393 regs->edx = (unsigned long) 0; 393 regs->dx = (unsigned long) 0;
394 regs->ecx = (unsigned long) 0; 394 regs->cx = (unsigned long) 0;
395 395
396 regs->xds = __USER_DS; 396 regs->ds = __USER_DS;
397 regs->xes = __USER_DS; 397 regs->es = __USER_DS;
398 regs->xss = __USER_DS; 398 regs->ss = __USER_DS;
399 regs->xcs = __USER_CS; 399 regs->cs = __USER_CS;
400 400
401 /* 401 /*
402 * Clear TF when entering the signal handler, but 402 * Clear TF when entering the signal handler, but
@@ -404,13 +404,13 @@ static int setup_frame(int sig, struct k_sigaction *ka,
404 * The tracer may want to single-step inside the 404 * The tracer may want to single-step inside the
405 * handler too. 405 * handler too.
406 */ 406 */
407 regs->eflags &= ~TF_MASK; 407 regs->flags &= ~TF_MASK;
408 if (test_thread_flag(TIF_SINGLESTEP)) 408 if (test_thread_flag(TIF_SINGLESTEP))
409 ptrace_notify(SIGTRAP); 409 ptrace_notify(SIGTRAP);
410 410
411#if DEBUG_SIG 411#if DEBUG_SIG
412 printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", 412 printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
413 current->comm, current->pid, frame, regs->eip, frame->pretcode); 413 current->comm, current->pid, frame, regs->ip, frame->pretcode);
414#endif 414#endif
415 415
416 return 0; 416 return 0;
@@ -450,7 +450,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
450 err |= __put_user(0, &frame->uc.uc_flags); 450 err |= __put_user(0, &frame->uc.uc_flags);
451 err |= __put_user(0, &frame->uc.uc_link); 451 err |= __put_user(0, &frame->uc.uc_link);
452 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 452 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
453 err |= __put_user(sas_ss_flags(regs->esp), 453 err |= __put_user(sas_ss_flags(regs->sp),
454 &frame->uc.uc_stack.ss_flags); 454 &frame->uc.uc_stack.ss_flags);
455 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 455 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
456 err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, 456 err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
@@ -466,7 +466,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
466 err |= __put_user(restorer, &frame->pretcode); 466 err |= __put_user(restorer, &frame->pretcode);
467 467
468 /* 468 /*
469 * This is movl $,%eax ; int $0x80 469 * This is movl $,%ax ; int $0x80
470 * 470 *
471 * WE DO NOT USE IT ANY MORE! It's only left here for historical 471 * WE DO NOT USE IT ANY MORE! It's only left here for historical
472 * reasons and because gdb uses it as a signature to notice 472 * reasons and because gdb uses it as a signature to notice
@@ -480,16 +480,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
480 goto give_sigsegv; 480 goto give_sigsegv;
481 481
482 /* Set up registers for signal handler */ 482 /* Set up registers for signal handler */
483 regs->esp = (unsigned long) frame; 483 regs->sp = (unsigned long) frame;
484 regs->eip = (unsigned long) ka->sa.sa_handler; 484 regs->ip = (unsigned long) ka->sa.sa_handler;
485 regs->eax = (unsigned long) usig; 485 regs->ax = (unsigned long) usig;
486 regs->edx = (unsigned long) &frame->info; 486 regs->dx = (unsigned long) &frame->info;
487 regs->ecx = (unsigned long) &frame->uc; 487 regs->cx = (unsigned long) &frame->uc;
488 488
489 regs->xds = __USER_DS; 489 regs->ds = __USER_DS;
490 regs->xes = __USER_DS; 490 regs->es = __USER_DS;
491 regs->xss = __USER_DS; 491 regs->ss = __USER_DS;
492 regs->xcs = __USER_CS; 492 regs->cs = __USER_CS;
493 493
494 /* 494 /*
495 * Clear TF when entering the signal handler, but 495 * Clear TF when entering the signal handler, but
@@ -497,13 +497,13 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
497 * The tracer may want to single-step inside the 497 * The tracer may want to single-step inside the
498 * handler too. 498 * handler too.
499 */ 499 */
500 regs->eflags &= ~TF_MASK; 500 regs->flags &= ~TF_MASK;
501 if (test_thread_flag(TIF_SINGLESTEP)) 501 if (test_thread_flag(TIF_SINGLESTEP))
502 ptrace_notify(SIGTRAP); 502 ptrace_notify(SIGTRAP);
503 503
504#if DEBUG_SIG 504#if DEBUG_SIG
505 printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", 505 printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
506 current->comm, current->pid, frame, regs->eip, frame->pretcode); 506 current->comm, current->pid, frame, regs->ip, frame->pretcode);
507#endif 507#endif
508 508
509 return 0; 509 return 0;
@@ -524,23 +524,23 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
524 int ret; 524 int ret;
525 525
526 /* Are we from a system call? */ 526 /* Are we from a system call? */
527 if (regs->orig_eax >= 0) { 527 if (regs->orig_ax >= 0) {
528 /* If so, check system call restarting.. */ 528 /* If so, check system call restarting.. */
529 switch (regs->eax) { 529 switch (regs->ax) {
530 case -ERESTART_RESTARTBLOCK: 530 case -ERESTART_RESTARTBLOCK:
531 case -ERESTARTNOHAND: 531 case -ERESTARTNOHAND:
532 regs->eax = -EINTR; 532 regs->ax = -EINTR;
533 break; 533 break;
534 534
535 case -ERESTARTSYS: 535 case -ERESTARTSYS:
536 if (!(ka->sa.sa_flags & SA_RESTART)) { 536 if (!(ka->sa.sa_flags & SA_RESTART)) {
537 regs->eax = -EINTR; 537 regs->ax = -EINTR;
538 break; 538 break;
539 } 539 }
540 /* fallthrough */ 540 /* fallthrough */
541 case -ERESTARTNOINTR: 541 case -ERESTARTNOINTR:
542 regs->eax = regs->orig_eax; 542 regs->ax = regs->orig_ax;
543 regs->eip -= 2; 543 regs->ip -= 2;
544 } 544 }
545 } 545 }
546 546
@@ -548,9 +548,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
548 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF 548 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
549 * flag so that register information in the sigcontext is correct. 549 * flag so that register information in the sigcontext is correct.
550 */ 550 */
551 if (unlikely(regs->eflags & X86_EFLAGS_TF) && 551 if (unlikely(regs->flags & X86_EFLAGS_TF) &&
552 likely(test_and_clear_thread_flag(TIF_FORCED_TF))) 552 likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
553 regs->eflags &= ~X86_EFLAGS_TF; 553 regs->flags &= ~X86_EFLAGS_TF;
554 554
555 /* Set up the stack frame */ 555 /* Set up the stack frame */
556 if (ka->sa.sa_flags & SA_SIGINFO) 556 if (ka->sa.sa_flags & SA_SIGINFO)
@@ -622,19 +622,19 @@ static void fastcall do_signal(struct pt_regs *regs)
622 } 622 }
623 623
624 /* Did we come from a system call? */ 624 /* Did we come from a system call? */
625 if (regs->orig_eax >= 0) { 625 if (regs->orig_ax >= 0) {
626 /* Restart the system call - no handlers present */ 626 /* Restart the system call - no handlers present */
627 switch (regs->eax) { 627 switch (regs->ax) {
628 case -ERESTARTNOHAND: 628 case -ERESTARTNOHAND:
629 case -ERESTARTSYS: 629 case -ERESTARTSYS:
630 case -ERESTARTNOINTR: 630 case -ERESTARTNOINTR:
631 regs->eax = regs->orig_eax; 631 regs->ax = regs->orig_ax;
632 regs->eip -= 2; 632 regs->ip -= 2;
633 break; 633 break;
634 634
635 case -ERESTART_RESTARTBLOCK: 635 case -ERESTART_RESTARTBLOCK:
636 regs->eax = __NR_restart_syscall; 636 regs->ax = __NR_restart_syscall;
637 regs->eip -= 2; 637 regs->ip -= 2;
638 break; 638 break;
639 } 639 }
640 } 640 }
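
The restart logic here and in handle_signal() above leans on an instruction-length invariant: every way into a 32-bit system call (int $0x80, encoded 0xcd 0x80, and the two-byte sysenter/syscall opcodes in the vsyscall stubs) is two bytes long, so ip -= 2 re-points ip at the trapping instruction, while restoring orig_ax undoes the kernel's reuse of ax for the return value. A toy sketch of the -ERESTARTNOINTR path (error value as in include/linux/errno.h; the struct is a stand-in):

	#include <stdio.h>

	#define ERESTARTNOINTR 513	/* kernel-internal; userspace never sees it */

	struct toy_regs { long ax, orig_ax; unsigned long ip; };

	static void maybe_restart(struct toy_regs *regs)
	{
		if (regs->ax == -ERESTARTNOINTR) {
			regs->ax = regs->orig_ax;	/* restore syscall number */
			regs->ip -= 2;			/* back onto int $0x80    */
		}
	}

	int main(void)
	{
		struct toy_regs regs = { .ax = -ERESTARTNOINTR, .orig_ax = 4,
					 .ip = 0x08048102UL };

		maybe_restart(&regs);
		printf("ax=%ld ip=%#lx\n", regs.ax, regs.ip);	/* ax=4 ip=0x8048100 */
		return 0;
	}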
@@ -657,7 +657,7 @@ void do_notify_resume(struct pt_regs *regs, void *_unused,
657{ 657{
658 /* Pending single-step? */ 658 /* Pending single-step? */
659 if (thread_info_flags & _TIF_SINGLESTEP) { 659 if (thread_info_flags & _TIF_SINGLESTEP) {
660 regs->eflags |= TF_MASK; 660 regs->flags |= TF_MASK;
661 clear_thread_flag(TIF_SINGLESTEP); 661 clear_thread_flag(TIF_SINGLESTEP);
662 } 662 }
663 663
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index ab0178ebe00a..4b228fd83b31 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -39,7 +39,7 @@ asmlinkage long
39sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 39sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
40 struct pt_regs *regs) 40 struct pt_regs *regs)
41{ 41{
42 return do_sigaltstack(uss, uoss, regs->rsp); 42 return do_sigaltstack(uss, uoss, regs->sp);
43} 43}
44 44
45 45
@@ -62,10 +62,11 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
62 /* Always make any pending restarted system calls return -EINTR */ 62 /* Always make any pending restarted system calls return -EINTR */
63 current_thread_info()->restart_block.fn = do_no_restart_syscall; 63 current_thread_info()->restart_block.fn = do_no_restart_syscall;
64 64
65#define COPYR(x) err |= __get_user(regs->x, &sc->r ## x)
65#define COPY(x) err |= __get_user(regs->x, &sc->x) 66#define COPY(x) err |= __get_user(regs->x, &sc->x)
66 67
67 COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx); 68 COPYR(di); COPYR(si); COPYR(bp); COPYR(sp); COPYR(bx);
68 COPY(rdx); COPY(rcx); COPY(rip); 69 COPYR(dx); COPYR(cx); COPYR(ip);
69 COPY(r8); 70 COPY(r8);
70 COPY(r9); 71 COPY(r9);
71 COPY(r10); 72 COPY(r10);
@@ -87,8 +88,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
87 { 88 {
88 unsigned int tmpflags; 89 unsigned int tmpflags;
89 err |= __get_user(tmpflags, &sc->eflags); 90 err |= __get_user(tmpflags, &sc->eflags);
90 regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); 91 regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5);
91 regs->orig_rax = -1; /* disable syscall checks */ 92 regs->orig_ax = -1; /* disable syscall checks */
92 } 93 }
93 94
94 { 95 {
@@ -119,9 +120,9 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
119{ 120{
120 struct rt_sigframe __user *frame; 121 struct rt_sigframe __user *frame;
121 sigset_t set; 122 sigset_t set;
122 unsigned long eax; 123 unsigned long ax;
123 124
124 frame = (struct rt_sigframe __user *)(regs->rsp - 8); 125 frame = (struct rt_sigframe __user *)(regs->sp - 8);
125 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) { 126 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) {
126 goto badframe; 127 goto badframe;
127 } 128 }
@@ -135,17 +136,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
135 recalc_sigpending(); 136 recalc_sigpending();
136 spin_unlock_irq(&current->sighand->siglock); 137 spin_unlock_irq(&current->sighand->siglock);
137 138
138 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax)) 139 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
139 goto badframe; 140 goto badframe;
140 141
141#ifdef DEBUG_SIG 142#ifdef DEBUG_SIG
142 printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs->rip,regs->rsp,frame,eax); 143 printk("%d sigreturn ip:%lx sp:%lx frame:%p ax:%lx\n",current->pid,regs->ip,regs->sp,frame,ax);
143#endif 144#endif
144 145
145 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->rsp) == -EFAULT) 146 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
146 goto badframe; 147 goto badframe;
147 148
148 return eax; 149 return ax;
149 150
150badframe: 151badframe:
151 signal_fault(regs,frame,"sigreturn"); 152 signal_fault(regs,frame,"sigreturn");
@@ -165,14 +166,14 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
165 err |= __put_user(0, &sc->gs); 166 err |= __put_user(0, &sc->gs);
166 err |= __put_user(0, &sc->fs); 167 err |= __put_user(0, &sc->fs);
167 168
168 err |= __put_user(regs->rdi, &sc->rdi); 169 err |= __put_user(regs->di, &sc->rdi);
169 err |= __put_user(regs->rsi, &sc->rsi); 170 err |= __put_user(regs->si, &sc->rsi);
170 err |= __put_user(regs->rbp, &sc->rbp); 171 err |= __put_user(regs->bp, &sc->rbp);
171 err |= __put_user(regs->rsp, &sc->rsp); 172 err |= __put_user(regs->sp, &sc->rsp);
172 err |= __put_user(regs->rbx, &sc->rbx); 173 err |= __put_user(regs->bx, &sc->rbx);
173 err |= __put_user(regs->rdx, &sc->rdx); 174 err |= __put_user(regs->dx, &sc->rdx);
174 err |= __put_user(regs->rcx, &sc->rcx); 175 err |= __put_user(regs->cx, &sc->rcx);
175 err |= __put_user(regs->rax, &sc->rax); 176 err |= __put_user(regs->ax, &sc->rax);
176 err |= __put_user(regs->r8, &sc->r8); 177 err |= __put_user(regs->r8, &sc->r8);
177 err |= __put_user(regs->r9, &sc->r9); 178 err |= __put_user(regs->r9, &sc->r9);
178 err |= __put_user(regs->r10, &sc->r10); 179 err |= __put_user(regs->r10, &sc->r10);
@@ -183,8 +184,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
183 err |= __put_user(regs->r15, &sc->r15); 184 err |= __put_user(regs->r15, &sc->r15);
184 err |= __put_user(me->thread.trap_no, &sc->trapno); 185 err |= __put_user(me->thread.trap_no, &sc->trapno);
185 err |= __put_user(me->thread.error_code, &sc->err); 186 err |= __put_user(me->thread.error_code, &sc->err);
186 err |= __put_user(regs->rip, &sc->rip); 187 err |= __put_user(regs->ip, &sc->rip);
187 err |= __put_user(regs->eflags, &sc->eflags); 188 err |= __put_user(regs->flags, &sc->eflags);
188 err |= __put_user(mask, &sc->oldmask); 189 err |= __put_user(mask, &sc->oldmask);
189 err |= __put_user(me->thread.cr2, &sc->cr2); 190 err |= __put_user(me->thread.cr2, &sc->cr2);
190 191
@@ -198,18 +199,18 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
198static void __user * 199static void __user *
199get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) 200get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
200{ 201{
201 unsigned long rsp; 202 unsigned long sp;
202 203
203 /* Default to using normal stack - redzone*/ 204 /* Default to using normal stack - redzone*/
204 rsp = regs->rsp - 128; 205 sp = regs->sp - 128;
205 206
206 /* This is the X/Open sanctioned signal stack switching. */ 207 /* This is the X/Open sanctioned signal stack switching. */
207 if (ka->sa.sa_flags & SA_ONSTACK) { 208 if (ka->sa.sa_flags & SA_ONSTACK) {
208 if (sas_ss_flags(rsp) == 0) 209 if (sas_ss_flags(sp) == 0)
209 rsp = current->sas_ss_sp + current->sas_ss_size; 210 sp = current->sas_ss_sp + current->sas_ss_size;
210 } 211 }
211 212
212 return (void __user *)round_down(rsp - size, 16); 213 return (void __user *)round_down(sp - size, 16);
213} 214}
214 215
215static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 216static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
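
The 128 subtracted in get_stack() above is the x86-64 ABI red zone: leaf code may freely use the 128 bytes below sp without moving it, so a signal frame must be built below that region, and the final round_down(..., 16) restores the ABI's stack alignment. A worked sketch with an illustrative frame size:

	#include <stdio.h>

	#define REDZONE 128

	static unsigned long round_down_to(unsigned long x, unsigned long align)
	{
		return x & ~(align - 1);	/* align must be a power of two */
	}

	int main(void)
	{
		unsigned long sp = 0x7fffffffe123UL;
		unsigned long frame_size = 440;	/* illustrative, not sizeof(rt_sigframe) */
		unsigned long frame = round_down_to(sp - REDZONE - frame_size, 16);

		printf("sp=%#lx frame=%#lx (16-aligned, below the red zone)\n",
		       sp, frame);
		return 0;
	}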
@@ -246,7 +247,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
246 err |= __put_user(0, &frame->uc.uc_flags); 247 err |= __put_user(0, &frame->uc.uc_flags);
247 err |= __put_user(0, &frame->uc.uc_link); 248 err |= __put_user(0, &frame->uc.uc_link);
248 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 249 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
249 err |= __put_user(sas_ss_flags(regs->rsp), 250 err |= __put_user(sas_ss_flags(regs->sp),
250 &frame->uc.uc_stack.ss_flags); 251 &frame->uc.uc_stack.ss_flags);
251 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 252 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
252 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); 253 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
@@ -271,21 +272,21 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
271 goto give_sigsegv; 272 goto give_sigsegv;
272 273
273#ifdef DEBUG_SIG 274#ifdef DEBUG_SIG
274 printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax); 275 printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax);
275#endif 276#endif
276 277
277 /* Set up registers for signal handler */ 278 /* Set up registers for signal handler */
278 regs->rdi = sig; 279 regs->di = sig;
279 /* In case the signal handler was declared without prototypes */ 280 /* In case the signal handler was declared without prototypes */
280 regs->rax = 0; 281 regs->ax = 0;
281 282
282 /* This also works for non SA_SIGINFO handlers because they expect the 283 /* This also works for non SA_SIGINFO handlers because they expect the
283 next argument after the signal number on the stack. */ 284 next argument after the signal number on the stack. */
284 regs->rsi = (unsigned long)&frame->info; 285 regs->si = (unsigned long)&frame->info;
285 regs->rdx = (unsigned long)&frame->uc; 286 regs->dx = (unsigned long)&frame->uc;
286 regs->rip = (unsigned long) ka->sa.sa_handler; 287 regs->ip = (unsigned long) ka->sa.sa_handler;
287 288
288 regs->rsp = (unsigned long)frame; 289 regs->sp = (unsigned long)frame;
289 290
290 /* Set up the CS register to run signal handlers in 64-bit mode, 291 /* Set up the CS register to run signal handlers in 64-bit mode,
291 even if the handler happens to be interrupting 32-bit code. */ 292 even if the handler happens to be interrupting 32-bit code. */
@@ -295,12 +296,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
295 see include/asm-x86_64/uaccess.h for details. */ 296 see include/asm-x86_64/uaccess.h for details. */
296 set_fs(USER_DS); 297 set_fs(USER_DS);
297 298
298 regs->eflags &= ~TF_MASK; 299 regs->flags &= ~TF_MASK;
299 if (test_thread_flag(TIF_SINGLESTEP)) 300 if (test_thread_flag(TIF_SINGLESTEP))
300 ptrace_notify(SIGTRAP); 301 ptrace_notify(SIGTRAP);
301#ifdef DEBUG_SIG 302#ifdef DEBUG_SIG
302 printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", 303 printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n",
303 current->comm, current->pid, frame, regs->rip, frame->pretcode); 304 current->comm, current->pid, frame, regs->ip, frame->pretcode);
304#endif 305#endif
305 306
306 return 0; 307 return 0;
@@ -321,29 +322,29 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
321 int ret; 322 int ret;
322 323
323#ifdef DEBUG_SIG 324#ifdef DEBUG_SIG
324 printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n", 325 printk("handle_signal pid:%d sig:%lu ip:%lx sp:%lx regs=%p\n",
325 current->pid, sig, 326 current->pid, sig,
326 regs->rip, regs->rsp, regs); 327 regs->ip, regs->sp, regs);
327#endif 328#endif
328 329
329 /* Are we from a system call? */ 330 /* Are we from a system call? */
330 if ((long)regs->orig_rax >= 0) { 331 if ((long)regs->orig_ax >= 0) {
331 /* If so, check system call restarting.. */ 332 /* If so, check system call restarting.. */
332 switch (regs->rax) { 333 switch (regs->ax) {
333 case -ERESTART_RESTARTBLOCK: 334 case -ERESTART_RESTARTBLOCK:
334 case -ERESTARTNOHAND: 335 case -ERESTARTNOHAND:
335 regs->rax = -EINTR; 336 regs->ax = -EINTR;
336 break; 337 break;
337 338
338 case -ERESTARTSYS: 339 case -ERESTARTSYS:
339 if (!(ka->sa.sa_flags & SA_RESTART)) { 340 if (!(ka->sa.sa_flags & SA_RESTART)) {
340 regs->rax = -EINTR; 341 regs->ax = -EINTR;
341 break; 342 break;
342 } 343 }
343 /* fallthrough */ 344 /* fallthrough */
344 case -ERESTARTNOINTR: 345 case -ERESTARTNOINTR:
345 regs->rax = regs->orig_rax; 346 regs->ax = regs->orig_ax;
346 regs->rip -= 2; 347 regs->ip -= 2;
347 break; 348 break;
348 } 349 }
349 } 350 }
@@ -352,9 +353,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
352 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF 353 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
353 * flag so that register information in the sigcontext is correct. 354 * flag so that register information in the sigcontext is correct.
354 */ 355 */
355 if (unlikely(regs->eflags & X86_EFLAGS_TF) && 356 if (unlikely(regs->flags & X86_EFLAGS_TF) &&
356 likely(test_and_clear_thread_flag(TIF_FORCED_TF))) 357 likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
357 regs->eflags &= ~X86_EFLAGS_TF; 358 regs->flags &= ~X86_EFLAGS_TF;
358 359
359#ifdef CONFIG_IA32_EMULATION 360#ifdef CONFIG_IA32_EMULATION
360 if (test_thread_flag(TIF_IA32)) { 361 if (test_thread_flag(TIF_IA32)) {
@@ -426,21 +427,21 @@ static void do_signal(struct pt_regs *regs)
426 } 427 }
427 428
428 /* Did we come from a system call? */ 429 /* Did we come from a system call? */
429 if ((long)regs->orig_rax >= 0) { 430 if ((long)regs->orig_ax >= 0) {
430 /* Restart the system call - no handlers present */ 431 /* Restart the system call - no handlers present */
431 long res = regs->rax; 432 long res = regs->ax;
432 switch (res) { 433 switch (res) {
433 case -ERESTARTNOHAND: 434 case -ERESTARTNOHAND:
434 case -ERESTARTSYS: 435 case -ERESTARTSYS:
435 case -ERESTARTNOINTR: 436 case -ERESTARTNOINTR:
436 regs->rax = regs->orig_rax; 437 regs->ax = regs->orig_ax;
437 regs->rip -= 2; 438 regs->ip -= 2;
438 break; 439 break;
439 case -ERESTART_RESTARTBLOCK: 440 case -ERESTART_RESTARTBLOCK:
440 regs->rax = test_thread_flag(TIF_IA32) ? 441 regs->ax = test_thread_flag(TIF_IA32) ?
441 __NR_ia32_restart_syscall : 442 __NR_ia32_restart_syscall :
442 __NR_restart_syscall; 443 __NR_restart_syscall;
443 regs->rip -= 2; 444 regs->ip -= 2;
444 break; 445 break;
445 } 446 }
446 } 447 }
@@ -457,13 +458,13 @@ void
457do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) 458do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
458{ 459{
459#ifdef DEBUG_SIG 460#ifdef DEBUG_SIG
460 printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%p pending:%x\n", 461 printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n",
461 thread_info_flags, regs->rip, regs->rsp, __builtin_return_address(0),signal_pending(current)); 462 thread_info_flags, regs->ip, regs->sp, __builtin_return_address(0),signal_pending(current));
462#endif 463#endif
463 464
464 /* Pending single-step? */ 465 /* Pending single-step? */
465 if (thread_info_flags & _TIF_SINGLESTEP) { 466 if (thread_info_flags & _TIF_SINGLESTEP) {
466 regs->eflags |= TF_MASK; 467 regs->flags |= TF_MASK;
467 clear_thread_flag(TIF_SINGLESTEP); 468 clear_thread_flag(TIF_SINGLESTEP);
468 } 469 }
469 470
@@ -485,8 +486,8 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
485{ 486{
486 struct task_struct *me = current; 487 struct task_struct *me = current;
487 if (show_unhandled_signals && printk_ratelimit()) 488 if (show_unhandled_signals && printk_ratelimit())
488 printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n", 489 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx\n",
489 me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax); 490 me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax);
490 491
491 force_sig(SIGSEGV, me); 492 force_sig(SIGSEGV, me);
492} 493}
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 7142447b5666..02a6533e8909 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -136,7 +136,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
136 * orig_rax contains the negated interrupt vector. 136 * orig_rax contains the negated interrupt vector.
137 * Use that to determine where the sender put the data. 137 * Use that to determine where the sender put the data.
138 */ 138 */
139 sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START; 139 sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
140 f = &per_cpu(flush_state, sender); 140 f = &per_cpu(flush_state, sender);
141 141
142 if (!cpu_isset(cpu, f->flush_cpumask)) 142 if (!cpu_isset(cpu, f->flush_cpumask))
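
A worked example for the hunk above: the interrupt entry stubs store the bitwise NOT of the vector in orig_ax, which can never collide with a non-negative syscall number, and a second NOT recovers the vector. With an illustrative vector base:

	#include <stdio.h>

	#define INVALIDATE_TLB_VECTOR_START 0xf0	/* illustrative base */

	int main(void)
	{
		long vector = 0xf3;		/* vector the sender raised   */
		long orig_ax = ~vector;		/* what the entry stub stores */
		long sender = ~orig_ax - INVALIDATE_TLB_VECTOR_START;

		printf("orig_ax=%ld sender index=%ld\n", orig_ax, sender);	/* index 3 */
		return 0;
	}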
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 0bf7f20baba0..3566191832b3 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -447,7 +447,7 @@ void __devinit initialize_secondary(void)
447{ 447{
448 /* 448 /*
449 * We don't actually need to load the full TSS, 449 * We don't actually need to load the full TSS,
450 * basically just the stack pointer and the eip. 450 * basically just the stack pointer and the ip.
451 */ 451 */
452 452
453 asm volatile( 453 asm volatile(
@@ -459,7 +459,7 @@ void __devinit initialize_secondary(void)
459 459
460/* Static state in head.S used to set up a CPU */ 460/* Static state in head.S used to set up a CPU */
461extern struct { 461extern struct {
462 void * esp; 462 void * sp;
463 unsigned short ss; 463 unsigned short ss;
464} stack_start; 464} stack_start;
465 465
@@ -667,7 +667,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
667 * target processor state. 667 * target processor state.
668 */ 668 */
669 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, 669 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
670 (unsigned long) stack_start.esp); 670 (unsigned long) stack_start.sp);
671 671
672 /* 672 /*
673 * Run STARTUP IPI loop. 673 * Run STARTUP IPI loop.
@@ -806,9 +806,9 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
806 alternatives_smp_switch(1); 806 alternatives_smp_switch(1);
807 807
808 /* So we see what's up */ 808 /* So we see what's up */
809 printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip); 809 printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
810 /* Stack for startup_32 can be just as for start_secondary onwards */ 810 /* Stack for startup_32 can be just as for start_secondary onwards */
811 stack_start.esp = (void *) idle->thread.esp; 811 stack_start.sp = (void *) idle->thread.esp;
812 812
813 irq_ctx_init(cpu); 813 irq_ctx_init(cpu);
814 814
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index cf4b9dac4a05..f55c003f5b63 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -12,17 +12,12 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r
12{ 12{
13 unsigned long addr, seg; 13 unsigned long addr, seg;
14 14
15#ifdef CONFIG_X86_64 15 addr = regs->ip;
16 addr = regs->rip;
17 seg = regs->cs & 0xffff; 16 seg = regs->cs & 0xffff;
18#else 17 if (v8086_mode(regs)) {
19 addr = regs->eip;
20 seg = regs->xcs & 0xffff;
21 if (regs->eflags & X86_EFLAGS_VM) {
22 addr = (addr & 0xffff) + (seg << 4); 18 addr = (addr & 0xffff) + (seg << 4);
23 return addr; 19 return addr;
24 } 20 }
25#endif
26 21
27 /* 22 /*
28 * We'll assume that the code segments in the GDT 23 * We'll assume that the code segments in the GDT
@@ -124,11 +119,11 @@ static int enable_single_step(struct task_struct *child)
124 /* 119 /*
125 * If TF was already set, don't do anything else 120 * If TF was already set, don't do anything else
126 */ 121 */
127 if (regs->eflags & X86_EFLAGS_TF) 122 if (regs->flags & X86_EFLAGS_TF)
128 return 0; 123 return 0;
129 124
130 /* Set TF on the kernel stack.. */ 125 /* Set TF on the kernel stack.. */
131 regs->eflags |= X86_EFLAGS_TF; 126 regs->flags |= X86_EFLAGS_TF;
132 127
133 /* 128 /*
134 * ..but if TF is changed by the instruction we will trace, 129 * ..but if TF is changed by the instruction we will trace,
@@ -203,5 +198,5 @@ void user_disable_single_step(struct task_struct *child)
203 198
204 /* But touch TF only if it was set by us.. */ 199 /* But touch TF only if it was set by us.. */
205 if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) 200 if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
206 task_pt_regs(child)->eflags &= ~X86_EFLAGS_TF; 201 task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
207} 202}
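The unified convert_rip_to_linear() above keeps the vm86 special case: in virtual-8086 mode there is no descriptor lookup, and the linear address is simply segment * 16 plus the 16-bit offset. A standalone sketch of that computation (sample values are illustrative):

#include <assert.h>

static unsigned long vm86_linear(unsigned long ip, unsigned long seg)
{
	return (ip & 0xffff) + ((seg & 0xffff) << 4);
}

int main(void)
{
	/* cs=0xb800, ip=0x0010 -> linear 0xb8010 (VGA text area) */
	assert(vm86_linear(0x0010, 0xb800) == 0xb8010UL);
	return 0;
}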
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
index 72f952103e50..aeb9a4d7681e 100644
--- a/arch/x86/kernel/suspend_asm_64.S
+++ b/arch/x86/kernel/suspend_asm_64.S
@@ -18,13 +18,13 @@
18 18
19ENTRY(swsusp_arch_suspend) 19ENTRY(swsusp_arch_suspend)
20 movq $saved_context, %rax 20 movq $saved_context, %rax
21 movq %rsp, pt_regs_rsp(%rax) 21 movq %rsp, pt_regs_sp(%rax)
22 movq %rbp, pt_regs_rbp(%rax) 22 movq %rbp, pt_regs_bp(%rax)
23 movq %rsi, pt_regs_rsi(%rax) 23 movq %rsi, pt_regs_si(%rax)
24 movq %rdi, pt_regs_rdi(%rax) 24 movq %rdi, pt_regs_di(%rax)
25 movq %rbx, pt_regs_rbx(%rax) 25 movq %rbx, pt_regs_bx(%rax)
26 movq %rcx, pt_regs_rcx(%rax) 26 movq %rcx, pt_regs_cx(%rax)
27 movq %rdx, pt_regs_rdx(%rax) 27 movq %rdx, pt_regs_dx(%rax)
28 movq %r8, pt_regs_r8(%rax) 28 movq %r8, pt_regs_r8(%rax)
29 movq %r9, pt_regs_r9(%rax) 29 movq %r9, pt_regs_r9(%rax)
30 movq %r10, pt_regs_r10(%rax) 30 movq %r10, pt_regs_r10(%rax)
@@ -34,7 +34,7 @@ ENTRY(swsusp_arch_suspend)
34 movq %r14, pt_regs_r14(%rax) 34 movq %r14, pt_regs_r14(%rax)
35 movq %r15, pt_regs_r15(%rax) 35 movq %r15, pt_regs_r15(%rax)
36 pushfq 36 pushfq
37 popq pt_regs_eflags(%rax) 37 popq pt_regs_flags(%rax)
38 38
39 /* save the address of restore_registers */ 39 /* save the address of restore_registers */
40 movq $restore_registers, %rax 40 movq $restore_registers, %rax
@@ -115,13 +115,13 @@ ENTRY(restore_registers)
115 115
116 /* We don't restore %rax, it must be 0 anyway */ 116 /* We don't restore %rax, it must be 0 anyway */
117 movq $saved_context, %rax 117 movq $saved_context, %rax
118 movq pt_regs_rsp(%rax), %rsp 118 movq pt_regs_sp(%rax), %rsp
119 movq pt_regs_rbp(%rax), %rbp 119 movq pt_regs_bp(%rax), %rbp
120 movq pt_regs_rsi(%rax), %rsi 120 movq pt_regs_si(%rax), %rsi
121 movq pt_regs_rdi(%rax), %rdi 121 movq pt_regs_di(%rax), %rdi
122 movq pt_regs_rbx(%rax), %rbx 122 movq pt_regs_bx(%rax), %rbx
123 movq pt_regs_rcx(%rax), %rcx 123 movq pt_regs_cx(%rax), %rcx
124 movq pt_regs_rdx(%rax), %rdx 124 movq pt_regs_dx(%rax), %rdx
125 movq pt_regs_r8(%rax), %r8 125 movq pt_regs_r8(%rax), %r8
126 movq pt_regs_r9(%rax), %r9 126 movq pt_regs_r9(%rax), %r9
127 movq pt_regs_r10(%rax), %r10 127 movq pt_regs_r10(%rax), %r10
@@ -130,7 +130,7 @@ ENTRY(restore_registers)
130 movq pt_regs_r13(%rax), %r13 130 movq pt_regs_r13(%rax), %r13
131 movq pt_regs_r14(%rax), %r14 131 movq pt_regs_r14(%rax), %r14
132 movq pt_regs_r15(%rax), %r15 132 movq pt_regs_r15(%rax), %r15
133 pushq pt_regs_eflags(%rax) 133 pushq pt_regs_flags(%rax)
134 popfq 134 popfq
135 135
136 xorq %rax, %rax 136 xorq %rax, %rax
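The pt_regs_sp, pt_regs_bp, pt_regs_flags, ... symbols used by the assembly above are not hand-written constants; they are offsetof() values emitted by the asm-offsets machinery (see the asm-offsets_64.c changes in this same patch) so that .S files can index fields of the renamed C struct. A sketch of the mechanism, using a trimmed stand-in rather than the real pt_regs layout:

#include <stddef.h>
#include <stdio.h>

struct fake_pt_regs {			/* trimmed stand-in, not the real layout */
	unsigned long bx, cx, dx, si, di, bp, ax;
	unsigned long ip, cs, flags, sp, ss;
};

#define OFFSET(sym, field) \
	printf("#define %s %zu\n", #sym, offsetof(struct fake_pt_regs, field))

int main(void)
{
	OFFSET(pt_regs_sp, sp);
	OFFSET(pt_regs_bp, bp);
	OFFSET(pt_regs_flags, flags);
	return 0;
}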
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 2dcbb81b4cd3..1a89e93f3f1c 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -49,15 +49,15 @@ unsigned long profile_pc(struct pt_regs *regs)
49 unsigned long pc = instruction_pointer(regs); 49 unsigned long pc = instruction_pointer(regs);
50 50
51#ifdef CONFIG_SMP 51#ifdef CONFIG_SMP
52 if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs) && 52 if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->cs) &&
53 in_lock_functions(pc)) { 53 in_lock_functions(pc)) {
54#ifdef CONFIG_FRAME_POINTER 54#ifdef CONFIG_FRAME_POINTER
55 return *(unsigned long *)(regs->ebp + 4); 55 return *(unsigned long *)(regs->bp + 4);
56#else 56#else
57 unsigned long *sp = (unsigned long *)&regs->esp; 57 unsigned long *sp = (unsigned long *)&regs->sp;
58 58
59 /* Return address is either directly at stack pointer 59 /* Return address is either directly at stack pointer
60 or above a saved eflags. Eflags has bits 22-31 zero, 60 or above a saved flags. Eflags has bits 22-31 zero,
61 kernel addresses don't. */ 61 kernel addresses don't. */
62 if (sp[0] >> 22) 62 if (sp[0] >> 22)
63 return sp[0]; 63 return sp[0];
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index f88bf6b802e3..bf0bcc9bb001 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -29,10 +29,10 @@ unsigned long profile_pc(struct pt_regs *regs)
29 unsigned long pc = instruction_pointer(regs); 29 unsigned long pc = instruction_pointer(regs);
30 30
31 /* Assume the lock function has either no stack frame or a copy 31 /* Assume the lock function has either no stack frame or a copy
32 of eflags from PUSHF 32 of flags from PUSHF
33 Eflags always has bits 22 and up cleared unlike kernel addresses. */ 33 Eflags always has bits 22 and up cleared unlike kernel addresses. */
34 if (!user_mode(regs) && in_lock_functions(pc)) { 34 if (!user_mode(regs) && in_lock_functions(pc)) {
35 unsigned long *sp = (unsigned long *)regs->rsp; 35 unsigned long *sp = (unsigned long *)regs->sp;
36 if (sp[0] >> 22) 36 if (sp[0] >> 22)
37 return sp[0]; 37 return sp[0];
38 if (sp[1] >> 22) 38 if (sp[1] >> 22)
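Both profile_pc() variants above lean on the heuristic spelled out in their comments: a PUSHF-saved flags word always has bits 22-31 clear, while kernel text addresses do not, so shifting a stack word right by 22 distinguishes a return address from saved flags. A standalone sketch (the sample addresses assume the usual 32-bit kernel mapping at 0xc0000000):

#include <assert.h>

static int looks_like_kernel_address(unsigned long word)
{
	return (word >> 22) != 0;	/* EFLAGS has bits 22-31 clear */
}

int main(void)
{
	assert(looks_like_kernel_address(0xc0123456UL));	/* kernel text */
	assert(!looks_like_kernel_address(0x00000246UL));	/* typical EFLAGS */
	return 0;
}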
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 9b0bbd508cd5..931ef10960ee 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -114,11 +114,11 @@ struct stack_frame {
114}; 114};
115 115
116static inline unsigned long print_context_stack(struct thread_info *tinfo, 116static inline unsigned long print_context_stack(struct thread_info *tinfo,
117 unsigned long *stack, unsigned long ebp, 117 unsigned long *stack, unsigned long bp,
118 const struct stacktrace_ops *ops, void *data) 118 const struct stacktrace_ops *ops, void *data)
119{ 119{
120#ifdef CONFIG_FRAME_POINTER 120#ifdef CONFIG_FRAME_POINTER
121 struct stack_frame *frame = (struct stack_frame *)ebp; 121 struct stack_frame *frame = (struct stack_frame *)bp;
122 while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) { 122 while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) {
123 struct stack_frame *next; 123 struct stack_frame *next;
124 unsigned long addr; 124 unsigned long addr;
@@ -145,7 +145,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
145 ops->address(data, addr); 145 ops->address(data, addr);
146 } 146 }
147#endif 147#endif
148 return ebp; 148 return bp;
149} 149}
150 150
151#define MSG(msg) ops->warning(data, msg) 151#define MSG(msg) ops->warning(data, msg)
@@ -154,7 +154,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
154 unsigned long *stack, 154 unsigned long *stack,
155 const struct stacktrace_ops *ops, void *data) 155 const struct stacktrace_ops *ops, void *data)
156{ 156{
157 unsigned long ebp = 0; 157 unsigned long bp = 0;
158 158
159 if (!task) 159 if (!task)
160 task = current; 160 task = current;
@@ -167,13 +167,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
167 } 167 }
168 168
169#ifdef CONFIG_FRAME_POINTER 169#ifdef CONFIG_FRAME_POINTER
170 if (!ebp) { 170 if (!bp) {
171 if (task == current) { 171 if (task == current) {
172 /* Grab ebp right from our regs */ 172 /* Grab bp right from our regs */
173 asm ("movl %%ebp, %0" : "=r" (ebp) : ); 173 asm ("movl %%ebp, %0" : "=r" (bp) : );
174 } else { 174 } else {
175 /* ebp is the last reg pushed by switch_to */ 175 /* bp is the last reg pushed by switch_to */
176 ebp = *(unsigned long *) task->thread.esp; 176 bp = *(unsigned long *) task->thread.esp;
177 } 177 }
178 } 178 }
179#endif 179#endif
@@ -182,7 +182,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
182 struct thread_info *context; 182 struct thread_info *context;
183 context = (struct thread_info *) 183 context = (struct thread_info *)
184 ((unsigned long)stack & (~(THREAD_SIZE - 1))); 184 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
185 ebp = print_context_stack(context, stack, ebp, ops, data); 185 bp = print_context_stack(context, stack, bp, ops, data);
186 /* Should be after the line below, but somewhere 186 /* Should be after the line below, but somewhere
187 in early boot context comes out corrupted and we 187 in early boot context comes out corrupted and we
188 can't reference it -AK */ 188 can't reference it -AK */
@@ -246,19 +246,19 @@ void show_trace(struct task_struct *task, struct pt_regs *regs,
246} 246}
247 247
248static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, 248static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
249 unsigned long *esp, char *log_lvl) 249 unsigned long *sp, char *log_lvl)
250{ 250{
251 unsigned long *stack; 251 unsigned long *stack;
252 int i; 252 int i;
253 253
254 if (esp == NULL) { 254 if (sp == NULL) {
255 if (task) 255 if (task)
256 esp = (unsigned long*)task->thread.esp; 256 sp = (unsigned long*)task->thread.esp;
257 else 257 else
258 esp = (unsigned long *)&esp; 258 sp = (unsigned long *)&sp;
259 } 259 }
260 260
261 stack = esp; 261 stack = sp;
262 for(i = 0; i < kstack_depth_to_print; i++) { 262 for(i = 0; i < kstack_depth_to_print; i++) {
263 if (kstack_end(stack)) 263 if (kstack_end(stack))
264 break; 264 break;
@@ -267,13 +267,13 @@ static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
267 printk("%08lx ", *stack++); 267 printk("%08lx ", *stack++);
268 } 268 }
269 printk("\n%sCall Trace:\n", log_lvl); 269 printk("\n%sCall Trace:\n", log_lvl);
270 show_trace_log_lvl(task, regs, esp, log_lvl); 270 show_trace_log_lvl(task, regs, sp, log_lvl);
271} 271}
272 272
273void show_stack(struct task_struct *task, unsigned long *esp) 273void show_stack(struct task_struct *task, unsigned long *sp)
274{ 274{
275 printk(" "); 275 printk(" ");
276 show_stack_log_lvl(task, NULL, esp, ""); 276 show_stack_log_lvl(task, NULL, sp, "");
277} 277}
278 278
279/* 279/*
@@ -307,30 +307,30 @@ void show_registers(struct pt_regs *regs)
307 * time of the fault.. 307 * time of the fault..
308 */ 308 */
309 if (!user_mode_vm(regs)) { 309 if (!user_mode_vm(regs)) {
310 u8 *eip; 310 u8 *ip;
311 unsigned int code_prologue = code_bytes * 43 / 64; 311 unsigned int code_prologue = code_bytes * 43 / 64;
312 unsigned int code_len = code_bytes; 312 unsigned int code_len = code_bytes;
313 unsigned char c; 313 unsigned char c;
314 314
315 printk("\n" KERN_EMERG "Stack: "); 315 printk("\n" KERN_EMERG "Stack: ");
316 show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG); 316 show_stack_log_lvl(NULL, regs, &regs->sp, KERN_EMERG);
317 317
318 printk(KERN_EMERG "Code: "); 318 printk(KERN_EMERG "Code: ");
319 319
320 eip = (u8 *)regs->eip - code_prologue; 320 ip = (u8 *)regs->ip - code_prologue;
321 if (eip < (u8 *)PAGE_OFFSET || 321 if (ip < (u8 *)PAGE_OFFSET ||
322 probe_kernel_address(eip, c)) { 322 probe_kernel_address(ip, c)) {
323 /* try starting at EIP */ 323 /* try starting at EIP */
324 eip = (u8 *)regs->eip; 324 ip = (u8 *)regs->ip;
325 code_len = code_len - code_prologue + 1; 325 code_len = code_len - code_prologue + 1;
326 } 326 }
327 for (i = 0; i < code_len; i++, eip++) { 327 for (i = 0; i < code_len; i++, ip++) {
328 if (eip < (u8 *)PAGE_OFFSET || 328 if (ip < (u8 *)PAGE_OFFSET ||
329 probe_kernel_address(eip, c)) { 329 probe_kernel_address(ip, c)) {
330 printk(" Bad EIP value."); 330 printk(" Bad EIP value.");
331 break; 331 break;
332 } 332 }
333 if (eip == (u8 *)regs->eip) 333 if (ip == (u8 *)regs->ip)
334 printk("<%02x> ", c); 334 printk("<%02x> ", c);
335 else 335 else
336 printk("%02x ", c); 336 printk("%02x ", c);
@@ -339,13 +339,13 @@ void show_registers(struct pt_regs *regs)
339 printk("\n"); 339 printk("\n");
340} 340}
341 341
342int is_valid_bugaddr(unsigned long eip) 342int is_valid_bugaddr(unsigned long ip)
343{ 343{
344 unsigned short ud2; 344 unsigned short ud2;
345 345
346 if (eip < PAGE_OFFSET) 346 if (ip < PAGE_OFFSET)
347 return 0; 347 return 0;
348 if (probe_kernel_address((unsigned short *)eip, ud2)) 348 if (probe_kernel_address((unsigned short *)ip, ud2))
349 return 0; 349 return 0;
350 350
351 return ud2 == 0x0b0f; 351 return ud2 == 0x0b0f;
@@ -382,10 +382,10 @@ void die(const char * str, struct pt_regs * regs, long err)
382 raw_local_irq_save(flags); 382 raw_local_irq_save(flags);
383 383
384 if (++die.lock_owner_depth < 3) { 384 if (++die.lock_owner_depth < 3) {
385 unsigned long esp; 385 unsigned long sp;
386 unsigned short ss; 386 unsigned short ss;
387 387
388 report_bug(regs->eip, regs); 388 report_bug(regs->ip, regs);
389 389
390 printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, 390 printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
391 ++die_counter); 391 ++die_counter);
@@ -405,15 +405,15 @@ void die(const char * str, struct pt_regs * regs, long err)
405 NOTIFY_STOP) { 405 NOTIFY_STOP) {
406 show_registers(regs); 406 show_registers(regs);
407 /* Executive summary in case the oops scrolled away */ 407 /* Executive summary in case the oops scrolled away */
408 esp = (unsigned long) (&regs->esp); 408 sp = (unsigned long) (&regs->sp);
409 savesegment(ss, ss); 409 savesegment(ss, ss);
410 if (user_mode(regs)) { 410 if (user_mode(regs)) {
411 esp = regs->esp; 411 sp = regs->sp;
412 ss = regs->xss & 0xffff; 412 ss = regs->ss & 0xffff;
413 } 413 }
414 printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip); 414 printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
415 print_symbol("%s", regs->eip); 415 print_symbol("%s", regs->ip);
416 printk(" SS:ESP %04x:%08lx\n", ss, esp); 416 printk(" SS:ESP %04x:%08lx\n", ss, sp);
417 } 417 }
418 else 418 else
419 regs = NULL; 419 regs = NULL;
@@ -454,7 +454,7 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
454{ 454{
455 struct task_struct *tsk = current; 455 struct task_struct *tsk = current;
456 456
457 if (regs->eflags & VM_MASK) { 457 if (regs->flags & VM_MASK) {
458 if (vm86) 458 if (vm86)
459 goto vm86_trap; 459 goto vm86_trap;
460 goto trap_signal; 460 goto trap_signal;
@@ -548,13 +548,13 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
548 do_trap(trapnr, signr, str, 1, regs, error_code, &info); \ 548 do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
549} 549}
550 550
551DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip) 551DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
552#ifndef CONFIG_KPROBES 552#ifndef CONFIG_KPROBES
553DO_VM86_ERROR( 3, SIGTRAP, "int3", int3) 553DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
554#endif 554#endif
555DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow) 555DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
556DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds) 556DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
557DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0) 557DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
558DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 558DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
559DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 559DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
560DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 560DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
@@ -596,7 +596,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
596 } 596 }
597 put_cpu(); 597 put_cpu();
598 598
599 if (regs->eflags & VM_MASK) 599 if (regs->flags & VM_MASK)
600 goto gp_in_vm86; 600 goto gp_in_vm86;
601 601
602 if (!user_mode(regs)) 602 if (!user_mode(regs))
@@ -607,9 +607,9 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
607 if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) && 607 if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
608 printk_ratelimit()) 608 printk_ratelimit())
609 printk(KERN_INFO 609 printk(KERN_INFO
610 "%s[%d] general protection eip:%lx esp:%lx error:%lx\n", 610 "%s[%d] general protection ip:%lx sp:%lx error:%lx\n",
611 current->comm, task_pid_nr(current), 611 current->comm, task_pid_nr(current),
612 regs->eip, regs->esp, error_code); 612 regs->ip, regs->sp, error_code);
613 613
614 force_sig(SIGSEGV, current); 614 force_sig(SIGSEGV, current);
615 return; 615 return;
@@ -705,8 +705,8 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
705 */ 705 */
706 bust_spinlocks(1); 706 bust_spinlocks(1);
707 printk(KERN_EMERG "%s", msg); 707 printk(KERN_EMERG "%s", msg);
708 printk(" on CPU%d, eip %08lx, registers:\n", 708 printk(" on CPU%d, ip %08lx, registers:\n",
709 smp_processor_id(), regs->eip); 709 smp_processor_id(), regs->ip);
710 show_registers(regs); 710 show_registers(regs);
711 console_silent(); 711 console_silent();
712 spin_unlock(&nmi_print_lock); 712 spin_unlock(&nmi_print_lock);
@@ -847,7 +847,7 @@ fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
847 SIGTRAP) == NOTIFY_STOP) 847 SIGTRAP) == NOTIFY_STOP)
848 return; 848 return;
849 /* It's safe to allow irq's after DR6 has been saved */ 849 /* It's safe to allow irq's after DR6 has been saved */
850 if (regs->eflags & X86_EFLAGS_IF) 850 if (regs->flags & X86_EFLAGS_IF)
851 local_irq_enable(); 851 local_irq_enable();
852 852
853 /* Mask out spurious debug traps due to lazy DR7 setting */ 853 /* Mask out spurious debug traps due to lazy DR7 setting */
@@ -856,7 +856,7 @@ fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
856 goto clear_dr7; 856 goto clear_dr7;
857 } 857 }
858 858
859 if (regs->eflags & VM_MASK) 859 if (regs->flags & VM_MASK)
860 goto debug_vm86; 860 goto debug_vm86;
861 861
862 /* Save debug status register where ptrace can see it */ 862 /* Save debug status register where ptrace can see it */
@@ -892,7 +892,7 @@ debug_vm86:
892 892
893clear_TF_reenable: 893clear_TF_reenable:
894 set_tsk_thread_flag(tsk, TIF_SINGLESTEP); 894 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
895 regs->eflags &= ~TF_MASK; 895 regs->flags &= ~TF_MASK;
896 return; 896 return;
897} 897}
898 898
@@ -901,7 +901,7 @@ clear_TF_reenable:
901 * the correct behaviour even in the presence of the asynchronous 901 * the correct behaviour even in the presence of the asynchronous
902 * IRQ13 behaviour 902 * IRQ13 behaviour
903 */ 903 */
904void math_error(void __user *eip) 904void math_error(void __user *ip)
905{ 905{
906 struct task_struct * task; 906 struct task_struct * task;
907 siginfo_t info; 907 siginfo_t info;
@@ -917,7 +917,7 @@ void math_error(void __user *eip)
917 info.si_signo = SIGFPE; 917 info.si_signo = SIGFPE;
918 info.si_errno = 0; 918 info.si_errno = 0;
919 info.si_code = __SI_FAULT; 919 info.si_code = __SI_FAULT;
920 info.si_addr = eip; 920 info.si_addr = ip;
921 /* 921 /*
922 * (~cwd & swd) will mask out exceptions that are not set to unmasked 922 * (~cwd & swd) will mask out exceptions that are not set to unmasked
923 * status. 0x3f is the exception bits in these regs, 0x200 is the 923 * status. 0x3f is the exception bits in these regs, 0x200 is the
@@ -963,10 +963,10 @@ void math_error(void __user *eip)
963fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code) 963fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
964{ 964{
965 ignore_fpu_irq = 1; 965 ignore_fpu_irq = 1;
966 math_error((void __user *)regs->eip); 966 math_error((void __user *)regs->ip);
967} 967}
968 968
969static void simd_math_error(void __user *eip) 969static void simd_math_error(void __user *ip)
970{ 970{
971 struct task_struct * task; 971 struct task_struct * task;
972 siginfo_t info; 972 siginfo_t info;
@@ -982,7 +982,7 @@ static void simd_math_error(void __user *eip)
982 info.si_signo = SIGFPE; 982 info.si_signo = SIGFPE;
983 info.si_errno = 0; 983 info.si_errno = 0;
984 info.si_code = __SI_FAULT; 984 info.si_code = __SI_FAULT;
985 info.si_addr = eip; 985 info.si_addr = ip;
986 /* 986 /*
987 * The SIMD FPU exceptions are handled a little differently, as there 987 * The SIMD FPU exceptions are handled a little differently, as there
988 * is only a single status/control register. Thus, to determine which 988 * is only a single status/control register. Thus, to determine which
@@ -1020,13 +1020,13 @@ fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
1020 if (cpu_has_xmm) { 1020 if (cpu_has_xmm) {
1021 /* Handle SIMD FPU exceptions on PIII+ processors. */ 1021 /* Handle SIMD FPU exceptions on PIII+ processors. */
1022 ignore_fpu_irq = 1; 1022 ignore_fpu_irq = 1;
1023 simd_math_error((void __user *)regs->eip); 1023 simd_math_error((void __user *)regs->ip);
1024 } else { 1024 } else {
1025 /* 1025 /*
1026 * Handle strange cache flush from user space exception 1026 * Handle strange cache flush from user space exception
1027 * in all other cases. This is undocumented behaviour. 1027 * in all other cases. This is undocumented behaviour.
1028 */ 1028 */
1029 if (regs->eflags & VM_MASK) { 1029 if (regs->flags & VM_MASK) {
1030 handle_vm86_fault((struct kernel_vm86_regs *)regs, 1030 handle_vm86_fault((struct kernel_vm86_regs *)regs,
1031 error_code); 1031 error_code);
1032 return; 1032 return;
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 610a64d6bdf0..f7fecf9d47c3 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -76,20 +76,20 @@ asmlinkage void spurious_interrupt_bug(void);
76 76
77static inline void conditional_sti(struct pt_regs *regs) 77static inline void conditional_sti(struct pt_regs *regs)
78{ 78{
79 if (regs->eflags & X86_EFLAGS_IF) 79 if (regs->flags & X86_EFLAGS_IF)
80 local_irq_enable(); 80 local_irq_enable();
81} 81}
82 82
83static inline void preempt_conditional_sti(struct pt_regs *regs) 83static inline void preempt_conditional_sti(struct pt_regs *regs)
84{ 84{
85 preempt_disable(); 85 preempt_disable();
86 if (regs->eflags & X86_EFLAGS_IF) 86 if (regs->flags & X86_EFLAGS_IF)
87 local_irq_enable(); 87 local_irq_enable();
88} 88}
89 89
90static inline void preempt_conditional_cli(struct pt_regs *regs) 90static inline void preempt_conditional_cli(struct pt_regs *regs)
91{ 91{
92 if (regs->eflags & X86_EFLAGS_IF) 92 if (regs->flags & X86_EFLAGS_IF)
93 local_irq_disable(); 93 local_irq_disable();
94 /* Make sure to not schedule here because we could be running 94 /* Make sure to not schedule here because we could be running
95 on an exception stack. */ 95 on an exception stack. */
@@ -353,7 +353,7 @@ show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
353} 353}
354 354
355static void 355static void
356_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp) 356_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp)
357{ 357{
358 unsigned long *stack; 358 unsigned long *stack;
359 int i; 359 int i;
@@ -364,14 +364,14 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
364 // debugging aid: "show_stack(NULL, NULL);" prints the 364 // debugging aid: "show_stack(NULL, NULL);" prints the
365 // back trace for this cpu. 365 // back trace for this cpu.
366 366
367 if (rsp == NULL) { 367 if (sp == NULL) {
368 if (tsk) 368 if (tsk)
369 rsp = (unsigned long *)tsk->thread.rsp; 369 sp = (unsigned long *)tsk->thread.rsp;
370 else 370 else
371 rsp = (unsigned long *)&rsp; 371 sp = (unsigned long *)&sp;
372 } 372 }
373 373
374 stack = rsp; 374 stack = sp;
375 for(i=0; i < kstack_depth_to_print; i++) { 375 for(i=0; i < kstack_depth_to_print; i++) {
376 if (stack >= irqstack && stack <= irqstack_end) { 376 if (stack >= irqstack && stack <= irqstack_end) {
377 if (stack == irqstack_end) { 377 if (stack == irqstack_end) {
@@ -387,12 +387,12 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
387 printk(" %016lx", *stack++); 387 printk(" %016lx", *stack++);
388 touch_nmi_watchdog(); 388 touch_nmi_watchdog();
389 } 389 }
390 show_trace(tsk, regs, rsp); 390 show_trace(tsk, regs, sp);
391} 391}
392 392
393void show_stack(struct task_struct *tsk, unsigned long * rsp) 393void show_stack(struct task_struct *tsk, unsigned long * sp)
394{ 394{
395 _show_stack(tsk, NULL, rsp); 395 _show_stack(tsk, NULL, sp);
396} 396}
397 397
398/* 398/*
@@ -416,11 +416,11 @@ void show_registers(struct pt_regs *regs)
416{ 416{
417 int i; 417 int i;
418 int in_kernel = !user_mode(regs); 418 int in_kernel = !user_mode(regs);
419 unsigned long rsp; 419 unsigned long sp;
420 const int cpu = smp_processor_id(); 420 const int cpu = smp_processor_id();
421 struct task_struct *cur = cpu_pda(cpu)->pcurrent; 421 struct task_struct *cur = cpu_pda(cpu)->pcurrent;
422 422
423 rsp = regs->rsp; 423 sp = regs->sp;
424 printk("CPU %d ", cpu); 424 printk("CPU %d ", cpu);
425 __show_regs(regs); 425 __show_regs(regs);
426 printk("Process %s (pid: %d, threadinfo %p, task %p)\n", 426 printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
@@ -432,15 +432,15 @@ void show_registers(struct pt_regs *regs)
432 */ 432 */
433 if (in_kernel) { 433 if (in_kernel) {
434 printk("Stack: "); 434 printk("Stack: ");
435 _show_stack(NULL, regs, (unsigned long*)rsp); 435 _show_stack(NULL, regs, (unsigned long*)sp);
436 436
437 printk("\nCode: "); 437 printk("\nCode: ");
438 if (regs->rip < PAGE_OFFSET) 438 if (regs->ip < PAGE_OFFSET)
439 goto bad; 439 goto bad;
440 440
441 for (i=0; i<20; i++) { 441 for (i=0; i<20; i++) {
442 unsigned char c; 442 unsigned char c;
443 if (__get_user(c, &((unsigned char*)regs->rip)[i])) { 443 if (__get_user(c, &((unsigned char*)regs->ip)[i])) {
444bad: 444bad:
445 printk(" Bad RIP value."); 445 printk(" Bad RIP value.");
446 break; 446 break;
@@ -451,11 +451,11 @@ bad:
451 printk("\n"); 451 printk("\n");
452} 452}
453 453
454int is_valid_bugaddr(unsigned long rip) 454int is_valid_bugaddr(unsigned long ip)
455{ 455{
456 unsigned short ud2; 456 unsigned short ud2;
457 457
458 if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2))) 458 if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
459 return 0; 459 return 0;
460 460
461 return ud2 == 0x0b0f; 461 return ud2 == 0x0b0f;
@@ -521,8 +521,8 @@ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
521 add_taint(TAINT_DIE); 521 add_taint(TAINT_DIE);
522 /* Executive summary in case the oops scrolled away */ 522 /* Executive summary in case the oops scrolled away */
523 printk(KERN_ALERT "RIP "); 523 printk(KERN_ALERT "RIP ");
524 printk_address(regs->rip); 524 printk_address(regs->ip);
525 printk(" RSP <%016lx>\n", regs->rsp); 525 printk(" RSP <%016lx>\n", regs->sp);
526 if (kexec_should_crash(current)) 526 if (kexec_should_crash(current))
527 crash_kexec(regs); 527 crash_kexec(regs);
528} 528}
@@ -532,7 +532,7 @@ void die(const char * str, struct pt_regs * regs, long err)
532 unsigned long flags = oops_begin(); 532 unsigned long flags = oops_begin();
533 533
534 if (!user_mode(regs)) 534 if (!user_mode(regs))
535 report_bug(regs->rip, regs); 535 report_bug(regs->ip, regs);
536 536
537 __die(str, regs, err); 537 __die(str, regs, err);
538 oops_end(flags); 538 oops_end(flags);
@@ -582,9 +582,9 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
582 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 582 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
583 printk_ratelimit()) 583 printk_ratelimit())
584 printk(KERN_INFO 584 printk(KERN_INFO
585 "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n", 585 "%s[%d] trap %s ip:%lx sp:%lx error:%lx\n",
586 tsk->comm, tsk->pid, str, 586 tsk->comm, tsk->pid, str,
587 regs->rip, regs->rsp, error_code); 587 regs->ip, regs->sp, error_code);
588 588
589 if (info) 589 if (info)
590 force_sig_info(signr, info, tsk); 590 force_sig_info(signr, info, tsk);
@@ -597,9 +597,9 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
597 /* kernel trap */ 597 /* kernel trap */
598 { 598 {
599 const struct exception_table_entry *fixup; 599 const struct exception_table_entry *fixup;
600 fixup = search_exception_tables(regs->rip); 600 fixup = search_exception_tables(regs->ip);
601 if (fixup) 601 if (fixup)
602 regs->rip = fixup->fixup; 602 regs->ip = fixup->fixup;
603 else { 603 else {
604 tsk->thread.error_code = error_code; 604 tsk->thread.error_code = error_code;
605 tsk->thread.trap_no = trapnr; 605 tsk->thread.trap_no = trapnr;
@@ -635,10 +635,10 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
635 do_trap(trapnr, signr, str, regs, error_code, &info); \ 635 do_trap(trapnr, signr, str, regs, error_code, &info); \
636} 636}
637 637
638DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip) 638DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
639DO_ERROR( 4, SIGSEGV, "overflow", overflow) 639DO_ERROR( 4, SIGSEGV, "overflow", overflow)
640DO_ERROR( 5, SIGSEGV, "bounds", bounds) 640DO_ERROR( 5, SIGSEGV, "bounds", bounds)
641DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip) 641DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
642DO_ERROR( 7, SIGSEGV, "device not available", device_not_available) 642DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
643DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 643DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
644DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 644DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
@@ -688,9 +688,9 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
688 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 688 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
689 printk_ratelimit()) 689 printk_ratelimit())
690 printk(KERN_INFO 690 printk(KERN_INFO
691 "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n", 691 "%s[%d] general protection ip:%lx sp:%lx error:%lx\n",
692 tsk->comm, tsk->pid, 692 tsk->comm, tsk->pid,
693 regs->rip, regs->rsp, error_code); 693 regs->ip, regs->sp, error_code);
694 694
695 force_sig(SIGSEGV, tsk); 695 force_sig(SIGSEGV, tsk);
696 return; 696 return;
@@ -699,9 +699,9 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
699 /* kernel gp */ 699 /* kernel gp */
700 { 700 {
701 const struct exception_table_entry *fixup; 701 const struct exception_table_entry *fixup;
702 fixup = search_exception_tables(regs->rip); 702 fixup = search_exception_tables(regs->ip);
703 if (fixup) { 703 if (fixup) {
704 regs->rip = fixup->fixup; 704 regs->ip = fixup->fixup;
705 return; 705 return;
706 } 706 }
707 707
@@ -824,15 +824,15 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
824{ 824{
825 struct pt_regs *regs = eregs; 825 struct pt_regs *regs = eregs;
826 /* Did already sync */ 826 /* Did already sync */
827 if (eregs == (struct pt_regs *)eregs->rsp) 827 if (eregs == (struct pt_regs *)eregs->sp)
828 ; 828 ;
829 /* Exception from user space */ 829 /* Exception from user space */
830 else if (user_mode(eregs)) 830 else if (user_mode(eregs))
831 regs = task_pt_regs(current); 831 regs = task_pt_regs(current);
832 /* Exception from kernel and interrupts are enabled. Move to 832 /* Exception from kernel and interrupts are enabled. Move to
833 kernel process stack. */ 833 kernel process stack. */
834 else if (eregs->eflags & X86_EFLAGS_IF) 834 else if (eregs->flags & X86_EFLAGS_IF)
835 regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs)); 835 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
836 if (eregs != regs) 836 if (eregs != regs)
837 *regs = *eregs; 837 *regs = *eregs;
838 return regs; 838 return regs;
@@ -887,7 +887,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
887 info.si_signo = SIGTRAP; 887 info.si_signo = SIGTRAP;
888 info.si_errno = 0; 888 info.si_errno = 0;
889 info.si_code = TRAP_BRKPT; 889 info.si_code = TRAP_BRKPT;
890 info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL; 890 info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
891 force_sig_info(SIGTRAP, &info, tsk); 891 force_sig_info(SIGTRAP, &info, tsk);
892 892
893clear_dr7: 893clear_dr7:
@@ -897,16 +897,16 @@ clear_dr7:
897 897
898clear_TF_reenable: 898clear_TF_reenable:
899 set_tsk_thread_flag(tsk, TIF_SINGLESTEP); 899 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
900 regs->eflags &= ~TF_MASK; 900 regs->flags &= ~TF_MASK;
901 preempt_conditional_cli(regs); 901 preempt_conditional_cli(regs);
902} 902}
903 903
904static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) 904static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
905{ 905{
906 const struct exception_table_entry *fixup; 906 const struct exception_table_entry *fixup;
907 fixup = search_exception_tables(regs->rip); 907 fixup = search_exception_tables(regs->ip);
908 if (fixup) { 908 if (fixup) {
909 regs->rip = fixup->fixup; 909 regs->ip = fixup->fixup;
910 return 1; 910 return 1;
911 } 911 }
912 notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE); 912 notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
@@ -923,7 +923,7 @@ static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
923 */ 923 */
924asmlinkage void do_coprocessor_error(struct pt_regs *regs) 924asmlinkage void do_coprocessor_error(struct pt_regs *regs)
925{ 925{
926 void __user *rip = (void __user *)(regs->rip); 926 void __user *ip = (void __user *)(regs->ip);
927 struct task_struct * task; 927 struct task_struct * task;
928 siginfo_t info; 928 siginfo_t info;
929 unsigned short cwd, swd; 929 unsigned short cwd, swd;
@@ -943,7 +943,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
943 info.si_signo = SIGFPE; 943 info.si_signo = SIGFPE;
944 info.si_errno = 0; 944 info.si_errno = 0;
945 info.si_code = __SI_FAULT; 945 info.si_code = __SI_FAULT;
946 info.si_addr = rip; 946 info.si_addr = ip;
947 /* 947 /*
948 * (~cwd & swd) will mask out exceptions that are not set to unmasked 948 * (~cwd & swd) will mask out exceptions that are not set to unmasked
949 * status. 0x3f is the exception bits in these regs, 0x200 is the 949 * status. 0x3f is the exception bits in these regs, 0x200 is the
@@ -992,7 +992,7 @@ asmlinkage void bad_intr(void)
992 992
993asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) 993asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
994{ 994{
995 void __user *rip = (void __user *)(regs->rip); 995 void __user *ip = (void __user *)(regs->ip);
996 struct task_struct * task; 996 struct task_struct * task;
997 siginfo_t info; 997 siginfo_t info;
998 unsigned short mxcsr; 998 unsigned short mxcsr;
@@ -1012,7 +1012,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1012 info.si_signo = SIGFPE; 1012 info.si_signo = SIGFPE;
1013 info.si_errno = 0; 1013 info.si_errno = 0;
1014 info.si_code = __SI_FAULT; 1014 info.si_code = __SI_FAULT;
1015 info.si_addr = rip; 1015 info.si_addr = ip;
1016 /* 1016 /*
1017 * The SIMD FPU exceptions are handled a little differently, as there 1017 * The SIMD FPU exceptions are handled a little differently, as there
1018 * is only a single status/control register. Thus, to determine which 1018 * is only a single status/control register. Thus, to determine which
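Several hunks above (do_trap(), do_general_protection(), kernel_math_error()) repeat the kernel's exception-fixup pattern: look the faulting regs->ip up in the exception table and, if an entry exists, rewrite the saved ip so execution resumes at the fixup landing pad instead of re-faulting. A standalone sketch of the pattern with a tiny hand-rolled table (addresses are illustrative):

#include <assert.h>
#include <stddef.h>

struct exception_table_entry { unsigned long insn, fixup; };

static const struct exception_table_entry extable[] = {
	{ 0xc0100100UL, 0xc0100180UL },	/* faulting insn -> landing pad */
};

static const struct exception_table_entry *search_extable(unsigned long ip)
{
	size_t i;

	for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
		if (extable[i].insn == ip)
			return &extable[i];
	return NULL;
}

int main(void)
{
	unsigned long ip = 0xc0100100UL;	/* saved regs->ip */
	const struct exception_table_entry *fixup = search_extable(ip);

	if (fixup)
		ip = fixup->fixup;	/* resume at the fixup, not the fault */
	assert(ip == 0xc0100180UL);
	return 0;
}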
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 157e4bedd3c5..980e85b90091 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -70,10 +70,10 @@
70/* 70/*
71 * 8- and 16-bit register defines.. 71 * 8- and 16-bit register defines..
72 */ 72 */
73#define AL(regs) (((unsigned char *)&((regs)->pt.eax))[0]) 73#define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0])
74#define AH(regs) (((unsigned char *)&((regs)->pt.eax))[1]) 74#define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1])
75#define IP(regs) (*(unsigned short *)&((regs)->pt.eip)) 75#define IP(regs) (*(unsigned short *)&((regs)->pt.ip))
76#define SP(regs) (*(unsigned short *)&((regs)->pt.esp)) 76#define SP(regs) (*(unsigned short *)&((regs)->pt.sp))
77 77
78/* 78/*
79 * virtual flags (16 and 32-bit versions) 79 * virtual flags (16 and 32-bit versions)
@@ -93,12 +93,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
93{ 93{
94 int ret = 0; 94 int ret = 0;
95 95
96 /* kernel_vm86_regs is missing xgs, so copy everything up to 96 /* kernel_vm86_regs is missing gs, so copy everything up to
97 (but not including) orig_eax, and then rest including orig_eax. */ 97 (but not including) orig_eax, and then rest including orig_eax. */
98 ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax)); 98 ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
99 ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax, 99 ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
100 sizeof(struct kernel_vm86_regs) - 100 sizeof(struct kernel_vm86_regs) -
101 offsetof(struct kernel_vm86_regs, pt.orig_eax)); 101 offsetof(struct kernel_vm86_regs, pt.orig_ax));
102 102
103 return ret; 103 return ret;
104} 104}
@@ -110,12 +110,12 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
110{ 110{
111 int ret = 0; 111 int ret = 0;
112 112
113 /* copy eax-xfs inclusive */ 113 /* copy ax-fs inclusive */
114 ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax)); 114 ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
115 /* copy orig_eax-__gsh+extra */ 115 /* copy orig_ax-__gsh+extra */
116 ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax, 116 ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
117 sizeof(struct kernel_vm86_regs) - 117 sizeof(struct kernel_vm86_regs) -
118 offsetof(struct kernel_vm86_regs, pt.orig_eax) + 118 offsetof(struct kernel_vm86_regs, pt.orig_ax) +
119 extra); 119 extra);
120 return ret; 120 return ret;
121} 121}
@@ -138,7 +138,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
138 printk("no vm86_info: BAD\n"); 138 printk("no vm86_info: BAD\n");
139 do_exit(SIGSEGV); 139 do_exit(SIGSEGV);
140 } 140 }
141 set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask); 141 set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
142 tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs); 142 tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
143 tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap); 143 tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
144 if (tmp) { 144 if (tmp) {
@@ -155,7 +155,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
155 155
156 ret = KVM86->regs32; 156 ret = KVM86->regs32;
157 157
158 ret->xfs = current->thread.saved_fs; 158 ret->fs = current->thread.saved_fs;
159 loadsegment(gs, current->thread.saved_gs); 159 loadsegment(gs, current->thread.saved_gs);
160 160
161 return ret; 161 return ret;
@@ -197,7 +197,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
197 197
198asmlinkage int sys_vm86old(struct pt_regs regs) 198asmlinkage int sys_vm86old(struct pt_regs regs)
199{ 199{
200 struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx; 200 struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
201 struct kernel_vm86_struct info; /* declare this _on top_, 201 struct kernel_vm86_struct info; /* declare this _on top_,
202 * this avoids wasting of stack space. 202 * this avoids wasting of stack space.
203 * This remains on the stack until we 203 * This remains on the stack until we
@@ -237,12 +237,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
237 struct vm86plus_struct __user *v86; 237 struct vm86plus_struct __user *v86;
238 238
239 tsk = current; 239 tsk = current;
240 switch (regs.ebx) { 240 switch (regs.bx) {
241 case VM86_REQUEST_IRQ: 241 case VM86_REQUEST_IRQ:
242 case VM86_FREE_IRQ: 242 case VM86_FREE_IRQ:
243 case VM86_GET_IRQ_BITS: 243 case VM86_GET_IRQ_BITS:
244 case VM86_GET_AND_RESET_IRQ: 244 case VM86_GET_AND_RESET_IRQ:
245 ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx); 245 ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
246 goto out; 246 goto out;
247 case VM86_PLUS_INSTALL_CHECK: 247 case VM86_PLUS_INSTALL_CHECK:
248 /* NOTE: on old vm86 stuff this will return the error 248 /* NOTE: on old vm86 stuff this will return the error
@@ -258,7 +258,7 @@ asmlinkage int sys_vm86(struct pt_regs regs)
258 ret = -EPERM; 258 ret = -EPERM;
259 if (tsk->thread.saved_esp0) 259 if (tsk->thread.saved_esp0)
260 goto out; 260 goto out;
261 v86 = (struct vm86plus_struct __user *)regs.ecx; 261 v86 = (struct vm86plus_struct __user *)regs.cx;
262 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, 262 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
263 offsetof(struct kernel_vm86_struct, regs32) - 263 offsetof(struct kernel_vm86_struct, regs32) -
264 sizeof(info.regs)); 264 sizeof(info.regs));
@@ -281,23 +281,23 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
281/* 281/*
282 * make sure the vm86() system call doesn't try to do anything silly 282 * make sure the vm86() system call doesn't try to do anything silly
283 */ 283 */
284 info->regs.pt.xds = 0; 284 info->regs.pt.ds = 0;
285 info->regs.pt.xes = 0; 285 info->regs.pt.es = 0;
286 info->regs.pt.xfs = 0; 286 info->regs.pt.fs = 0;
287 287
288/* we are clearing gs later just before "jmp resume_userspace", 288/* we are clearing gs later just before "jmp resume_userspace",
289 * because it is not saved/restored. 289 * because it is not saved/restored.
290 */ 290 */
291 291
292/* 292/*
293 * The eflags register is also special: we cannot trust that the user 293 * The flags register is also special: we cannot trust that the user
294 * has set it up safely, so this makes sure interrupt etc flags are 294 * has set it up safely, so this makes sure interrupt etc flags are
295 * inherited from protected mode. 295 * inherited from protected mode.
296 */ 296 */
297 VEFLAGS = info->regs.pt.eflags; 297 VEFLAGS = info->regs.pt.flags;
298 info->regs.pt.eflags &= SAFE_MASK; 298 info->regs.pt.flags &= SAFE_MASK;
299 info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK; 299 info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
300 info->regs.pt.eflags |= VM_MASK; 300 info->regs.pt.flags |= VM_MASK;
301 301
302 switch (info->cpu_type) { 302 switch (info->cpu_type) {
303 case CPU_286: 303 case CPU_286:
@@ -315,11 +315,11 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
315 } 315 }
316 316
317/* 317/*
318 * Save old state, set default return value (%eax) to 0 318 * Save old state, set default return value (%ax) to 0
319 */ 319 */
320 info->regs32->eax = 0; 320 info->regs32->ax = 0;
321 tsk->thread.saved_esp0 = tsk->thread.esp0; 321 tsk->thread.saved_esp0 = tsk->thread.esp0;
322 tsk->thread.saved_fs = info->regs32->xfs; 322 tsk->thread.saved_fs = info->regs32->fs;
323 savesegment(gs, tsk->thread.saved_gs); 323 savesegment(gs, tsk->thread.saved_gs);
324 324
325 tss = &per_cpu(init_tss, get_cpu()); 325 tss = &per_cpu(init_tss, get_cpu());
@@ -352,7 +352,7 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
352 struct pt_regs * regs32; 352 struct pt_regs * regs32;
353 353
354 regs32 = save_v86_state(regs16); 354 regs32 = save_v86_state(regs16);
355 regs32->eax = retval; 355 regs32->ax = retval;
356 __asm__ __volatile__("movl %0,%%esp\n\t" 356 __asm__ __volatile__("movl %0,%%esp\n\t"
357 "movl %1,%%ebp\n\t" 357 "movl %1,%%ebp\n\t"
358 "jmp resume_userspace" 358 "jmp resume_userspace"
@@ -373,12 +373,12 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
373 373
374static inline void clear_TF(struct kernel_vm86_regs * regs) 374static inline void clear_TF(struct kernel_vm86_regs * regs)
375{ 375{
376 regs->pt.eflags &= ~TF_MASK; 376 regs->pt.flags &= ~TF_MASK;
377} 377}
378 378
379static inline void clear_AC(struct kernel_vm86_regs * regs) 379static inline void clear_AC(struct kernel_vm86_regs * regs)
380{ 380{
381 regs->pt.eflags &= ~AC_MASK; 381 regs->pt.flags &= ~AC_MASK;
382} 382}
383 383
384/* It is correct to call set_IF(regs) from the set_vflags_* 384/* It is correct to call set_IF(regs) from the set_vflags_*
@@ -392,11 +392,11 @@ static inline void clear_AC(struct kernel_vm86_regs * regs)
392 * [KD] 392 * [KD]
393 */ 393 */
394 394
395static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs) 395static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
396{ 396{
397 set_flags(VEFLAGS, eflags, current->thread.v86mask); 397 set_flags(VEFLAGS, flags, current->thread.v86mask);
398 set_flags(regs->pt.eflags, eflags, SAFE_MASK); 398 set_flags(regs->pt.flags, flags, SAFE_MASK);
399 if (eflags & IF_MASK) 399 if (flags & IF_MASK)
400 set_IF(regs); 400 set_IF(regs);
401 else 401 else
402 clear_IF(regs); 402 clear_IF(regs);
@@ -405,7 +405,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
405static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs) 405static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
406{ 406{
407 set_flags(VFLAGS, flags, current->thread.v86mask); 407 set_flags(VFLAGS, flags, current->thread.v86mask);
408 set_flags(regs->pt.eflags, flags, SAFE_MASK); 408 set_flags(regs->pt.flags, flags, SAFE_MASK);
409 if (flags & IF_MASK) 409 if (flags & IF_MASK)
410 set_IF(regs); 410 set_IF(regs);
411 else 411 else
@@ -414,7 +414,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
414 414
415static inline unsigned long get_vflags(struct kernel_vm86_regs * regs) 415static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
416{ 416{
417 unsigned long flags = regs->pt.eflags & RETURN_MASK; 417 unsigned long flags = regs->pt.flags & RETURN_MASK;
418 418
419 if (VEFLAGS & VIF_MASK) 419 if (VEFLAGS & VIF_MASK)
420 flags |= IF_MASK; 420 flags |= IF_MASK;
@@ -518,7 +518,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
518 unsigned long __user *intr_ptr; 518 unsigned long __user *intr_ptr;
519 unsigned long segoffs; 519 unsigned long segoffs;
520 520
521 if (regs->pt.xcs == BIOSSEG) 521 if (regs->pt.cs == BIOSSEG)
522 goto cannot_handle; 522 goto cannot_handle;
523 if (is_revectored(i, &KVM86->int_revectored)) 523 if (is_revectored(i, &KVM86->int_revectored))
524 goto cannot_handle; 524 goto cannot_handle;
@@ -530,9 +530,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
530 if ((segoffs >> 16) == BIOSSEG) 530 if ((segoffs >> 16) == BIOSSEG)
531 goto cannot_handle; 531 goto cannot_handle;
532 pushw(ssp, sp, get_vflags(regs), cannot_handle); 532 pushw(ssp, sp, get_vflags(regs), cannot_handle);
533 pushw(ssp, sp, regs->pt.xcs, cannot_handle); 533 pushw(ssp, sp, regs->pt.cs, cannot_handle);
534 pushw(ssp, sp, IP(regs), cannot_handle); 534 pushw(ssp, sp, IP(regs), cannot_handle);
535 regs->pt.xcs = segoffs >> 16; 535 regs->pt.cs = segoffs >> 16;
536 SP(regs) -= 6; 536 SP(regs) -= 6;
537 IP(regs) = segoffs & 0xffff; 537 IP(regs) = segoffs & 0xffff;
538 clear_TF(regs); 538 clear_TF(regs);
@@ -549,7 +549,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
549 if (VMPI.is_vm86pus) { 549 if (VMPI.is_vm86pus) {
550 if ( (trapno==3) || (trapno==1) ) 550 if ( (trapno==3) || (trapno==1) )
551 return_to_32bit(regs, VM86_TRAP + (trapno << 8)); 551 return_to_32bit(regs, VM86_TRAP + (trapno << 8));
552 do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs)); 552 do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
553 return 0; 553 return 0;
554 } 554 }
555 if (trapno !=1) 555 if (trapno !=1)
@@ -585,10 +585,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
585 handle_vm86_trap(regs, 0, 1); \ 585 handle_vm86_trap(regs, 0, 1); \
586 return; } while (0) 586 return; } while (0)
587 587
588 orig_flags = *(unsigned short *)&regs->pt.eflags; 588 orig_flags = *(unsigned short *)&regs->pt.flags;
589 589
590 csp = (unsigned char __user *) (regs->pt.xcs << 4); 590 csp = (unsigned char __user *) (regs->pt.cs << 4);
591 ssp = (unsigned char __user *) (regs->pt.xss << 4); 591 ssp = (unsigned char __user *) (regs->pt.ss << 4);
592 sp = SP(regs); 592 sp = SP(regs);
593 ip = IP(regs); 593 ip = IP(regs);
594 594
@@ -675,7 +675,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
675 SP(regs) += 6; 675 SP(regs) += 6;
676 } 676 }
677 IP(regs) = newip; 677 IP(regs) = newip;
678 regs->pt.xcs = newcs; 678 regs->pt.cs = newcs;
679 CHECK_IF_IN_TRAP; 679 CHECK_IF_IN_TRAP;
680 if (data32) { 680 if (data32) {
681 set_vflags_long(newflags, regs); 681 set_vflags_long(newflags, regs);
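The AL()/AH() macros at the top of the vm86_32.c hunk view the low and high bytes of the renamed ax slot through a byte pointer, which works because x86 is little-endian. A standalone sketch with a stand-in struct (not the real pt_regs):

#include <assert.h>

struct fake_pt_regs { long ax; };	/* stand-in for the renamed pt_regs */

#define AL(r) (((unsigned char *)&(r)->ax)[0])
#define AH(r) (((unsigned char *)&(r)->ax)[1])

int main(void)
{
	struct fake_pt_regs r = { .ax = 0x1234 };

	assert(AL(&r) == 0x34 && AH(&r) == 0x12);
	AH(&r) = 0x56;			/* writes through the alias */
	assert((r.ax & 0xffff) == 0x5634);
	return 0;
}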
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 87e5633805a9..599b6f2ed562 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -88,13 +88,13 @@ struct vmi_timer_ops vmi_timer_ops;
88#define IRQ_PATCH_DISABLE 5 88#define IRQ_PATCH_DISABLE 5
89 89
90static inline void patch_offset(void *insnbuf, 90static inline void patch_offset(void *insnbuf,
91 unsigned long eip, unsigned long dest) 91 unsigned long ip, unsigned long dest)
92{ 92{
93 *(unsigned long *)(insnbuf+1) = dest-eip-5; 93 *(unsigned long *)(insnbuf+1) = dest-ip-5;
94} 94}
95 95
96static unsigned patch_internal(int call, unsigned len, void *insnbuf, 96static unsigned patch_internal(int call, unsigned len, void *insnbuf,
97 unsigned long eip) 97 unsigned long ip)
98{ 98{
99 u64 reloc; 99 u64 reloc;
100 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc; 100 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
@@ -103,13 +103,13 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
103 case VMI_RELOCATION_CALL_REL: 103 case VMI_RELOCATION_CALL_REL:
104 BUG_ON(len < 5); 104 BUG_ON(len < 5);
105 *(char *)insnbuf = MNEM_CALL; 105 *(char *)insnbuf = MNEM_CALL;
106 patch_offset(insnbuf, eip, (unsigned long)rel->eip); 106 patch_offset(insnbuf, ip, (unsigned long)rel->eip);
107 return 5; 107 return 5;
108 108
109 case VMI_RELOCATION_JUMP_REL: 109 case VMI_RELOCATION_JUMP_REL:
110 BUG_ON(len < 5); 110 BUG_ON(len < 5);
111 *(char *)insnbuf = MNEM_JMP; 111 *(char *)insnbuf = MNEM_JMP;
112 patch_offset(insnbuf, eip, (unsigned long)rel->eip); 112 patch_offset(insnbuf, ip, (unsigned long)rel->eip);
113 return 5; 113 return 5;
114 114
115 case VMI_RELOCATION_NOP: 115 case VMI_RELOCATION_NOP:
@@ -131,25 +131,25 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
131 * sequence. The callee does nop padding for us. 131 * sequence. The callee does nop padding for us.
132 */ 132 */
133static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, 133static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
134 unsigned long eip, unsigned len) 134 unsigned long ip, unsigned len)
135{ 135{
136 switch (type) { 136 switch (type) {
137 case PARAVIRT_PATCH(pv_irq_ops.irq_disable): 137 case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
138 return patch_internal(VMI_CALL_DisableInterrupts, len, 138 return patch_internal(VMI_CALL_DisableInterrupts, len,
139 insns, eip); 139 insns, ip);
140 case PARAVIRT_PATCH(pv_irq_ops.irq_enable): 140 case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
141 return patch_internal(VMI_CALL_EnableInterrupts, len, 141 return patch_internal(VMI_CALL_EnableInterrupts, len,
142 insns, eip); 142 insns, ip);
143 case PARAVIRT_PATCH(pv_irq_ops.restore_fl): 143 case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
144 return patch_internal(VMI_CALL_SetInterruptMask, len, 144 return patch_internal(VMI_CALL_SetInterruptMask, len,
145 insns, eip); 145 insns, ip);
146 case PARAVIRT_PATCH(pv_irq_ops.save_fl): 146 case PARAVIRT_PATCH(pv_irq_ops.save_fl):
147 return patch_internal(VMI_CALL_GetInterruptMask, len, 147 return patch_internal(VMI_CALL_GetInterruptMask, len,
148 insns, eip); 148 insns, ip);
149 case PARAVIRT_PATCH(pv_cpu_ops.iret): 149 case PARAVIRT_PATCH(pv_cpu_ops.iret):
150 return patch_internal(VMI_CALL_IRET, len, insns, eip); 150 return patch_internal(VMI_CALL_IRET, len, insns, ip);
151 case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret): 151 case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
152 return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip); 152 return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
153 default: 153 default:
154 break; 154 break;
155 } 155 }
@@ -157,29 +157,29 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
157} 157}
158 158
159/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */ 159/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
160static void vmi_cpuid(unsigned int *eax, unsigned int *ebx, 160static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
161 unsigned int *ecx, unsigned int *edx) 161 unsigned int *cx, unsigned int *dx)
162{ 162{
163 int override = 0; 163 int override = 0;
164 if (*eax == 1) 164 if (*ax == 1)
165 override = 1; 165 override = 1;
166 asm volatile ("call *%6" 166 asm volatile ("call *%6"
167 : "=a" (*eax), 167 : "=a" (*ax),
168 "=b" (*ebx), 168 "=b" (*bx),
169 "=c" (*ecx), 169 "=c" (*cx),
170 "=d" (*edx) 170 "=d" (*dx)
171 : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid)); 171 : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
172 if (override) { 172 if (override) {
173 if (disable_pse) 173 if (disable_pse)
174 *edx &= ~X86_FEATURE_PSE; 174 *dx &= ~X86_FEATURE_PSE;
175 if (disable_pge) 175 if (disable_pge)
176 *edx &= ~X86_FEATURE_PGE; 176 *dx &= ~X86_FEATURE_PGE;
177 if (disable_sep) 177 if (disable_sep)
178 *edx &= ~X86_FEATURE_SEP; 178 *dx &= ~X86_FEATURE_SEP;
179 if (disable_tsc) 179 if (disable_tsc)
180 *edx &= ~X86_FEATURE_TSC; 180 *dx &= ~X86_FEATURE_TSC;
181 if (disable_mtrr) 181 if (disable_mtrr)
182 *edx &= ~X86_FEATURE_MTRR; 182 *dx &= ~X86_FEATURE_MTRR;
183 } 183 }
184} 184}
185 185
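patch_offset() above encodes the rel32 displacement of a 5-byte near call or jmp, which the CPU counts from the end of the instruction, hence dest - ip - 5. A standalone sketch of the encoding (addresses are illustrative):

#include <assert.h>
#include <string.h>

#define MNEM_CALL 0xe8

static void patch_call(unsigned char *insnbuf, unsigned long ip,
		       unsigned long dest)
{
	int rel = (int)(dest - ip - 5);	/* rel32, counted past the insn */

	insnbuf[0] = MNEM_CALL;
	memcpy(insnbuf + 1, &rel, sizeof(rel));
}

int main(void)
{
	unsigned char buf[5];
	int got;

	patch_call(buf, 0x1000, 0x2000);	/* call from 0x1000 to 0x2000 */
	memcpy(&got, buf + 1, sizeof(got));
	assert(buf[0] == MNEM_CALL && got == 0x2000 - 0x1000 - 5);
	return 0;
}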
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index ad4005c6d4a1..018f7cf33790 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -43,7 +43,7 @@
43#include <asm/vgtod.h> 43#include <asm/vgtod.h>
44 44
45#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) 45#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
46#define __syscall_clobber "r11","rcx","memory" 46#define __syscall_clobber "r11","cx","memory"
47#define __pa_vsymbol(x) \ 47#define __pa_vsymbol(x) \
48 ({unsigned long v; \ 48 ({unsigned long v; \
49 extern char __vsyscall_0; \ 49 extern char __vsyscall_0; \