author		H. Peter Anvin <hpa@zytor.com>	2008-01-30 07:30:56 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:56 -0500
commit		65ea5b0349903585bfed9720fa06f5edb4f1cd25
tree		6c252228c34416b7e2077f23475de34500c2ab8a /include/asm-x86
parent		53756d3722172815f52272b28c6d5d5e9639adde
x86: rename the struct pt_regs members for 32/64-bit consistency
We have a lot of code which differs only by the naming of specific
members of structures that contain registers.  In order to enable
additional unifications, this patch drops the e- or r- size prefix
from the register names in struct pt_regs, and drops the x- prefixes
for segment registers on the 32-bit side.

This patch also performs the equivalent renames in some additional
places that might be candidates for unification in the future.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
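To make the effect concrete, here is a hypothetical fragment (not part of
this patch): with the unified member names, register-touching helpers can
compile unchanged on both 32-bit and 64-bit x86, where they previously
needed an #ifdef per member (eip vs. rip, esp vs. rsp, and so on).

        /* Hypothetical helper, for illustration only -- after this
         * patch, members named ip/sp/flags/ax exist in both the
         * 32-bit and the 64-bit layout of struct pt_regs, so no
         * per-width #ifdef is needed. */
        static void show_regs_summary(struct pt_regs *regs)
        {
                printk("ip=%lx sp=%lx flags=%lx ax=%lx\n",
                       regs->ip, regs->sp, regs->flags, regs->ax);
        }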
Diffstat (limited to 'include/asm-x86')
-rw-r--r--	include/asm-x86/compat.h	 2
-rw-r--r--	include/asm-x86/elf.h		66
-rw-r--r--	include/asm-x86/kexec_32.h	36
-rw-r--r--	include/asm-x86/kexec_64.h	20
-rw-r--r--	include/asm-x86/kprobes_32.h	 2
-rw-r--r--	include/asm-x86/kprobes_64.h	 2
-rw-r--r--	include/asm-x86/mce.h		 4
-rw-r--r--	include/asm-x86/processor_32.h	20
-rw-r--r--	include/asm-x86/processor_64.h	 8
-rw-r--r--	include/asm-x86/ptrace.h	80
10 files changed, 147 insertions, 93 deletions
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
index 66ba7987184a..b270ee04959e 100644
--- a/include/asm-x86/compat.h
+++ b/include/asm-x86/compat.h
@@ -207,7 +207,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 static __inline__ void __user *compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = task_pt_regs(current);
-	return (void __user *)regs->rsp - len;
+	return (void __user *)regs->sp - len;
 }
 
 static inline int is_compat_task(void)
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index 60f5101d9483..5e5705bf082a 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -99,32 +99,32 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
    just to make things more deterministic.
  */
 #define ELF_PLAT_INIT(_r, load_addr) do { \
-	_r->ebx = 0; _r->ecx = 0; _r->edx = 0; \
-	_r->esi = 0; _r->edi = 0; _r->ebp = 0; \
-	_r->eax = 0; \
+	_r->bx = 0; _r->cx = 0; _r->dx = 0; \
+	_r->si = 0; _r->di = 0; _r->bp = 0; \
+	_r->ax = 0; \
 } while (0)
 
 /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
    now struct_user_regs, they are different) */
 
 #define ELF_CORE_COPY_REGS(pr_reg, regs) \
-	pr_reg[0] = regs->ebx; \
-	pr_reg[1] = regs->ecx; \
-	pr_reg[2] = regs->edx; \
-	pr_reg[3] = regs->esi; \
-	pr_reg[4] = regs->edi; \
-	pr_reg[5] = regs->ebp; \
-	pr_reg[6] = regs->eax; \
-	pr_reg[7] = regs->xds & 0xffff; \
-	pr_reg[8] = regs->xes & 0xffff; \
-	pr_reg[9] = regs->xfs & 0xffff; \
+	pr_reg[0] = regs->bx; \
+	pr_reg[1] = regs->cx; \
+	pr_reg[2] = regs->dx; \
+	pr_reg[3] = regs->si; \
+	pr_reg[4] = regs->di; \
+	pr_reg[5] = regs->bp; \
+	pr_reg[6] = regs->ax; \
+	pr_reg[7] = regs->ds & 0xffff; \
+	pr_reg[8] = regs->es & 0xffff; \
+	pr_reg[9] = regs->fs & 0xffff; \
 	savesegment(gs,pr_reg[10]); \
-	pr_reg[11] = regs->orig_eax; \
-	pr_reg[12] = regs->eip; \
-	pr_reg[13] = regs->xcs & 0xffff; \
-	pr_reg[14] = regs->eflags; \
-	pr_reg[15] = regs->esp; \
-	pr_reg[16] = regs->xss & 0xffff;
+	pr_reg[11] = regs->orig_ax; \
+	pr_reg[12] = regs->ip; \
+	pr_reg[13] = regs->cs & 0xffff; \
+	pr_reg[14] = regs->flags; \
+	pr_reg[15] = regs->sp; \
+	pr_reg[16] = regs->ss & 0xffff;
 
 #define ELF_PLATFORM (utsname()->machine)
 #define set_personality_64bit() do { } while (0)
@@ -142,9 +142,9 @@ extern unsigned int vdso_enabled;
 
 #define ELF_PLAT_INIT(_r, load_addr) do { \
 	struct task_struct *cur = current; \
-	(_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
-	(_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
-	(_r)->rax = 0; \
+	(_r)->bx = 0; (_r)->cx = 0; (_r)->dx = 0; \
+	(_r)->si = 0; (_r)->di = 0; (_r)->bp = 0; \
+	(_r)->ax = 0; \
 	(_r)->r8 = 0; \
 	(_r)->r9 = 0; \
 	(_r)->r10 = 0; \
@@ -169,22 +169,22 @@ extern unsigned int vdso_enabled;
 	(pr_reg)[1] = (regs)->r14; \
 	(pr_reg)[2] = (regs)->r13; \
 	(pr_reg)[3] = (regs)->r12; \
-	(pr_reg)[4] = (regs)->rbp; \
-	(pr_reg)[5] = (regs)->rbx; \
+	(pr_reg)[4] = (regs)->bp; \
+	(pr_reg)[5] = (regs)->bx; \
 	(pr_reg)[6] = (regs)->r11; \
 	(pr_reg)[7] = (regs)->r10; \
 	(pr_reg)[8] = (regs)->r9; \
 	(pr_reg)[9] = (regs)->r8; \
-	(pr_reg)[10] = (regs)->rax; \
-	(pr_reg)[11] = (regs)->rcx; \
-	(pr_reg)[12] = (regs)->rdx; \
-	(pr_reg)[13] = (regs)->rsi; \
-	(pr_reg)[14] = (regs)->rdi; \
-	(pr_reg)[15] = (regs)->orig_rax; \
-	(pr_reg)[16] = (regs)->rip; \
+	(pr_reg)[10] = (regs)->ax; \
+	(pr_reg)[11] = (regs)->cx; \
+	(pr_reg)[12] = (regs)->dx; \
+	(pr_reg)[13] = (regs)->si; \
+	(pr_reg)[14] = (regs)->di; \
+	(pr_reg)[15] = (regs)->orig_ax; \
+	(pr_reg)[16] = (regs)->ip; \
 	(pr_reg)[17] = (regs)->cs; \
-	(pr_reg)[18] = (regs)->eflags; \
-	(pr_reg)[19] = (regs)->rsp; \
+	(pr_reg)[18] = (regs)->flags; \
+	(pr_reg)[19] = (regs)->sp; \
 	(pr_reg)[20] = (regs)->ss; \
 	(pr_reg)[21] = current->thread.fs; \
 	(pr_reg)[22] = current->thread.gs; \
diff --git a/include/asm-x86/kexec_32.h b/include/asm-x86/kexec_32.h
index 4b9dc9e6b701..ff39d2f88022 100644
--- a/include/asm-x86/kexec_32.h
+++ b/include/asm-x86/kexec_32.h
@@ -45,7 +45,7 @@
 /* We can also handle crash dumps from 64 bit kernel. */
 #define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
 
-/* CPU does not save ss and esp on stack if execution is already
+/* CPU does not save ss and sp on stack if execution is already
  * running in kernel mode at the time of NMI occurrence. This code
  * fixes it.
  */
@@ -53,16 +53,16 @@ static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
 				    struct pt_regs *oldregs)
 {
 	memcpy(newregs, oldregs, sizeof(*newregs));
-	newregs->esp = (unsigned long)&(oldregs->esp);
+	newregs->sp = (unsigned long)&(oldregs->sp);
 	__asm__ __volatile__(
 		"xorl %%eax, %%eax\n\t"
 		"movw %%ss, %%ax\n\t"
-		:"=a"(newregs->xss));
+		:"=a"(newregs->ss));
 }
 
 /*
  * This function is responsible for capturing register states if coming
- * via panic otherwise just fix up the ss and esp if coming via kernel
+ * via panic otherwise just fix up the ss and sp if coming via kernel
  * mode exception.
  */
 static inline void crash_setup_regs(struct pt_regs *newregs,
@@ -71,21 +71,21 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 	if (oldregs)
 		crash_fixup_ss_esp(newregs, oldregs);
 	else {
-		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
-		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
-		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
-		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
-		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
-		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
-		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
-		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
-		__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
-		__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
-		__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
-		__asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
-		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
+		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx));
+		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx));
+		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx));
+		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si));
+		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di));
+		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp));
+		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax));
+		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp));
+		__asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
+		__asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
+		__asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds));
+		__asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es));
+		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags));
 
-		newregs->eip = (unsigned long)current_text_addr();
+		newregs->ip = (unsigned long)current_text_addr();
 	}
 }
 asmlinkage NORET_TYPE void
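One non-mechanical change in the crash_setup_regs() hunk above: the segment
captures are widened from "movw %%ss, %%ax" to "movl %%ss, %%eax".  A
plausible reading (our gloss, not stated in the commit message): a 32-bit
move from a segment register fills the whole destination register, so the
int-sized ss/cs/ds/es fields get well-defined upper bits without the
explicit xorl used in crash_fixup_ss_esp().  A minimal sketch of the
pattern, assuming nothing beyond GCC inline asm:

        /* Sketch only: read a selector with a 32-bit move so the
         * destination's upper 16 bits are not left holding whatever
         * happened to be in the register beforehand. */
        static inline unsigned int read_ss_selector(void)
        {
                unsigned int ss;
                __asm__ __volatile__("movl %%ss, %0" : "=r"(ss));
                return ss;
        }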
diff --git a/include/asm-x86/kexec_64.h b/include/asm-x86/kexec_64.h
index 738e581b67f8..b5f989b15c0b 100644
--- a/include/asm-x86/kexec_64.h
+++ b/include/asm-x86/kexec_64.h
@@ -60,14 +60,14 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 	if (oldregs)
 		memcpy(newregs, oldregs, sizeof(*newregs));
 	else {
-		__asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
-		__asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
-		__asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
-		__asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
-		__asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
-		__asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
-		__asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
-		__asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
+		__asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx));
+		__asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx));
+		__asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx));
+		__asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si));
+		__asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di));
+		__asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp));
+		__asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax));
+		__asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp));
 		__asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
 		__asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
 		__asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
@@ -78,9 +78,9 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 		__asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
 		__asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
 		__asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
-		__asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags));
+		__asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags));
 
-		newregs->rip = (unsigned long)current_text_addr();
+		newregs->ip = (unsigned long)current_text_addr();
 	}
 }
 
diff --git a/include/asm-x86/kprobes_32.h b/include/asm-x86/kprobes_32.h
index 9fe8f3bddfd5..2f38315bc39f 100644
--- a/include/asm-x86/kprobes_32.h
+++ b/include/asm-x86/kprobes_32.h
@@ -84,7 +84,7 @@ struct kprobe_ctlblk {
  */
 static inline void restore_interrupts(struct pt_regs *regs)
 {
-	if (regs->eflags & IF_MASK)
+	if (regs->flags & IF_MASK)
 		local_irq_enable();
 }
 
diff --git a/include/asm-x86/kprobes_64.h b/include/asm-x86/kprobes_64.h
index 743d76218fc9..8c919d35cdd3 100644
--- a/include/asm-x86/kprobes_64.h
+++ b/include/asm-x86/kprobes_64.h
@@ -77,7 +77,7 @@ struct kprobe_ctlblk {
  */
 static inline void restore_interrupts(struct pt_regs *regs)
 {
-	if (regs->eflags & IF_MASK)
+	if (regs->flags & IF_MASK)
 		local_irq_enable();
 }
 
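Note that after the rename, the restore_interrupts() bodies in
kprobes_32.h and kprobes_64.h read identically -- exactly the kind of
duplication the commit message says it wants to eliminate.  Assembled
from the two hunks above, a future shared copy would presumably look
like this (sketch, not part of this patch):

        static inline void restore_interrupts(struct pt_regs *regs)
        {
                /* re-enable interrupts if the probed context had them on */
                if (regs->flags & IF_MASK)
                        local_irq_enable();
        }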
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h
index e6ff507a73b0..94f1fd79e22a 100644
--- a/include/asm-x86/mce.h
+++ b/include/asm-x86/mce.h
@@ -13,7 +13,7 @@
 #define MCG_CTL_P        (1UL<<8)   /* MCG_CAP register available */
 
 #define MCG_STATUS_RIPV  (1UL<<0)   /* restart ip valid */
-#define MCG_STATUS_EIPV  (1UL<<1)   /* eip points to correct instruction */
+#define MCG_STATUS_EIPV  (1UL<<1)   /* ip points to correct instruction */
 #define MCG_STATUS_MCIP  (1UL<<2)   /* machine check in progress */
 
 #define MCI_STATUS_VAL   (1UL<<63)  /* valid error */
@@ -30,7 +30,7 @@ struct mce {
 	__u64 misc;
 	__u64 addr;
 	__u64 mcgstatus;
-	__u64 rip;
+	__u64 ip;
 	__u64 tsc;	/* cpu time stamp counter */
 	__u64 res1;	/* for future extension */
 	__u64 res2;	/* dito. */
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 3c67eacb3168..c85400fe58c4 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -398,14 +398,14 @@ struct thread_struct {
 
 #define start_thread(regs, new_eip, new_esp) do { \
 	__asm__("movl %0,%%gs": :"r" (0)); \
-	regs->xfs = 0; \
+	regs->fs = 0; \
 	set_fs(USER_DS); \
-	regs->xds = __USER_DS; \
-	regs->xes = __USER_DS; \
-	regs->xss = __USER_DS; \
-	regs->xcs = __USER_CS; \
-	regs->eip = new_eip; \
-	regs->esp = new_esp; \
+	regs->ds = __USER_DS; \
+	regs->es = __USER_DS; \
+	regs->ss = __USER_DS; \
+	regs->cs = __USER_CS; \
+	regs->ip = new_eip; \
+	regs->sp = new_esp; \
 } while (0)
 
 /* Forward declaration, a strange C thing */
@@ -440,7 +440,7 @@ unsigned long get_wchan(struct task_struct *p);
  * is accessable even if the CPU haven't stored the SS/ESP registers
  * on the stack (interrupt gate does not save these registers
  * when switching to the same priv ring).
- * Therefore beware: accessing the xss/esp fields of the
+ * Therefore beware: accessing the ss/esp fields of the
  * "struct pt_regs" is possible, but they may contain the
  * completely wrong values.
  */
@@ -451,8 +451,8 @@ unsigned long get_wchan(struct task_struct *p);
 	__regs__ - 1; \
 })
 
-#define KSTK_EIP(task) (task_pt_regs(task)->eip)
-#define KSTK_ESP(task) (task_pt_regs(task)->esp)
+#define KSTK_EIP(task) (task_pt_regs(task)->ip)
+#define KSTK_ESP(task) (task_pt_regs(task)->sp)
 
 
 struct microcode_header {
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index e7bea4fed642..797770113e6d 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -258,12 +258,12 @@ struct thread_struct {
 #define start_thread(regs,new_rip,new_rsp) do { \
 	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
 	load_gs_index(0); \
-	(regs)->rip = (new_rip); \
-	(regs)->rsp = (new_rsp); \
+	(regs)->ip = (new_rip); \
+	(regs)->sp = (new_rsp); \
 	write_pda(oldrsp, (new_rsp)); \
 	(regs)->cs = __USER_CS; \
 	(regs)->ss = __USER_DS; \
-	(regs)->eflags = 0x200; \
+	(regs)->flags = 0x200; \
 	set_fs(USER_DS); \
 } while(0)
 
@@ -297,7 +297,7 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
-#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ip)
 #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
 
 
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 04204f359298..9187b2fab754 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -10,6 +10,8 @@
 /* this struct defines the way the registers are stored on the
    stack during a system call. */
 
+#ifndef __KERNEL__
+
 struct pt_regs {
 	long ebx;
 	long ecx;
@@ -21,7 +23,7 @@ struct pt_regs {
 	int xds;
 	int xes;
 	int xfs;
-	/* int xgs; */
+	/* int gs; */
 	long orig_eax;
 	long eip;
 	int xcs;
@@ -30,7 +32,27 @@ struct pt_regs {
 	int xss;
 };
 
-#ifdef __KERNEL__
+#else /* __KERNEL__ */
+
+struct pt_regs {
+	long bx;
+	long cx;
+	long dx;
+	long si;
+	long di;
+	long bp;
+	long ax;
+	int ds;
+	int es;
+	int fs;
+	/* int gs; */
+	long orig_ax;
+	long ip;
+	int cs;
+	long flags;
+	long sp;
+	int ss;
+};
 
 #include <asm/vm86.h>
 #include <asm/segment.h>
@@ -47,27 +69,30 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int erro
  */
 static inline int user_mode(struct pt_regs *regs)
 {
-	return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
+	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
 }
 static inline int user_mode_vm(struct pt_regs *regs)
 {
-	return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
+	return ((regs->cs & SEGMENT_RPL_MASK) |
+		(regs->flags & VM_MASK)) >= USER_RPL;
 }
 static inline int v8086_mode(struct pt_regs *regs)
 {
-	return (regs->eflags & VM_MASK);
+	return (regs->flags & VM_MASK);
 }
 
-#define instruction_pointer(regs) ((regs)->eip)
-#define frame_pointer(regs) ((regs)->ebp)
+#define instruction_pointer(regs) ((regs)->ip)
+#define frame_pointer(regs) ((regs)->bp)
 #define stack_pointer(regs) ((unsigned long)(regs))
-#define regs_return_value(regs) ((regs)->eax)
+#define regs_return_value(regs) ((regs)->ax)
 
 extern unsigned long profile_pc(struct pt_regs *regs);
 #endif /* __KERNEL__ */
 
 #else /* __i386__ */
 
+#ifndef __KERNEL__
+
 struct pt_regs {
 	unsigned long r15;
 	unsigned long r14;
@@ -96,14 +121,43 @@ struct pt_regs {
 /* top of stack page */
 };
 
-#ifdef __KERNEL__
+#else /* __KERNEL__ */
+
+struct pt_regs {
+	unsigned long r15;
+	unsigned long r14;
+	unsigned long r13;
+	unsigned long r12;
+	unsigned long bp;
+	unsigned long bx;
+/* arguments: non interrupts/non tracing syscalls only save upto here*/
+	unsigned long r11;
+	unsigned long r10;
+	unsigned long r9;
+	unsigned long r8;
+	unsigned long ax;
+	unsigned long cx;
+	unsigned long dx;
+	unsigned long si;
+	unsigned long di;
+	unsigned long orig_ax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+	unsigned long ip;
+	unsigned long cs;
+	unsigned long flags;
+	unsigned long sp;
+	unsigned long ss;
+/* top of stack page */
+};
 
 #define user_mode(regs) (!!((regs)->cs & 3))
 #define user_mode_vm(regs) user_mode(regs)
-#define instruction_pointer(regs) ((regs)->rip)
-#define frame_pointer(regs) ((regs)->rbp)
-#define stack_pointer(regs) ((regs)->rsp)
-#define regs_return_value(regs) ((regs)->rax)
+#define v8086_mode(regs) 0	/* No V86 mode support in long mode */
+#define instruction_pointer(regs) ((regs)->ip)
+#define frame_pointer(regs) ((regs)->bp)
+#define stack_pointer(regs) ((regs)->sp)
+#define regs_return_value(regs) ((regs)->ax)
 
 extern unsigned long profile_pc(struct pt_regs *regs);
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
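The ptrace.h hunks show how the rename avoids breaking userspace: the old
e-/x-prefixed layout is kept verbatim under #ifndef __KERNEL__, and only
kernel code sees the new names.  A hypothetical 32-bit userspace fragment
(illustration only, not from this patch) therefore keeps compiling
unchanged against the exported header:

        /* Hypothetical userspace code: the exported definition of
         * struct pt_regs still spells the members eip/esp/eflags, so
         * source like this is unaffected by the in-kernel rename. */
        #include <stdio.h>
        #include <asm/ptrace.h>

        static void dump_regs(const struct pt_regs *regs)
        {
                printf("eip=%lx esp=%lx eflags=%lx\n",
                       regs->eip, regs->esp, regs->eflags);
        }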