path: root/arch/x86/kernel/vm86_32.c
author	H. Peter Anvin <hpa@zytor.com>	2008-01-30 07:30:56 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:56 -0500
commit	65ea5b0349903585bfed9720fa06f5edb4f1cd25 (patch)
tree	6c252228c34416b7e2077f23475de34500c2ab8a /arch/x86/kernel/vm86_32.c
parent	53756d3722172815f52272b28c6d5d5e9639adde (diff)
x86: rename the struct pt_regs members for 32/64-bit consistency
We have a lot of code which differs only by the naming of specific members of structures that contain registers. In order to enable additional unifications, this patch drops the e- or r- size prefix from the register names in struct pt_regs, and drops the x- prefixes for segment registers on the 32-bit side. This patch also performs the equivalent renames in some additional places that might be candidates for unification in the future.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
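For orientation before the diff, here is an abbreviated before/after sketch of the i386 struct pt_regs naming (illustrative only: the _old/_new suffixes are not kernel identifiers, and the member list is trimmed to the names this file touches):

	/* pre-patch naming: e- size prefix on GPRs, x- prefix on segments */
	struct pt_regs_old {
		long ebx, ecx, edx, esi, edi, ebp, eax;
		int  xds, xes, xfs;
		long orig_eax, eip;
		int  xcs;
		long eflags, esp;
		int  xss;
	};

	/* post-patch naming: prefixes dropped, matching the 64-bit names */
	struct pt_regs_new {
		long bx, cx, dx, si, di, bp, ax;
		int  ds, es, fs;
		long orig_ax, ip;
		int  cs;
		long flags, sp;
		int  ss;
	};

Only identifiers change; member offsets and sizes are untouched, which is why byte-aliasing macros such as AL()/AH() in the first hunk keep working unchanged.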
Diffstat (limited to 'arch/x86/kernel/vm86_32.c')
-rw-r--r--	arch/x86/kernel/vm86_32.c	94
1 file changed, 47 insertions(+), 47 deletions(-)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 157e4bedd3c5..980e85b90091 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -70,10 +70,10 @@
 /*
  * 8- and 16-bit register defines..
  */
-#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
-#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
-#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
-#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))
+#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
+#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
+#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
+#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
 
 /*
  * virtual flags (16 and 32-bit versions)
@@ -93,12 +93,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
 {
 	int ret = 0;
 
-	/* kernel_vm86_regs is missing xgs, so copy everything up to
+	/* kernel_vm86_regs is missing gs, so copy everything up to
 	   (but not including) orig_eax, and then rest including orig_eax. */
-	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
-	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
+	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
+	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
 			    sizeof(struct kernel_vm86_regs) -
-			    offsetof(struct kernel_vm86_regs, pt.orig_eax));
+			    offsetof(struct kernel_vm86_regs, pt.orig_ax));
 
 	return ret;
 }
@@ -110,12 +110,12 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
 {
 	int ret = 0;
 
-	/* copy eax-xfs inclusive */
-	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
-	/* copy orig_eax-__gsh+extra */
-	ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
+	/* copy ax-fs inclusive */
+	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
+	/* copy orig_ax-__gsh+extra */
+	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
 			      sizeof(struct kernel_vm86_regs) -
-			      offsetof(struct kernel_vm86_regs, pt.orig_eax) +
+			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
 			      extra);
 	return ret;
 }
@@ -138,7 +138,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 		printk("no vm86_info: BAD\n");
 		do_exit(SIGSEGV);
 	}
-	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+	set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
 	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
 	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
 	if (tmp) {
@@ -155,7 +155,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 
 	ret = KVM86->regs32;
 
-	ret->xfs = current->thread.saved_fs;
+	ret->fs = current->thread.saved_fs;
 	loadsegment(gs, current->thread.saved_gs);
 
 	return ret;
@@ -197,7 +197,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 asmlinkage int sys_vm86old(struct pt_regs regs)
 {
-	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
+	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
 	struct kernel_vm86_struct info; /* declare this _on top_,
 					 * this avoids wasting of stack space.
 					 * This remains on the stack until we
@@ -237,12 +237,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
 	struct vm86plus_struct __user *v86;
 
 	tsk = current;
-	switch (regs.ebx) {
+	switch (regs.bx) {
 		case VM86_REQUEST_IRQ:
 		case VM86_FREE_IRQ:
 		case VM86_GET_IRQ_BITS:
 		case VM86_GET_AND_RESET_IRQ:
-			ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
+			ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
 			goto out;
 		case VM86_PLUS_INSTALL_CHECK:
 			/* NOTE: on old vm86 stuff this will return the error
@@ -258,7 +258,7 @@ asmlinkage int sys_vm86(struct pt_regs regs)
 	ret = -EPERM;
 	if (tsk->thread.saved_esp0)
 		goto out;
-	v86 = (struct vm86plus_struct __user *)regs.ecx;
+	v86 = (struct vm86plus_struct __user *)regs.cx;
 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
 				       offsetof(struct kernel_vm86_struct, regs32) -
 				       sizeof(info.regs));
@@ -281,23 +281,23 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 /*
  * make sure the vm86() system call doesn't try to do anything silly
  */
-	info->regs.pt.xds = 0;
-	info->regs.pt.xes = 0;
-	info->regs.pt.xfs = 0;
+	info->regs.pt.ds = 0;
+	info->regs.pt.es = 0;
+	info->regs.pt.fs = 0;
 
 /* we are clearing gs later just before "jmp resume_userspace",
  * because it is not saved/restored.
  */
 
 /*
- * The eflags register is also special: we cannot trust that the user
+ * The flags register is also special: we cannot trust that the user
  * has set it up safely, so this makes sure interrupt etc flags are
  * inherited from protected mode.
  */
-	VEFLAGS = info->regs.pt.eflags;
-	info->regs.pt.eflags &= SAFE_MASK;
-	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
-	info->regs.pt.eflags |= VM_MASK;
+	VEFLAGS = info->regs.pt.flags;
+	info->regs.pt.flags &= SAFE_MASK;
+	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
+	info->regs.pt.flags |= VM_MASK;
 
 	switch (info->cpu_type) {
 		case CPU_286:
@@ -315,11 +315,11 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	}
 
 /*
- * Save old state, set default return value (%eax) to 0
+ * Save old state, set default return value (%ax) to 0
  */
-	info->regs32->eax = 0;
+	info->regs32->ax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	tsk->thread.saved_fs = info->regs32->xfs;
+	tsk->thread.saved_fs = info->regs32->fs;
 	savesegment(gs, tsk->thread.saved_gs);
 
 	tss = &per_cpu(init_tss, get_cpu());
@@ -352,7 +352,7 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
 	struct pt_regs * regs32;
 
 	regs32 = save_v86_state(regs16);
-	regs32->eax = retval;
+	regs32->ax = retval;
 	__asm__ __volatile__("movl %0,%%esp\n\t"
 		"movl %1,%%ebp\n\t"
 		"jmp resume_userspace"
@@ -373,12 +373,12 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
 
 static inline void clear_TF(struct kernel_vm86_regs * regs)
 {
-	regs->pt.eflags &= ~TF_MASK;
+	regs->pt.flags &= ~TF_MASK;
 }
 
 static inline void clear_AC(struct kernel_vm86_regs * regs)
 {
-	regs->pt.eflags &= ~AC_MASK;
+	regs->pt.flags &= ~AC_MASK;
 }
 
 /* It is correct to call set_IF(regs) from the set_vflags_*
@@ -392,11 +392,11 @@ static inline void clear_AC(struct kernel_vm86_regs * regs)
  * [KD]
  */
 
-static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
 {
-	set_flags(VEFLAGS, eflags, current->thread.v86mask);
-	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
-	if (eflags & IF_MASK)
+	set_flags(VEFLAGS, flags, current->thread.v86mask);
+	set_flags(regs->pt.flags, flags, SAFE_MASK);
+	if (flags & IF_MASK)
 		set_IF(regs);
 	else
 		clear_IF(regs);
@@ -405,7 +405,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
 {
 	set_flags(VFLAGS, flags, current->thread.v86mask);
-	set_flags(regs->pt.eflags, flags, SAFE_MASK);
+	set_flags(regs->pt.flags, flags, SAFE_MASK);
 	if (flags & IF_MASK)
 		set_IF(regs);
 	else
@@ -414,7 +414,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
 
 static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
 {
-	unsigned long flags = regs->pt.eflags & RETURN_MASK;
+	unsigned long flags = regs->pt.flags & RETURN_MASK;
 
 	if (VEFLAGS & VIF_MASK)
 		flags |= IF_MASK;
@@ -518,7 +518,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
 	unsigned long __user *intr_ptr;
 	unsigned long segoffs;
 
-	if (regs->pt.xcs == BIOSSEG)
+	if (regs->pt.cs == BIOSSEG)
 		goto cannot_handle;
 	if (is_revectored(i, &KVM86->int_revectored))
 		goto cannot_handle;
@@ -530,9 +530,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
 	if ((segoffs >> 16) == BIOSSEG)
 		goto cannot_handle;
 	pushw(ssp, sp, get_vflags(regs), cannot_handle);
-	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
+	pushw(ssp, sp, regs->pt.cs, cannot_handle);
 	pushw(ssp, sp, IP(regs), cannot_handle);
-	regs->pt.xcs = segoffs >> 16;
+	regs->pt.cs = segoffs >> 16;
 	SP(regs) -= 6;
 	IP(regs) = segoffs & 0xffff;
 	clear_TF(regs);
@@ -549,7 +549,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
 	if (VMPI.is_vm86pus) {
 		if ( (trapno==3) || (trapno==1) )
 			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
-		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
+		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
 		return 0;
 	}
 	if (trapno !=1)
@@ -585,10 +585,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 		handle_vm86_trap(regs, 0, 1); \
 		return; } while (0)
 
-	orig_flags = *(unsigned short *)&regs->pt.eflags;
+	orig_flags = *(unsigned short *)&regs->pt.flags;
 
-	csp = (unsigned char __user *) (regs->pt.xcs << 4);
-	ssp = (unsigned char __user *) (regs->pt.xss << 4);
+	csp = (unsigned char __user *) (regs->pt.cs << 4);
+	ssp = (unsigned char __user *) (regs->pt.ss << 4);
 	sp = SP(regs);
 	ip = IP(regs);
 
@@ -675,7 +675,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 			SP(regs) += 6;
 		}
 		IP(regs) = newip;
-		regs->pt.xcs = newcs;
+		regs->pt.cs = newcs;
 		CHECK_IF_IN_TRAP;
 		if (data32) {
 			set_vflags_long(newflags, regs);
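A closing aside on the byte-register macros in the first hunk: they work by aliasing the low two bytes of the (renamed) ax member, which is valid because x86 is little-endian. A standalone sketch of the same trick, using a hypothetical mini struct rather than the kernel's own types:

	#include <stdio.h>

	struct regs { unsigned long ax; };

	/* same aliasing as the kernel's AL()/AH() macros above */
	#define AL(r) (((unsigned char *)&((r)->ax))[0])	/* low byte of ax  */
	#define AH(r) (((unsigned char *)&((r)->ax))[1])	/* high byte of ax */

	int main(void)
	{
		struct regs r = { .ax = 0x1234 };
		/* on little-endian x86 this prints AL=34 AH=12 */
		printf("AL=%02x AH=%02x\n", AL(&r), AH(&r));
		return 0;
	}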