path: root/arch/x86/kernel/vm86_32.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2008-01-30 08:40:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-01-30 08:40:09 -0500
commit		dd430ca20c40ecccd6954a7efd13d4398f507728 (patch)
tree		b65089436d17b2bcc6054ede2e335a821b50007f /arch/x86/kernel/vm86_32.c
parent		60e233172eabdd1f831bd48631b9626ce2279d9b (diff)
parent		afadcd788f37bfa62d92662e54a720c26c91becf (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (890 commits)
  x86: fix nodemap_size according to nodeid bits
  x86: fix overlap between pagetable with bss section
  x86: add PCI IDs to k8topology_64.c
  x86: fix early_ioremap pagetable ops
  x86: use the same pgd_list for PAE and 64-bit
  x86: defer cr3 reload when doing pud_clear()
  x86: early boot debugging via FireWire (ohci1394_dma=early)
  x86: don't special-case pmd allocations as much
  x86: shrink some ifdefs in fault.c
  x86: ignore spurious faults
  x86: remove nx_enabled from fault.c
  x86: unify fault_32|64.c
  x86: unify fault_32|64.c with ifdefs
  x86: unify fault_32|64.c by ifdef'd function bodies
  x86: arch/x86/mm/init_32.c printk fixes
  x86: arch/x86/mm/init_32.c cleanup
  x86: arch/x86/mm/init_64.c printk fixes
  x86: unify ioremap
  x86: fixes some bugs about EFI memory map handling
  x86: use reboot_type on EFI 32
  ...
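Most of the vm86_32.c changes in this merge are mechanical fallout from the x86 unification work: the 32-bit struct pt_regs dropped the e-/x- prefixes from its field names so 32-bit and 64-bit code can share the same names. A rough before/after sketch of the 32-bit layout (abridged from that era's include/asm-x86/ptrace.h; the _old/_new struct names are hypothetical, used only so both versions can coexist in one example, and field types are approximate):

struct pt_regs_old {			/* 32-bit layout before the rename */
	long ebx, ecx, edx, esi, edi, ebp, eax;
	int xds, xes, xfs;		/* segment registers carried a leading "x" */
	/* int xgs; -- commented out in the header: gs is not saved here,
	 * which is what the vm86 copy helpers' comment below refers to */
	long orig_eax, eip;
	int xcs;
	long eflags, esp;
	int xss;
};

struct pt_regs_new {			/* same layout, unified names */
	long bx, cx, dx, si, di, bp, ax;
	int ds, es, fs;
	long orig_ax, ip;
	int cs;
	long flags, sp;
	int ss;
};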
Diffstat (limited to 'arch/x86/kernel/vm86_32.c')
-rw-r--r--	arch/x86/kernel/vm86_32.c	115
1 file changed, 57 insertions(+), 58 deletions(-)
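One hunk below is not a pure rename: the FASTCALL()/fastcall annotations on save_v86_state() are dropped. By this point the 32-bit kernel is always built with -mregparm=3, so register argument passing is the default and the explicit attribute is redundant. A minimal sketch of what the i386 macros historically expanded to (a paraphrase of the old include/asm-i386/linkage.h, not its exact text):

#define FASTCALL(x)	x __attribute__((regparm(3)))
#define fastcall	__attribute__((regparm(3)))

/* With -mregparm=3 the plain declaration already means the same thing: */
struct pt_regs;
struct kernel_vm86_regs;
struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs);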
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 157e4bedd3c5..738c2104df30 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -70,10 +70,10 @@
 /*
  * 8- and 16-bit register defines..
  */
-#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
-#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
-#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
-#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))
+#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
+#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
+#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
+#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
 
 /*
  * virtual flags (16 and 32-bit versions)
@@ -93,12 +93,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
 {
 	int ret = 0;
 
-	/* kernel_vm86_regs is missing xgs, so copy everything up to
+	/* kernel_vm86_regs is missing gs, so copy everything up to
 	   (but not including) orig_eax, and then rest including orig_eax. */
-	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
-	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
+	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
+	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
 			    sizeof(struct kernel_vm86_regs) -
-			    offsetof(struct kernel_vm86_regs, pt.orig_eax));
+			    offsetof(struct kernel_vm86_regs, pt.orig_ax));
 
 	return ret;
 }
@@ -110,18 +110,17 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
 {
 	int ret = 0;
 
-	/* copy eax-xfs inclusive */
-	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
-	/* copy orig_eax-__gsh+extra */
-	ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
+	/* copy ax-fs inclusive */
+	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
+	/* copy orig_ax-__gsh+extra */
+	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
 			      sizeof(struct kernel_vm86_regs) -
-			      offsetof(struct kernel_vm86_regs, pt.orig_eax) +
+			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
 			      extra);
 	return ret;
 }
 
-struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
-struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
 {
 	struct tss_struct *tss;
 	struct pt_regs *ret;
@@ -138,7 +137,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 		printk("no vm86_info: BAD\n");
 		do_exit(SIGSEGV);
 	}
-	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+	set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
 	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
 	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
 	if (tmp) {
@@ -147,15 +146,15 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 	}
 
 	tss = &per_cpu(init_tss, get_cpu());
-	current->thread.esp0 = current->thread.saved_esp0;
+	current->thread.sp0 = current->thread.saved_sp0;
 	current->thread.sysenter_cs = __KERNEL_CS;
-	load_esp0(tss, &current->thread);
-	current->thread.saved_esp0 = 0;
+	load_sp0(tss, &current->thread);
+	current->thread.saved_sp0 = 0;
 	put_cpu();
 
 	ret = KVM86->regs32;
 
-	ret->xfs = current->thread.saved_fs;
+	ret->fs = current->thread.saved_fs;
 	loadsegment(gs, current->thread.saved_gs);
 
 	return ret;
@@ -197,7 +196,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 asmlinkage int sys_vm86old(struct pt_regs regs)
 {
-	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
+	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
 	struct kernel_vm86_struct info; /* declare this _on top_,
 					 * this avoids wasting of stack space.
 					 * This remains on the stack until we
@@ -207,7 +206,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
 	int tmp, ret = -EPERM;
 
 	tsk = current;
-	if (tsk->thread.saved_esp0)
+	if (tsk->thread.saved_sp0)
 		goto out;
 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
 				       offsetof(struct kernel_vm86_struct, vm86plus) -
@@ -237,12 +236,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
 	struct vm86plus_struct __user *v86;
 
 	tsk = current;
-	switch (regs.ebx) {
+	switch (regs.bx) {
 	case VM86_REQUEST_IRQ:
 	case VM86_FREE_IRQ:
 	case VM86_GET_IRQ_BITS:
 	case VM86_GET_AND_RESET_IRQ:
-		ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
+		ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
 		goto out;
 	case VM86_PLUS_INSTALL_CHECK:
 		/* NOTE: on old vm86 stuff this will return the error
@@ -256,9 +255,9 @@ asmlinkage int sys_vm86(struct pt_regs regs)
 
 	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
 	ret = -EPERM;
-	if (tsk->thread.saved_esp0)
+	if (tsk->thread.saved_sp0)
 		goto out;
-	v86 = (struct vm86plus_struct __user *)regs.ecx;
+	v86 = (struct vm86plus_struct __user *)regs.cx;
 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
 				       offsetof(struct kernel_vm86_struct, regs32) -
 				       sizeof(info.regs));
@@ -281,23 +280,23 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 /*
  * make sure the vm86() system call doesn't try to do anything silly
  */
-	info->regs.pt.xds = 0;
-	info->regs.pt.xes = 0;
-	info->regs.pt.xfs = 0;
+	info->regs.pt.ds = 0;
+	info->regs.pt.es = 0;
+	info->regs.pt.fs = 0;
 
 /* we are clearing gs later just before "jmp resume_userspace",
  * because it is not saved/restored.
  */
 
 /*
- * The eflags register is also special: we cannot trust that the user
+ * The flags register is also special: we cannot trust that the user
  * has set it up safely, so this makes sure interrupt etc flags are
  * inherited from protected mode.
  */
-	VEFLAGS = info->regs.pt.eflags;
-	info->regs.pt.eflags &= SAFE_MASK;
-	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
-	info->regs.pt.eflags |= VM_MASK;
+	VEFLAGS = info->regs.pt.flags;
+	info->regs.pt.flags &= SAFE_MASK;
+	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
+	info->regs.pt.flags |= VM_MASK;
 
 	switch (info->cpu_type) {
 	case CPU_286:
@@ -315,18 +314,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	}
 
 /*
- * Save old state, set default return value (%eax) to 0
+ * Save old state, set default return value (%ax) to 0
  */
-	info->regs32->eax = 0;
-	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	tsk->thread.saved_fs = info->regs32->xfs;
+	info->regs32->ax = 0;
+	tsk->thread.saved_sp0 = tsk->thread.sp0;
+	tsk->thread.saved_fs = info->regs32->fs;
 	savesegment(gs, tsk->thread.saved_gs);
 
 	tss = &per_cpu(init_tss, get_cpu());
-	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
 	if (cpu_has_sep)
 		tsk->thread.sysenter_cs = 0;
-	load_esp0(tss, &tsk->thread);
+	load_sp0(tss, &tsk->thread);
 	put_cpu();
 
 	tsk->thread.screen_bitmap = info->screen_bitmap;
@@ -352,7 +351,7 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
 	struct pt_regs * regs32;
 
 	regs32 = save_v86_state(regs16);
-	regs32->eax = retval;
+	regs32->ax = retval;
 	__asm__ __volatile__("movl %0,%%esp\n\t"
 		"movl %1,%%ebp\n\t"
 		"jmp resume_userspace"
@@ -373,30 +372,30 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
 
 static inline void clear_TF(struct kernel_vm86_regs * regs)
 {
-	regs->pt.eflags &= ~TF_MASK;
+	regs->pt.flags &= ~TF_MASK;
 }
 
 static inline void clear_AC(struct kernel_vm86_regs * regs)
 {
-	regs->pt.eflags &= ~AC_MASK;
+	regs->pt.flags &= ~AC_MASK;
 }
 
 /* It is correct to call set_IF(regs) from the set_vflags_*
  * functions. However someone forgot to call clear_IF(regs)
  * in the opposite case.
  * After the command sequence CLI PUSHF STI POPF you should
- * end up with interrups disabled, but you ended up with
+ * end up with interrupts disabled, but you ended up with
  * interrupts enabled.
  * ( I was testing my own changes, but the only bug I
  *   could find was in a function I had not changed. )
  * [KD]
  */
 
-static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
 {
-	set_flags(VEFLAGS, eflags, current->thread.v86mask);
-	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
-	if (eflags & IF_MASK)
+	set_flags(VEFLAGS, flags, current->thread.v86mask);
+	set_flags(regs->pt.flags, flags, SAFE_MASK);
+	if (flags & IF_MASK)
 		set_IF(regs);
 	else
 		clear_IF(regs);
@@ -405,7 +404,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
 {
 	set_flags(VFLAGS, flags, current->thread.v86mask);
-	set_flags(regs->pt.eflags, flags, SAFE_MASK);
+	set_flags(regs->pt.flags, flags, SAFE_MASK);
 	if (flags & IF_MASK)
 		set_IF(regs);
 	else
@@ -414,7 +413,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
 
 static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
 {
-	unsigned long flags = regs->pt.eflags & RETURN_MASK;
+	unsigned long flags = regs->pt.flags & RETURN_MASK;
 
 	if (VEFLAGS & VIF_MASK)
 		flags |= IF_MASK;
@@ -518,7 +517,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
 	unsigned long __user *intr_ptr;
 	unsigned long segoffs;
 
-	if (regs->pt.xcs == BIOSSEG)
+	if (regs->pt.cs == BIOSSEG)
 		goto cannot_handle;
 	if (is_revectored(i, &KVM86->int_revectored))
 		goto cannot_handle;
@@ -530,9 +529,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
 	if ((segoffs >> 16) == BIOSSEG)
 		goto cannot_handle;
 	pushw(ssp, sp, get_vflags(regs), cannot_handle);
-	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
+	pushw(ssp, sp, regs->pt.cs, cannot_handle);
 	pushw(ssp, sp, IP(regs), cannot_handle);
-	regs->pt.xcs = segoffs >> 16;
+	regs->pt.cs = segoffs >> 16;
 	SP(regs) -= 6;
 	IP(regs) = segoffs & 0xffff;
 	clear_TF(regs);
@@ -549,7 +548,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
 	if (VMPI.is_vm86pus) {
 		if ( (trapno==3) || (trapno==1) )
 			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
-		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
+		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
 		return 0;
 	}
 	if (trapno !=1)
@@ -585,10 +584,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 		handle_vm86_trap(regs, 0, 1); \
 		return; } while (0)
 
-	orig_flags = *(unsigned short *)&regs->pt.eflags;
+	orig_flags = *(unsigned short *)&regs->pt.flags;
 
-	csp = (unsigned char __user *) (regs->pt.xcs << 4);
-	ssp = (unsigned char __user *) (regs->pt.xss << 4);
+	csp = (unsigned char __user *) (regs->pt.cs << 4);
+	ssp = (unsigned char __user *) (regs->pt.ss << 4);
 	sp = SP(regs);
 	ip = IP(regs);
 
@@ -675,7 +674,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 		SP(regs) += 6;
 	}
 	IP(regs) = newip;
-	regs->pt.xcs = newcs;
+	regs->pt.cs = newcs;
 	CHECK_IF_IN_TRAP;
 	if (data32) {
 		set_vflags_long(newflags, regs);