author     Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>  2008-02-22 17:10:40 -0500
committer  Ingo Molnar <mingo@elte.hu>  2008-04-17 11:40:50 -0400
commit     83e714e82f3434a32c3f54f7223f78345d873218 (patch)
tree       f10abe2bcbf9c665133b945123a4246160327255 /arch/x86/kernel/vm86_32.c
parent     fb87a298fb79357fa5b27e6916ae1c45bf94dac7 (diff)
x86: coding style fixes to arch/x86/kernel/vm86_32.c
Before:
total: 64 errors, 18 warnings, 840 lines checked
After:
total: 12 errors, 15 warnings, 844 lines checked
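
These totals are checkpatch output. As a rough sketch, they can be reproduced
with something like the following, assuming a kernel tree checked out at the
parent commit fb87a298fb79 for the "before" run (the exact counts depend on
the version of checkpatch.pl in the tree):

  $ perl scripts/checkpatch.pl --file arch/x86/kernel/vm86_32.c | tail -n1
  total: 64 errors, 18 warnings, 840 lines checked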
No code changed:
arch/x86/kernel/vm86_32.o:
text data bss dec hex filename
4449 28 132 4609 1201 vm86_32.o.before
4449 28 132 4609 1201 vm86_32.o.after
md5:
e4e51ed7689d17f04148554a3c6d5bb6 vm86_32.o.before.asm
e4e51ed7689d17f04148554a3c6d5bb6 vm86_32.o.after.asm
Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
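
The "No code changed" evidence above pairs the size(1) summary of the object
file with an md5 of its disassembly; identical checksums before and after
indicate the patch altered only the source text, not the generated code. A
minimal sketch of one way to run such a check, assuming the before/after
objects were saved aside as vm86_32.o.before and vm86_32.o.after (the objdump
invocation here is an assumption, not taken from this commit):

  $ size vm86_32.o.before vm86_32.o.after
  $ objdump -d vm86_32.o.before > vm86_32.o.before.asm
  $ objdump -d vm86_32.o.after  > vm86_32.o.after.asm
  $ md5sum vm86_32.o.before.asm vm86_32.o.after.asm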
Diffstat (limited to 'arch/x86/kernel/vm86_32.c')
-rw-r--r--  arch/x86/kernel/vm86_32.c | 174
1 file changed, 89 insertions(+), 85 deletions(-)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 738c2104df30..6a91fcf92d67 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -64,7 +64,7 @@
 
 
 #define KVM86	((struct kernel_vm86_struct *)regs)
-#define VMPI 	KVM86->vm86plus
+#define VMPI	KVM86->vm86plus
 
 
 /*
@@ -81,7 +81,7 @@
 #define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
 #define VEFLAGS	(current->thread.v86flags)
 
-#define set_flags(X,new,mask) \
+#define set_flags(X, new, mask) \
 ((X) = ((X) & ~(mask)) | ((new) & (mask)))
 
 #define SAFE_MASK	(0xDD5)
@@ -93,8 +93,10 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
 {
 	int ret = 0;
 
-	/* kernel_vm86_regs is missing gs, so copy everything up to
-	   (but not including) orig_eax, and then rest including orig_eax. */
+	/*
+	 * kernel_vm86_regs is missing gs, so copy everything up to
+	 * (but not including) orig_eax, and then rest including orig_eax.
+	 */
 	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
 	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
 			    sizeof(struct kernel_vm86_regs) -
@@ -120,7 +122,7 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
 	return ret;
 }
 
-struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
 {
 	struct tss_struct *tss;
 	struct pt_regs *ret;
@@ -138,8 +140,8 @@ struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
 		do_exit(SIGSEGV);
 	}
 	set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
-	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
-	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
+	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
+	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
 	if (tmp) {
 		printk("vm86: could not access userspace vm86_info\n");
 		do_exit(SIGSEGV);
@@ -237,20 +239,21 @@ asmlinkage int sys_vm86(struct pt_regs regs)
 
 	tsk = current;
 	switch (regs.bx) {
-		case VM86_REQUEST_IRQ:
-		case VM86_FREE_IRQ:
-		case VM86_GET_IRQ_BITS:
-		case VM86_GET_AND_RESET_IRQ:
-			ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
-			goto out;
-		case VM86_PLUS_INSTALL_CHECK:
-			/* NOTE: on old vm86 stuff this will return the error
-			   from access_ok(), because the subfunction is
-			   interpreted as (invalid) address to vm86_struct.
-			   So the installation check works.
-			 */
-			ret = 0;
-			goto out;
+	case VM86_REQUEST_IRQ:
+	case VM86_FREE_IRQ:
+	case VM86_GET_IRQ_BITS:
+	case VM86_GET_AND_RESET_IRQ:
+		ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
+		goto out;
+	case VM86_PLUS_INSTALL_CHECK:
+		/*
+		 * NOTE: on old vm86 stuff this will return the error
+		 * from access_ok(), because the subfunction is
+		 * interpreted as (invalid) address to vm86_struct.
+		 * So the installation check works.
+		 */
+		ret = 0;
+		goto out;
 	}
 
 	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
@@ -299,18 +302,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	info->regs.pt.flags |= VM_MASK;
 
 	switch (info->cpu_type) {
-		case CPU_286:
-			tsk->thread.v86mask = 0;
-			break;
-		case CPU_386:
-			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
-			break;
-		case CPU_486:
-			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
-			break;
-		default:
-			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
-			break;
+	case CPU_286:
+		tsk->thread.v86mask = 0;
+		break;
+	case CPU_386:
+		tsk->thread.v86mask = NT_MASK | IOPL_MASK;
+		break;
+	case CPU_486:
+		tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
+		break;
+	default:
+		tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
+		break;
 	}
 
 	/*
@@ -346,9 +349,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	/* we never return here */
 }
 
-static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
+static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
 {
-	struct pt_regs * regs32;
+	struct pt_regs *regs32;
 
 	regs32 = save_v86_state(regs16);
 	regs32->ax = retval;
@@ -358,29 +361,30 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
 		: : "r" (regs32), "r" (current_thread_info()));
 }
 
-static inline void set_IF(struct kernel_vm86_regs * regs)
+static inline void set_IF(struct kernel_vm86_regs *regs)
 {
 	VEFLAGS |= VIF_MASK;
 	if (VEFLAGS & VIP_MASK)
 		return_to_32bit(regs, VM86_STI);
 }
 
-static inline void clear_IF(struct kernel_vm86_regs * regs)
+static inline void clear_IF(struct kernel_vm86_regs *regs)
 {
 	VEFLAGS &= ~VIF_MASK;
 }
 
-static inline void clear_TF(struct kernel_vm86_regs * regs)
+static inline void clear_TF(struct kernel_vm86_regs *regs)
 {
 	regs->pt.flags &= ~TF_MASK;
 }
 
-static inline void clear_AC(struct kernel_vm86_regs * regs)
+static inline void clear_AC(struct kernel_vm86_regs *regs)
 {
 	regs->pt.flags &= ~AC_MASK;
 }
 
-/* It is correct to call set_IF(regs) from the set_vflags_*
+/*
+ * It is correct to call set_IF(regs) from the set_vflags_*
  * functions. However someone forgot to call clear_IF(regs)
  * in the opposite case.
  * After the command sequence CLI PUSHF STI POPF you should
@@ -391,7 +395,7 @@ static inline void clear_AC(struct kernel_vm86_regs * regs)
  * [KD]
  */
 
-static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
 {
 	set_flags(VEFLAGS, flags, current->thread.v86mask);
 	set_flags(regs->pt.flags, flags, SAFE_MASK);
@@ -401,7 +405,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs
 	clear_IF(regs);
 }
 
-static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
 {
 	set_flags(VFLAGS, flags, current->thread.v86mask);
 	set_flags(regs->pt.flags, flags, SAFE_MASK);
@@ -411,7 +415,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
 	clear_IF(regs);
 }
 
-static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
+static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
 {
 	unsigned long flags = regs->pt.flags & RETURN_MASK;
 
@@ -421,11 +425,11 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
 	return flags | (VEFLAGS & current->thread.v86mask);
 }
 
-static inline int is_revectored(int nr, struct revectored_struct * bitmap)
+static inline int is_revectored(int nr, struct revectored_struct *bitmap)
 {
 	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
 		:"=r" (nr)
-		:"m" (*bitmap),"r" (nr));
+		:"m" (*bitmap), "r" (nr));
 	return nr;
 }
 
@@ -437,7 +441,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap)
 		ptr--; \
 		if (put_user(__val, base + ptr) < 0) \
 			goto err_label; \
-	} while(0)
+	} while (0)
 
 #define pushw(base, ptr, val, err_label) \
 	do { \
@@ -448,7 +452,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap)
 		ptr--; \
 		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
 			goto err_label; \
-	} while(0)
+	} while (0)
 
 #define pushl(base, ptr, val, err_label) \
 	do { \
@@ -465,7 +469,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap)
 		ptr--; \
 		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
 			goto err_label; \
-	} while(0)
+	} while (0)
 
 #define popb(base, ptr, err_label) \
 	({ \
@@ -512,7 +516,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap)
  * in userspace is always better than an Oops anyway.) [KD]
  */
 static void do_int(struct kernel_vm86_regs *regs, int i,
-		unsigned char __user * ssp, unsigned short sp)
+		unsigned char __user *ssp, unsigned short sp)
 {
 	unsigned long __user *intr_ptr;
 	unsigned long segoffs;
@@ -521,7 +525,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
 		goto cannot_handle;
 	if (is_revectored(i, &KVM86->int_revectored))
 		goto cannot_handle;
-	if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored))
+	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
 		goto cannot_handle;
 	intr_ptr = (unsigned long __user *) (i << 2);
 	if (get_user(segoffs, intr_ptr))
@@ -543,15 +547,15 @@ cannot_handle:
 	return_to_32bit(regs, VM86_INTx + (i << 8));
 }
 
-int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
+int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 {
 	if (VMPI.is_vm86pus) {
-		if ( (trapno==3) || (trapno==1) )
+		if ((trapno == 3) || (trapno == 1))
 			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
 		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
 		return 0;
 	}
-	if (trapno !=1)
+	if (trapno != 1)
 		return 1; /* we let this handle by the calling routine */
 	if (current->ptrace & PT_PTRACED) {
 		unsigned long flags;
@@ -566,7 +570,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
 	return 0;
 }
 
-void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
+void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
 {
 	unsigned char opcode;
 	unsigned char __user *csp;
@@ -595,17 +599,17 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 	pref_done = 0;
 	do {
 		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
-			case 0x66:      /* 32-bit data */     data32=1; break;
-			case 0x67:      /* 32-bit address */  break;
-			case 0x2e:      /* CS */              break;
-			case 0x3e:      /* DS */              break;
-			case 0x26:      /* ES */              break;
-			case 0x36:      /* SS */              break;
-			case 0x65:      /* GS */              break;
-			case 0x64:      /* FS */              break;
-			case 0xf2:      /* repnz */           break;
-			case 0xf3:      /* rep */             break;
-			default: pref_done = 1;
+		case 0x66:      /* 32-bit data */     data32 = 1; break;
+		case 0x67:      /* 32-bit address */  break;
+		case 0x2e:      /* CS */              break;
+		case 0x3e:      /* DS */              break;
+		case 0x26:      /* ES */              break;
+		case 0x36:      /* SS */              break;
+		case 0x65:      /* GS */              break;
+		case 0x64:      /* FS */              break;
+		case 0xf2:      /* repnz */           break;
+		case 0xf3:      /* rep */             break;
+		default: pref_done = 1;
 		}
 	} while (!pref_done);
 
@@ -628,7 +632,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 	{
 		unsigned long newflags;
 		if (data32) {
-			newflags=popl(ssp, sp, simulate_sigsegv);
+			newflags = popl(ssp, sp, simulate_sigsegv);
 			SP(regs) += 4;
 		} else {
 			newflags = popw(ssp, sp, simulate_sigsegv);
@@ -636,20 +640,20 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 		}
 		IP(regs) = ip;
 		CHECK_IF_IN_TRAP;
-		if (data32) {
+		if (data32)
 			set_vflags_long(newflags, regs);
-		} else {
+		else
 			set_vflags_short(newflags, regs);
-		}
+
 		VM86_FAULT_RETURN;
 	}
 
 	/* int xx */
 	case 0xcd: {
-		int intno=popb(csp, ip, simulate_sigsegv);
+		int intno = popb(csp, ip, simulate_sigsegv);
 		IP(regs) = ip;
 		if (VMPI.vm86dbg_active) {
-			if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
+			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
 				return_to_32bit(regs, VM86_INTx + (intno << 8));
 		}
 		do_int(regs, intno, ssp, sp);
@@ -663,9 +667,9 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 		unsigned long newcs;
 		unsigned long newflags;
 		if (data32) {
-			newip=popl(ssp, sp, simulate_sigsegv);
-			newcs=popl(ssp, sp, simulate_sigsegv);
-			newflags=popl(ssp, sp, simulate_sigsegv);
+			newip = popl(ssp, sp, simulate_sigsegv);
+			newcs = popl(ssp, sp, simulate_sigsegv);
+			newflags = popl(ssp, sp, simulate_sigsegv);
 			SP(regs) += 12;
 		} else {
 			newip = popw(ssp, sp, simulate_sigsegv);
@@ -734,18 +738,18 @@ static struct vm86_irqs {
 static DEFINE_SPINLOCK(irqbits_lock);
 static int irqbits;
 
-#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
+#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
 	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
-	| (1 << SIGUNUSED) )
+	| (1 << SIGUNUSED))
 
 static irqreturn_t irq_handler(int intno, void *dev_id)
 {
 	int irq_bit;
 	unsigned long flags;
 
 	spin_lock_irqsave(&irqbits_lock, flags);
 	irq_bit = 1 << intno;
-	if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
+	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
 		goto out;
 	irqbits |= irq_bit;
 	if (vm86_irqs[intno].sig)
@@ -759,7 +763,7 @@ static irqreturn_t irq_handler(int intno, void *dev_id)
 	return IRQ_HANDLED;
 
 out:
-	spin_unlock_irqrestore(&irqbits_lock, flags);	
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 	return IRQ_NONE;
 }
 
@@ -770,9 +774,9 @@ static inline void free_vm86_irq(int irqnumber)
 	free_irq(irqnumber, NULL);
 	vm86_irqs[irqnumber].tsk = NULL;
 
-        spin_lock_irqsave(&irqbits_lock, flags);
-        irqbits &= ~(1 << irqnumber);
-        spin_unlock_irqrestore(&irqbits_lock, flags);
+	spin_lock_irqsave(&irqbits_lock, flags);
+	irqbits &= ~(1 << irqnumber);
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 }
 
 void release_vm86_irqs(struct task_struct *task)
@@ -788,10 +792,10 @@ static inline int get_and_reset_irq(int irqnumber)
 	int bit;
 	unsigned long flags;
 	int ret = 0;
-	
-        if (invalid_vm86_irq(irqnumber)) return 0;
-        if (vm86_irqs[irqnumber].tsk != current) return 0;
-        spin_lock_irqsave(&irqbits_lock, flags);
+
+	if (invalid_vm86_irq(irqnumber)) return 0;
+	if (vm86_irqs[irqnumber].tsk != current) return 0;
+	spin_lock_irqsave(&irqbits_lock, flags);
 	bit = irqbits & (1 << irqnumber);
 	irqbits &= ~bit;
 	if (bit) {
@@ -799,7 +803,7 @@ static inline int get_and_reset_irq(int irqnumber)
 		ret = 1;
 	}
 
-        spin_unlock_irqrestore(&irqbits_lock, flags);
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 	return ret;
 }
 