diff options
author | Gustavo F. Padovan <gustavo@las.ic.unicamp.br> | 2008-07-29 01:48:51 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-31 12:21:30 -0400 |
commit | 7de08b4e1ed8d80e6086f71b7e99fc4b397aae39 (patch) | |
tree | 288c336994fe4083bbe5acf87ada5bb70100989a /arch/x86/kernel/process_64.c | |
parent | 3964cd3a6721f18ef1dd67b9a0a89dc5b36683b9 (diff) |
x86: coding styles fixes to arch/x86/kernel/process_64.c
Fix about 50 errors and many warnings without changing process_64.o
arch/x86/kernel/process_64.o:
text data bss dec hex filename
5236 8 24 5268 1494 process_64.o.after
5236 8 24 5268 1494 process_64.o.before
md5:
9c35e9debdea4e471288c6e8ca267a75 process_64.o.after
9c35e9debdea4e471288c6e8ca267a75 process_64.o.before
Signed-off-by: Gustavo F. Padovan <gustavo@las.ic.unicamp.br>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r-- | arch/x86/kernel/process_64.c | 101 |
1 files changed, 50 insertions, 51 deletions
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3fb62a7d9a16..4da8514dd25c 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -37,11 +37,11 @@ | |||
37 | #include <linux/kdebug.h> | 37 | #include <linux/kdebug.h> |
38 | #include <linux/tick.h> | 38 | #include <linux/tick.h> |
39 | #include <linux/prctl.h> | 39 | #include <linux/prctl.h> |
40 | #include <linux/uaccess.h> | ||
41 | #include <linux/io.h> | ||
40 | 42 | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
43 | #include <asm/system.h> | 44 | #include <asm/system.h> |
44 | #include <asm/io.h> | ||
45 | #include <asm/processor.h> | 45 | #include <asm/processor.h> |
46 | #include <asm/i387.h> | 46 | #include <asm/i387.h> |
47 | #include <asm/mmu_context.h> | 47 | #include <asm/mmu_context.h> |
@@ -88,7 +88,7 @@ void exit_idle(void) | |||
88 | #ifdef CONFIG_HOTPLUG_CPU | 88 | #ifdef CONFIG_HOTPLUG_CPU |
89 | DECLARE_PER_CPU(int, cpu_state); | 89 | DECLARE_PER_CPU(int, cpu_state); |
90 | 90 | ||
91 | #include <asm/nmi.h> | 91 | #include <linux/nmi.h> |
92 | /* We halt the CPU with physical CPU hotplug */ | 92 | /* We halt the CPU with physical CPU hotplug */ |
93 | static inline void play_dead(void) | 93 | static inline void play_dead(void) |
94 | { | 94 | { |
@@ -152,7 +152,7 @@ void cpu_idle(void) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /* Prints also some state that isn't saved in the pt_regs */ | 154 | /* Prints also some state that isn't saved in the pt_regs */ |
155 | void __show_regs(struct pt_regs * regs) | 155 | void __show_regs(struct pt_regs *regs) |
156 | { | 156 | { |
157 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; | 157 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; |
158 | unsigned long d0, d1, d2, d3, d6, d7; | 158 | unsigned long d0, d1, d2, d3, d6, d7; |
@@ -177,28 +177,28 @@ void __show_regs(struct pt_regs * regs) | |||
177 | printk("RBP: %016lx R08: %016lx R09: %016lx\n", | 177 | printk("RBP: %016lx R08: %016lx R09: %016lx\n", |
178 | regs->bp, regs->r8, regs->r9); | 178 | regs->bp, regs->r8, regs->r9); |
179 | printk("R10: %016lx R11: %016lx R12: %016lx\n", | 179 | printk("R10: %016lx R11: %016lx R12: %016lx\n", |
180 | regs->r10, regs->r11, regs->r12); | 180 | regs->r10, regs->r11, regs->r12); |
181 | printk("R13: %016lx R14: %016lx R15: %016lx\n", | 181 | printk("R13: %016lx R14: %016lx R15: %016lx\n", |
182 | regs->r13, regs->r14, regs->r15); | 182 | regs->r13, regs->r14, regs->r15); |
183 | 183 | ||
184 | asm("movl %%ds,%0" : "=r" (ds)); | 184 | asm("movl %%ds,%0" : "=r" (ds)); |
185 | asm("movl %%cs,%0" : "=r" (cs)); | 185 | asm("movl %%cs,%0" : "=r" (cs)); |
186 | asm("movl %%es,%0" : "=r" (es)); | 186 | asm("movl %%es,%0" : "=r" (es)); |
187 | asm("movl %%fs,%0" : "=r" (fsindex)); | 187 | asm("movl %%fs,%0" : "=r" (fsindex)); |
188 | asm("movl %%gs,%0" : "=r" (gsindex)); | 188 | asm("movl %%gs,%0" : "=r" (gsindex)); |
189 | 189 | ||
190 | rdmsrl(MSR_FS_BASE, fs); | 190 | rdmsrl(MSR_FS_BASE, fs); |
191 | rdmsrl(MSR_GS_BASE, gs); | 191 | rdmsrl(MSR_GS_BASE, gs); |
192 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); | 192 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); |
193 | 193 | ||
194 | cr0 = read_cr0(); | 194 | cr0 = read_cr0(); |
195 | cr2 = read_cr2(); | 195 | cr2 = read_cr2(); |
196 | cr3 = read_cr3(); | 196 | cr3 = read_cr3(); |
197 | cr4 = read_cr4(); | 197 | cr4 = read_cr4(); |
198 | 198 | ||
199 | printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", | 199 | printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", |
200 | fs,fsindex,gs,gsindex,shadowgs); | 200 | fs, fsindex, gs, gsindex, shadowgs); |
201 | printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); | 201 | printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); |
202 | printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); | 202 | printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); |
203 | 203 | ||
204 | get_debugreg(d0, 0); | 204 | get_debugreg(d0, 0); |
@@ -314,10 +314,10 @@ void prepare_to_copy(struct task_struct *tsk) | |||
314 | 314 | ||
315 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 315 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, |
316 | unsigned long unused, | 316 | unsigned long unused, |
317 | struct task_struct * p, struct pt_regs * regs) | 317 | struct task_struct *p, struct pt_regs *regs) |
318 | { | 318 | { |
319 | int err; | 319 | int err; |
320 | struct pt_regs * childregs; | 320 | struct pt_regs *childregs; |
321 | struct task_struct *me = current; | 321 | struct task_struct *me = current; |
322 | 322 | ||
323 | childregs = ((struct pt_regs *) | 323 | childregs = ((struct pt_regs *) |
@@ -362,10 +362,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | |||
362 | if (test_thread_flag(TIF_IA32)) | 362 | if (test_thread_flag(TIF_IA32)) |
363 | err = do_set_thread_area(p, -1, | 363 | err = do_set_thread_area(p, -1, |
364 | (struct user_desc __user *)childregs->si, 0); | 364 | (struct user_desc __user *)childregs->si, 0); |
365 | else | 365 | else |
366 | #endif | 366 | #endif |
367 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); | 367 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); |
368 | if (err) | 368 | if (err) |
369 | goto out; | 369 | goto out; |
370 | } | 370 | } |
371 | err = 0; | 371 | err = 0; |
@@ -544,7 +544,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
544 | unsigned fsindex, gsindex; | 544 | unsigned fsindex, gsindex; |
545 | 545 | ||
546 | /* we're going to use this soon, after a few expensive things */ | 546 | /* we're going to use this soon, after a few expensive things */ |
547 | if (next_p->fpu_counter>5) | 547 | if (next_p->fpu_counter > 5) |
548 | prefetch(next->xstate); | 548 | prefetch(next->xstate); |
549 | 549 | ||
550 | /* | 550 | /* |
@@ -552,13 +552,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
552 | */ | 552 | */ |
553 | load_sp0(tss, next); | 553 | load_sp0(tss, next); |
554 | 554 | ||
555 | /* | 555 | /* |
556 | * Switch DS and ES. | 556 | * Switch DS and ES. |
557 | * This won't pick up thread selector changes, but I guess that is ok. | 557 | * This won't pick up thread selector changes, but I guess that is ok. |
558 | */ | 558 | */ |
559 | savesegment(es, prev->es); | 559 | savesegment(es, prev->es); |
560 | if (unlikely(next->es | prev->es)) | 560 | if (unlikely(next->es | prev->es)) |
561 | loadsegment(es, next->es); | 561 | loadsegment(es, next->es); |
562 | 562 | ||
563 | savesegment(ds, prev->ds); | 563 | savesegment(ds, prev->ds); |
564 | if (unlikely(next->ds | prev->ds)) | 564 | if (unlikely(next->ds | prev->ds)) |
@@ -584,7 +584,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
584 | */ | 584 | */ |
585 | arch_leave_lazy_cpu_mode(); | 585 | arch_leave_lazy_cpu_mode(); |
586 | 586 | ||
587 | /* | 587 | /* |
588 | * Switch FS and GS. | 588 | * Switch FS and GS. |
589 | * | 589 | * |
590 | * Segment register != 0 always requires a reload. Also | 590 | * Segment register != 0 always requires a reload. Also |
@@ -593,13 +593,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
593 | */ | 593 | */ |
594 | if (unlikely(fsindex | next->fsindex | prev->fs)) { | 594 | if (unlikely(fsindex | next->fsindex | prev->fs)) { |
595 | loadsegment(fs, next->fsindex); | 595 | loadsegment(fs, next->fsindex); |
596 | /* | 596 | /* |
597 | * Check if the user used a selector != 0; if yes | 597 | * Check if the user used a selector != 0; if yes |
598 | * clear 64bit base, since overloaded base is always | 598 | * clear 64bit base, since overloaded base is always |
599 | * mapped to the Null selector | 599 | * mapped to the Null selector |
600 | */ | 600 | */ |
601 | if (fsindex) | 601 | if (fsindex) |
602 | prev->fs = 0; | 602 | prev->fs = 0; |
603 | } | 603 | } |
604 | /* when next process has a 64bit base use it */ | 604 | /* when next process has a 64bit base use it */ |
605 | if (next->fs) | 605 | if (next->fs) |
@@ -609,7 +609,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
609 | if (unlikely(gsindex | next->gsindex | prev->gs)) { | 609 | if (unlikely(gsindex | next->gsindex | prev->gs)) { |
610 | load_gs_index(next->gsindex); | 610 | load_gs_index(next->gsindex); |
611 | if (gsindex) | 611 | if (gsindex) |
612 | prev->gs = 0; | 612 | prev->gs = 0; |
613 | } | 613 | } |
614 | if (next->gs) | 614 | if (next->gs) |
615 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | 615 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); |
@@ -618,12 +618,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
618 | /* Must be after DS reload */ | 618 | /* Must be after DS reload */ |
619 | unlazy_fpu(prev_p); | 619 | unlazy_fpu(prev_p); |
620 | 620 | ||
621 | /* | 621 | /* |
622 | * Switch the PDA and FPU contexts. | 622 | * Switch the PDA and FPU contexts. |
623 | */ | 623 | */ |
624 | prev->usersp = read_pda(oldrsp); | 624 | prev->usersp = read_pda(oldrsp); |
625 | write_pda(oldrsp, next->usersp); | 625 | write_pda(oldrsp, next->usersp); |
626 | write_pda(pcurrent, next_p); | 626 | write_pda(pcurrent, next_p); |
627 | 627 | ||
628 | write_pda(kernelstack, | 628 | write_pda(kernelstack, |
629 | (unsigned long)task_stack_page(next_p) + | 629 | (unsigned long)task_stack_page(next_p) + |
@@ -664,7 +664,7 @@ long sys_execve(char __user *name, char __user * __user *argv, | |||
664 | char __user * __user *envp, struct pt_regs *regs) | 664 | char __user * __user *envp, struct pt_regs *regs) |
665 | { | 665 | { |
666 | long error; | 666 | long error; |
667 | char * filename; | 667 | char *filename; |
668 | 668 | ||
669 | filename = getname(name); | 669 | filename = getname(name); |
670 | error = PTR_ERR(filename); | 670 | error = PTR_ERR(filename); |
@@ -722,55 +722,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs) | |||
722 | unsigned long get_wchan(struct task_struct *p) | 722 | unsigned long get_wchan(struct task_struct *p) |
723 | { | 723 | { |
724 | unsigned long stack; | 724 | unsigned long stack; |
725 | u64 fp,ip; | 725 | u64 fp, ip; |
726 | int count = 0; | 726 | int count = 0; |
727 | 727 | ||
728 | if (!p || p == current || p->state==TASK_RUNNING) | 728 | if (!p || p == current || p->state == TASK_RUNNING) |
729 | return 0; | 729 | return 0; |
730 | stack = (unsigned long)task_stack_page(p); | 730 | stack = (unsigned long)task_stack_page(p); |
731 | if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) | 731 | if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) |
732 | return 0; | 732 | return 0; |
733 | fp = *(u64 *)(p->thread.sp); | 733 | fp = *(u64 *)(p->thread.sp); |
734 | do { | 734 | do { |
735 | if (fp < (unsigned long)stack || | 735 | if (fp < (unsigned long)stack || |
736 | fp > (unsigned long)stack+THREAD_SIZE) | 736 | fp > (unsigned long)stack+THREAD_SIZE) |
737 | return 0; | 737 | return 0; |
738 | ip = *(u64 *)(fp+8); | 738 | ip = *(u64 *)(fp+8); |
739 | if (!in_sched_functions(ip)) | 739 | if (!in_sched_functions(ip)) |
740 | return ip; | 740 | return ip; |
741 | fp = *(u64 *)fp; | 741 | fp = *(u64 *)fp; |
742 | } while (count++ < 16); | 742 | } while (count++ < 16); |
743 | return 0; | 743 | return 0; |
744 | } | 744 | } |
745 | 745 | ||
746 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | 746 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) |
747 | { | 747 | { |
748 | int ret = 0; | 748 | int ret = 0; |
749 | int doit = task == current; | 749 | int doit = task == current; |
750 | int cpu; | 750 | int cpu; |
751 | 751 | ||
752 | switch (code) { | 752 | switch (code) { |
753 | case ARCH_SET_GS: | 753 | case ARCH_SET_GS: |
754 | if (addr >= TASK_SIZE_OF(task)) | 754 | if (addr >= TASK_SIZE_OF(task)) |
755 | return -EPERM; | 755 | return -EPERM; |
756 | cpu = get_cpu(); | 756 | cpu = get_cpu(); |
757 | /* handle small bases via the GDT because that's faster to | 757 | /* handle small bases via the GDT because that's faster to |
758 | switch. */ | 758 | switch. */ |
759 | if (addr <= 0xffffffff) { | 759 | if (addr <= 0xffffffff) { |
760 | set_32bit_tls(task, GS_TLS, addr); | 760 | set_32bit_tls(task, GS_TLS, addr); |
761 | if (doit) { | 761 | if (doit) { |
762 | load_TLS(&task->thread, cpu); | 762 | load_TLS(&task->thread, cpu); |
763 | load_gs_index(GS_TLS_SEL); | 763 | load_gs_index(GS_TLS_SEL); |
764 | } | 764 | } |
765 | task->thread.gsindex = GS_TLS_SEL; | 765 | task->thread.gsindex = GS_TLS_SEL; |
766 | task->thread.gs = 0; | 766 | task->thread.gs = 0; |
767 | } else { | 767 | } else { |
768 | task->thread.gsindex = 0; | 768 | task->thread.gsindex = 0; |
769 | task->thread.gs = addr; | 769 | task->thread.gs = addr; |
770 | if (doit) { | 770 | if (doit) { |
771 | load_gs_index(0); | 771 | load_gs_index(0); |
772 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); | 772 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); |
773 | } | 773 | } |
774 | } | 774 | } |
775 | put_cpu(); | 775 | put_cpu(); |
776 | break; | 776 | break; |
@@ -824,8 +824,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | |||
824 | rdmsrl(MSR_KERNEL_GS_BASE, base); | 824 | rdmsrl(MSR_KERNEL_GS_BASE, base); |
825 | else | 825 | else |
826 | base = task->thread.gs; | 826 | base = task->thread.gs; |
827 | } | 827 | } else |
828 | else | ||
829 | base = task->thread.gs; | 828 | base = task->thread.gs; |
830 | ret = put_user(base, (unsigned long __user *)addr); | 829 | ret = put_user(base, (unsigned long __user *)addr); |
831 | break; | 830 | break; |