Diffstat (limited to 'arch/x86/kernel/process_64.c')
 -rw-r--r--  arch/x86/kernel/process_64.c | 131
 1 file changed, 66 insertions(+), 65 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 79e3e173ab40..2a8ccb9238b4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,11 +37,11 @@
 #include <linux/kdebug.h>
 #include <linux/tick.h>
 #include <linux/prctl.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 
-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
@@ -89,7 +89,7 @@ void exit_idle(void)
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
-#include <asm/nmi.h>
+#include <linux/nmi.h>
 /* We halt the CPU with physical CPU hotplug */
 static inline void play_dead(void)
 {
@@ -154,7 +154,7 @@ void cpu_idle(void)
 }
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs *regs)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 	unsigned long d0, d1, d2, d3, d6, d7;
@@ -163,59 +163,61 @@ void __show_regs(struct pt_regs * regs)
 
 	printk("\n");
 	print_modules();
-	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
-	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
 	printk_address(regs->ip, 1);
-	printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
-		regs->flags);
-	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
+		regs->sp, regs->flags);
+	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
 	       regs->ax, regs->bx, regs->cx);
-	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
 	       regs->dx, regs->si, regs->di);
-	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
 	       regs->bp, regs->r8, regs->r9);
-	printk("R10: %016lx R11: %016lx R12: %016lx\n",
+	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
 	       regs->r10, regs->r11, regs->r12);
-	printk("R13: %016lx R14: %016lx R15: %016lx\n",
+	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
 	       regs->r13, regs->r14, regs->r15);
 
 	asm("movl %%ds,%0" : "=r" (ds));
 	asm("movl %%cs,%0" : "=r" (cs));
 	asm("movl %%es,%0" : "=r" (es));
 	asm("movl %%fs,%0" : "=r" (fsindex));
 	asm("movl %%gs,%0" : "=r" (gsindex));
 
 	rdmsrl(MSR_FS_BASE, fs);
 	rdmsrl(MSR_GS_BASE, gs);
 	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = read_cr3();
 	cr4 = read_cr4();
 
-	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
-	       fs,fsindex,gs,gsindex,shadowgs);
-	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
-	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
+	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+	       fs, fsindex, gs, gsindex, shadowgs);
+	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+			es, cr0);
+	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
+			cr4);
 
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
-	printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
+	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
 	get_debugreg(d3, 3);
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
-	printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
 }
 
 void show_regs(struct pt_regs *regs)
 {
-	printk("CPU %d:", smp_processor_id());
+	printk(KERN_INFO "CPU %d:", smp_processor_id());
 	__show_regs(regs);
 	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
 }
@@ -324,10 +326,10 @@ void prepare_to_copy(struct task_struct *tsk)
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long unused,
-	struct task_struct * p, struct pt_regs * regs)
+	struct task_struct *p, struct pt_regs *regs)
 {
 	int err;
-	struct pt_regs * childregs;
+	struct pt_regs *childregs;
 	struct task_struct *me = current;
 
 	childregs = ((struct pt_regs *)
@@ -372,10 +374,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		if (test_thread_flag(TIF_IA32))
 			err = do_set_thread_area(p, -1,
 				(struct user_desc __user *)childregs->si, 0);
 		else
 #endif
 			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
 		if (err)
 			goto out;
 	}
 	err = 0;
@@ -568,7 +570,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter>5)
+	if (next_p->fpu_counter > 5)
 		prefetch(next->xstate);
 
 	/*
@@ -576,13 +578,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	load_sp0(tss, next);
 
 	/*
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
 	savesegment(es, prev->es);
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
 	savesegment(ds, prev->ds);
 	if (unlikely(next->ds | prev->ds))
@@ -608,7 +610,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_leave_lazy_cpu_mode();
 
 	/*
 	 * Switch FS and GS.
 	 *
 	 * Segment register != 0 always requires a reload. Also
@@ -617,13 +619,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	if (unlikely(fsindex | next->fsindex | prev->fs)) {
 		loadsegment(fs, next->fsindex);
 		/*
 		 * Check if the user used a selector != 0; if yes
 		 * clear 64bit base, since overloaded base is always
 		 * mapped to the Null selector
 		 */
 		if (fsindex)
 			prev->fs = 0;
 	}
 	/* when next process has a 64bit base use it */
 	if (next->fs)
@@ -633,7 +635,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (unlikely(gsindex | next->gsindex | prev->gs)) {
 		load_gs_index(next->gsindex);
 		if (gsindex)
 			prev->gs = 0;
 	}
 	if (next->gs)
 		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
@@ -642,12 +644,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* Must be after DS reload */
 	unlazy_fpu(prev_p);
 
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
 	prev->usersp = read_pda(oldrsp);
 	write_pda(oldrsp, next->usersp);
 	write_pda(pcurrent, next_p);
 
 	write_pda(kernelstack,
 		  (unsigned long)task_stack_page(next_p) +
@@ -688,7 +690,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
 		char __user * __user *envp, struct pt_regs *regs)
 {
 	long error;
-	char * filename;
+	char *filename;
 
 	filename = getname(name);
 	error = PTR_ERR(filename);
@@ -746,55 +748,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long stack;
-	u64 fp,ip;
+	u64 fp, ip;
 	int count = 0;
 
-	if (!p || p == current || p->state==TASK_RUNNING)
+	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 	stack = (unsigned long)task_stack_page(p);
 	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
 		return 0;
 	fp = *(u64 *)(p->thread.sp);
 	do {
 		if (fp < (unsigned long)stack ||
 		    fp > (unsigned long)stack+THREAD_SIZE)
 			return 0;
 		ip = *(u64 *)(fp+8);
 		if (!in_sched_functions(ip))
 			return ip;
 		fp = *(u64 *)fp;
 	} while (count++ < 16);
 	return 0;
 }
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
 	int ret = 0;
 	int doit = task == current;
 	int cpu;
 
 	switch (code) {
 	case ARCH_SET_GS:
 		if (addr >= TASK_SIZE_OF(task))
 			return -EPERM;
 		cpu = get_cpu();
 		/* handle small bases via the GDT because that's faster to
 		   switch. */
 		if (addr <= 0xffffffff) {
 			set_32bit_tls(task, GS_TLS, addr);
 			if (doit) {
 				load_TLS(&task->thread, cpu);
 				load_gs_index(GS_TLS_SEL);
 			}
 			task->thread.gsindex = GS_TLS_SEL;
 			task->thread.gs = 0;
 		} else {
 			task->thread.gsindex = 0;
 			task->thread.gs = addr;
 			if (doit) {
 				load_gs_index(0);
 				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
 			}
 		}
 		put_cpu();
 		break;
@@ -848,8 +850,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else
 				base = task->thread.gs;
-		}
-		else
+		} else
 			base = task->thread.gs;
 		ret = put_user(base, (unsigned long __user *)addr);
 		break;
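
Note on the get_wchan() hunk above: it chases saved frame pointers on the target task's stack, bounded to 16 steps, until it finds a return address outside the scheduler. The following is a minimal userspace sketch of that same walk, not kernel code; it assumes x86-64, frames built with a frame pointer (gcc -O0 -fno-omit-frame-pointer), and the helper names (walk_frames, inner, middle) are made up for illustration.

/*
 * Simplified userspace analogue of the frame-pointer walk in get_wchan().
 * Walks the current thread's own RBP chain instead of another task's
 * saved stack; compile with: gcc -O0 -fno-omit-frame-pointer walk.c
 */
#include <stdio.h>
#include <stdint.h>

/* One x86-64 stack frame: saved caller RBP, then the return address. */
struct frame {
	struct frame *next;	/* saved RBP of the caller */
	uint64_t ret_ip;	/* return address into the caller */
};

static void walk_frames(void)
{
	struct frame *fp = __builtin_frame_address(0);
	int count = 0;

	/* Like get_wchan(), bound the walk so a corrupt chain can't loop. */
	do {
		if (!fp || !fp->next)
			return;
		printf("frame %d: return address %#lx\n",
		       count, (unsigned long)fp->ret_ip);
		fp = fp->next;	/* fp = *(u64 *)fp in the kernel version */
	} while (count++ < 16);
}

static void inner(void)  { walk_frames(); }
static void middle(void) { inner(); }

int main(void)
{
	middle();
	return 0;
}

The kernel version additionally bounds each fp against the task's stack page before dereferencing it, since it is reading another task's memory rather than its own call chain.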