Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--  arch/x86/kernel/process_64.c | 204
1 files changed, 111 insertions, 93 deletions
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71553b664e2..c958120fb1b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,11 +37,11 @@
 #include <linux/kdebug.h>
 #include <linux/tick.h>
 #include <linux/prctl.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 
-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
@@ -51,6 +51,7 @@
 #include <asm/proto.h>
 #include <asm/ia32.h>
 #include <asm/idle.h>
+#include <asm/syscalls.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -62,6 +63,13 @@ void idle_notifier_register(struct notifier_block *n)
 {
 	atomic_notifier_chain_register(&idle_notifier, n);
 }
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 
 void enter_idle(void)
 {
@@ -85,28 +93,12 @@ void exit_idle(void)
 	__exit_idle();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-DECLARE_PER_CPU(int, cpu_state);
-
-#include <asm/nmi.h>
-/* We halt the CPU with physical CPU hotplug */
-static inline void play_dead(void)
-{
-	idle_task_exit();
-	mb();
-	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-
-	local_irq_disable();
-	/* mask all interrupts, flush any and all caches, and halt */
-	wbinvd_halt();
-}
-#else
+#ifndef CONFIG_SMP
 static inline void play_dead(void)
 {
 	BUG();
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
 
 /*
 * The idle thread. There's no useful work to be
@@ -151,7 +143,7 @@ void cpu_idle(void)
 }
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 	unsigned long d0, d1, d2, d3, d6, d7;
@@ -160,60 +152,65 @@ void __show_regs(struct pt_regs * regs)
 
 	printk("\n");
 	print_modules();
-	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
-	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
 	printk_address(regs->ip, 1);
-	printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
-		regs->flags);
-	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
+			regs->sp, regs->flags);
+	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
 		regs->ax, regs->bx, regs->cx);
-	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
 		regs->dx, regs->si, regs->di);
-	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
 		regs->bp, regs->r8, regs->r9);
-	printk("R10: %016lx R11: %016lx R12: %016lx\n",
+	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
 		regs->r10, regs->r11, regs->r12);
-	printk("R13: %016lx R14: %016lx R15: %016lx\n",
+	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
 		regs->r13, regs->r14, regs->r15);
 
 	asm("movl %%ds,%0" : "=r" (ds));
 	asm("movl %%cs,%0" : "=r" (cs));
 	asm("movl %%es,%0" : "=r" (es));
 	asm("movl %%fs,%0" : "=r" (fsindex));
 	asm("movl %%gs,%0" : "=r" (gsindex));
 
 	rdmsrl(MSR_FS_BASE, fs);
 	rdmsrl(MSR_GS_BASE, gs);
 	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+
+	if (!all)
+		return;
 
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = read_cr3();
 	cr4 = read_cr4();
 
-	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
-		fs,fsindex,gs,gsindex,shadowgs);
-	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
-	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
+	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+		fs, fsindex, gs, gsindex, shadowgs);
+	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+			es, cr0);
+	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
+			cr4);
 
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
-	printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
+	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
 	get_debugreg(d3, 3);
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
-	printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
 }
 
 void show_regs(struct pt_regs *regs)
 {
-	printk("CPU %d:", smp_processor_id());
-	__show_regs(regs);
+	printk(KERN_INFO "CPU %d:", smp_processor_id());
+	__show_regs(regs, 1);
 	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
 }
 
@@ -238,6 +235,14 @@ void exit_thread(void)
 		t->io_bitmap_max = 0;
 		put_cpu();
 	}
+#ifdef CONFIG_X86_DS
+	/* Free any DS contexts that have not been properly released. */
+	if (unlikely(t->ds_ctx)) {
+		/* we clear debugctl to make sure DS is not used. */
+		update_debugctlmsr(0);
+		ds_free(t->ds_ctx);
+	}
+#endif /* CONFIG_X86_DS */
 }
 
 void flush_thread(void)
@@ -313,10 +318,10 @@ void prepare_to_copy(struct task_struct *tsk)
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long unused,
-	struct task_struct * p, struct pt_regs * regs)
+	struct task_struct *p, struct pt_regs *regs)
 {
 	int err;
-	struct pt_regs * childregs;
+	struct pt_regs *childregs;
 	struct task_struct *me = current;
 
 	childregs = ((struct pt_regs *)
@@ -361,10 +366,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		if (test_thread_flag(TIF_IA32))
 			err = do_set_thread_area(p, -1,
 				(struct user_desc __user *)childregs->si, 0);
 		else
 #endif
 			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
 		if (err)
 			goto out;
 	}
 	err = 0;
@@ -471,13 +476,27 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 	next = &next_p->thread;
 
 	debugctl = prev->debugctlmsr;
-	if (next->ds_area_msr != prev->ds_area_msr) {
-		/* we clear debugctl to make sure DS
-		 * is not in use when we change it */
-		debugctl = 0;
-		update_debugctlmsr(0);
-		wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
+
+#ifdef CONFIG_X86_DS
+	{
+		unsigned long ds_prev = 0, ds_next = 0;
+
+		if (prev->ds_ctx)
+			ds_prev = (unsigned long)prev->ds_ctx->ds;
+		if (next->ds_ctx)
+			ds_next = (unsigned long)next->ds_ctx->ds;
+
+		if (ds_next != ds_prev) {
+			/*
+			 * We clear debugctl to make sure DS
+			 * is not in use when we change it:
+			 */
+			debugctl = 0;
+			update_debugctlmsr(0);
+			wrmsrl(MSR_IA32_DS_AREA, ds_next);
+		}
 	}
+#endif /* CONFIG_X86_DS */
 
 	if (next->debugctlmsr != debugctl)
 		update_debugctlmsr(next->debugctlmsr);
@@ -515,13 +534,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
 
-#ifdef X86_BTS
+#ifdef CONFIG_X86_PTRACE_BTS
 	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
 	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
-#endif
+#endif /* CONFIG_X86_PTRACE_BTS */
 }
 
 /*
@@ -543,7 +562,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter>5)
+	if (next_p->fpu_counter > 5)
 		prefetch(next->xstate);
 
 	/*
@@ -551,13 +570,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	load_sp0(tss, next);
 
 	/*
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
 	savesegment(es, prev->es);
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
 	savesegment(ds, prev->ds);
 	if (unlikely(next->ds | prev->ds))
@@ -583,7 +602,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_leave_lazy_cpu_mode();
 
 	/*
 	 * Switch FS and GS.
 	 *
 	 * Segment register != 0 always requires a reload. Also
@@ -592,13 +611,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	if (unlikely(fsindex | next->fsindex | prev->fs)) {
 		loadsegment(fs, next->fsindex);
 		/*
 		 * Check if the user used a selector != 0; if yes
 		 * clear 64bit base, since overloaded base is always
 		 * mapped to the Null selector
 		 */
 		if (fsindex)
 			prev->fs = 0;
 	}
 	/* when next process has a 64bit base use it */
 	if (next->fs)
@@ -608,7 +627,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (unlikely(gsindex | next->gsindex | prev->gs)) {
 		load_gs_index(next->gsindex);
 		if (gsindex)
 			prev->gs = 0;
 	}
 	if (next->gs)
 		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
@@ -617,12 +636,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* Must be after DS reload */
 	unlazy_fpu(prev_p);
 
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
 	prev->usersp = read_pda(oldrsp);
 	write_pda(oldrsp, next->usersp);
 	write_pda(pcurrent, next_p);
 
 	write_pda(kernelstack,
 		  (unsigned long)task_stack_page(next_p) +
@@ -663,7 +682,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
 		char __user * __user *envp, struct pt_regs *regs)
 {
 	long error;
-	char * filename;
+	char *filename;
 
 	filename = getname(name);
 	error = PTR_ERR(filename);
@@ -721,55 +740,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long stack;
-	u64 fp,ip;
+	u64 fp, ip;
 	int count = 0;
 
-	if (!p || p == current || p->state==TASK_RUNNING)
+	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 	stack = (unsigned long)task_stack_page(p);
-	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
+	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
 		return 0;
 	fp = *(u64 *)(p->thread.sp);
 	do {
 		if (fp < (unsigned long)stack ||
-		    fp > (unsigned long)stack+THREAD_SIZE)
+		    fp >= (unsigned long)stack+THREAD_SIZE)
 			return 0;
 		ip = *(u64 *)(fp+8);
 		if (!in_sched_functions(ip))
 			return ip;
 		fp = *(u64 *)fp;
 	} while (count++ < 16);
 	return 0;
 }
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
 	int ret = 0;
 	int doit = task == current;
 	int cpu;
 
 	switch (code) {
 	case ARCH_SET_GS:
 		if (addr >= TASK_SIZE_OF(task))
 			return -EPERM;
 		cpu = get_cpu();
 		/* handle small bases via the GDT because that's faster to
 		   switch. */
 		if (addr <= 0xffffffff) {
 			set_32bit_tls(task, GS_TLS, addr);
 			if (doit) {
 				load_TLS(&task->thread, cpu);
 				load_gs_index(GS_TLS_SEL);
 			}
 			task->thread.gsindex = GS_TLS_SEL;
 			task->thread.gs = 0;
 		} else {
 			task->thread.gsindex = 0;
 			task->thread.gs = addr;
 			if (doit) {
 				load_gs_index(0);
 				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
 			}
 		}
 		put_cpu();
 		break;
@@ -823,8 +842,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else
 				base = task->thread.gs;
-		}
-		else
+		} else
 			base = task->thread.gs;
 		ret = put_user(base, (unsigned long __user *)addr);
 		break;