author    Trond Myklebust <Trond.Myklebust@netapp.com>   2008-03-08 11:49:24 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>   2008-03-08 11:49:24 -0500
commit    9446389ef612096704fdf18fa79bab423d4110f0 (patch)
tree      3e4fda7270be58ae176d20d318e61fb115b325b5 /arch/x86/kernel
parent    cdd0972945dbcb8ea24db365d9b0e100af2a27bb (diff)
parent    84c6f6046c5a2189160a8f0dca8b90427bf690ea (diff)
Merge commit 'origin' into devel
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c              4
-rw-r--r--  arch/x86/kernel/cpu/common.c                  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c    8
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c               9
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c               7
-rw-r--r--  arch/x86/kernel/entry_64.S                    6
-rw-r--r--  arch/x86/kernel/head_32.S                     2
-rw-r--r--  arch/x86/kernel/head_64.S                    22
-rw-r--r--  arch/x86/kernel/hpet.c                        4
-rw-r--r--  arch/x86/kernel/i387.c                       10
-rw-r--r--  arch/x86/kernel/init_task.c                   1
-rw-r--r--  arch/x86/kernel/process_32.c                  2
-rw-r--r--  arch/x86/kernel/process_64.c                  8
-rw-r--r--  arch/x86/kernel/ptrace.c                     22
-rw-r--r--  arch/x86/kernel/reboot.c                      6
-rw-r--r--  arch/x86/kernel/setup_64.c                    2
-rw-r--r--  arch/x86/kernel/signal_32.c                   4
-rw-r--r--  arch/x86/kernel/signal_64.c                   2
-rw-r--r--  arch/x86/kernel/smpboot_64.c                  2
-rw-r--r--  arch/x86/kernel/stacktrace.c                  4
-rw-r--r--  arch/x86/kernel/step.c                        4
-rw-r--r--  arch/x86/kernel/tls.c                         8
-rw-r--r--  arch/x86/kernel/tsc_32.c                      3
-rw-r--r--  arch/x86/kernel/vsyscall_64.c                49
24 files changed, 102 insertions, 89 deletions
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index a33d53017997..8ea040124f7d 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -128,13 +128,11 @@ void foo(void)
         OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
 #endif
 
-#ifdef CONFIG_LGUEST_GUEST
+#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
         BLANK();
         OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
         OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
-#endif
 
-#ifdef CONFIG_LGUEST
         BLANK();
         OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
         OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f86a3c4a2669..a38aafaefc23 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -504,7 +504,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
         /* Clear all flags overriden by options */
         for (i = 0; i < NCAPINTS; i++)
-                c->x86_capability[i] ^= cleared_cpu_caps[i];
+                c->x86_capability[i] &= ~cleared_cpu_caps[i];
 
         /* Init Machine Check Exception if available. */
         mcheck_init(c);
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index 39f8cb18296c..c2f930d86640 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -55,7 +55,6 @@ static int eps_set_state(struct eps_cpu_data *centaur,
 {
         struct cpufreq_freqs freqs;
         u32 lo, hi;
-        u8 current_multiplier, current_voltage;
         int err = 0;
         int i;
 
@@ -95,6 +94,10 @@ postchange:
         rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
         freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
 
+#ifdef DEBUG
+        {
+        u8 current_multiplier, current_voltage;
+
         /* Print voltage and multiplier */
         rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
         current_voltage = lo & 0xff;
@@ -103,7 +106,8 @@ postchange:
         current_multiplier = (lo >> 8) & 0xff;
         printk(KERN_INFO "eps: Current multiplier = %d\n",
                 current_multiplier);
-
+        }
+#endif
         cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         return err;
 }
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index b6e136f23d3d..be83336fddba 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -43,6 +43,7 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
+#include <asm/kvm_para.h>
 #include "mtrr.h"
 
 u32 num_var_ranges = 0;
@@ -649,6 +650,7 @@ static __init int amd_special_default_mtrr(void)
 
 /**
  * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
+ * @end_pfn: ending page frame number
  *
  * Some buggy BIOSes don't setup the MTRRs properly for systems with certain
  * memory configurations. This routine checks that the highest MTRR matches
@@ -688,8 +690,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
         /* kvm/qemu doesn't have mtrr set right, don't trim them all */
         if (!highest_pfn) {
-                printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n");
-                WARN_ON(1);
+                if (!kvm_para_available()) {
+                        printk(KERN_WARNING
+                                "WARNING: strange, CPU MTRRs all blank?\n");
+                        WARN_ON(1);
+                }
                 return 0;
         }
 
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 200fb3f9ebfb..e8b422c1c512 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -76,13 +76,6 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
         /* All Transmeta CPUs have a constant TSC */
         set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
 
-        /* If we can run i686 user-space code, call us an i686 */
-#define USER686 ((1 << X86_FEATURE_TSC)|\
-                 (1 << X86_FEATURE_CX8)|\
-                 (1 << X86_FEATURE_CMOV))
-        if (c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686)
-                c->x86 = 6;
-
 #ifdef CONFIG_SYSCTL
         /* randomize_va_space slows us down enormously;
            it probably triggers retranslation of x86->native bytecode */
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 2ad9a1bc6a73..c20c9e7e08dd 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -453,6 +453,7 @@ ENTRY(stub_execve)
         CFI_REGISTER rip, r11
         SAVE_REST
         FIXUP_TOP_OF_STACK %r11
+        movq %rsp, %rcx
         call sys_execve
         RESTORE_TOP_OF_STACK %r11
         movq %rax,RAX(%rsp)
@@ -1036,15 +1037,16 @@ ENDPROC(child_rip)
  * rdi: name, rsi: argv, rdx: envp
  *
  * We want to fallback into:
- *      extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
+ *      extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
  *
  * do_sys_execve asm fallback arguments:
- *      rdi: name, rsi: argv, rdx: envp, fake frame on the stack
+ *      rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
  */
 ENTRY(kernel_execve)
         CFI_STARTPROC
         FAKE_STACK_FRAME $0
         SAVE_ALL
+        movq %rsp,%rcx
         call sys_execve
         movq %rax, RAX(%rsp)
         RESTORE_REST
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 25eb98540a41..fd8ca53943a8 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -606,7 +606,7 @@ ENTRY(_stext)
 .section ".bss.page_aligned","wa"
         .align PAGE_SIZE_asm
 #ifdef CONFIG_X86_PAE
-ENTRY(swapper_pg_pmd)
+swapper_pg_pmd:
         .fill 1024*KPMDS,4,0
 #else
 ENTRY(swapper_pg_dir)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index eb415043a929..a007454133a3 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -379,18 +379,24 @@ NEXT_PAGE(level2_ident_pgt)
         /* Since I easily can, map the first 1G.
          * Don't set NX because code runs from these pages.
          */
-        PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
+        PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
 
 NEXT_PAGE(level2_kernel_pgt)
-        /* 40MB kernel mapping. The kernel code cannot be bigger than that.
-           When you change this change KERNEL_TEXT_SIZE in page.h too. */
-        /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
-        PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE)
-        /* Module mapping starts here */
-        .fill (PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0
+        /*
+         * 128 MB kernel mapping. We spend a full page on this pagetable
+         * anyway.
+         *
+         * The kernel code+data+bss must not be bigger than that.
+         *
+         * (NOTE: at +128MB starts the module area, see MODULES_VADDR.
+         *  If you want to increase this then increase MODULES_VADDR
+         *  too.)
+         */
+        PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
+                KERNEL_IMAGE_SIZE/PMD_SIZE)
 
 NEXT_PAGE(level2_spare_pgt)
-        .fill 512,8,0
+        .fill 512, 8, 0
 
 #undef PMDS
 #undef NEXT_PAGE
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 429d084e014d..235fd6c77504 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -368,8 +368,8 @@ static int hpet_clocksource_register(void)
         return 0;
 }
 
-/*
- * Try to setup the HPET timer
+/**
+ * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
  */
 int __init hpet_enable(void)
 {
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 763dfc407232..d2e39e69aaf8 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -132,7 +132,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
         if (!cpu_has_fxsr)
                 return -ENODEV;
 
-        unlazy_fpu(target);
+        init_fpu(target);
 
         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                    &target->thread.i387.fxsave, 0, -1);
@@ -147,7 +147,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
         if (!cpu_has_fxsr)
                 return -ENODEV;
 
-        unlazy_fpu(target);
+        init_fpu(target);
         set_stopped_child_used_math(target);
 
         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -261,7 +261,7 @@ static void convert_from_fxsr(struct user_i387_ia32_struct *env,
         }
 #else
         env->fip = fxsave->fip;
-        env->fcs = fxsave->fcs;
+        env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
         env->foo = fxsave->foo;
         env->fos = fxsave->fos;
 #endif
@@ -307,7 +307,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
         if (!HAVE_HWFP)
                 return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-        unlazy_fpu(target);
+        init_fpu(target);
 
         if (!cpu_has_fxsr)
                 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -332,7 +332,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
         if (!HAVE_HWFP)
                 return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-        unlazy_fpu(target);
+        init_fpu(target);
         set_stopped_child_used_math(target);
 
         if (!cpu_has_fxsr)
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 5b3ce7934363..3d01e47777db 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -15,6 +15,7 @@ static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
+EXPORT_UNUSED_SYMBOL(init_mm); /* will be removed in 2.6.26 */
 
 /*
  * Initial thread structure.
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a7d50a547dc2..be3c7a299f02 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -603,11 +603,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
         }
 #endif
 
+#ifdef X86_BTS
         if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
         if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
+#endif
 
 
         if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b0cc8f0136d8..3baf9b9f4c87 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -604,11 +604,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
                 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
         }
 
+#ifdef X86_BTS
         if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
         if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
+#endif
 }
 
 /*
@@ -730,16 +732,16 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
  */
 asmlinkage
 long sys_execve(char __user *name, char __user * __user *argv,
-                char __user * __user *envp, struct pt_regs regs)
+                char __user * __user *envp, struct pt_regs *regs)
 {
         long error;
         char * filename;
 
         filename = getname(name);
         error = PTR_ERR(filename);
         if (IS_ERR(filename))
                 return error;
-        error = do_execve(filename, argv, envp, &regs);
+        error = do_execve(filename, argv, envp, regs);
         putname(filename);
         return error;
 }
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index d862e396b099..8f64abe699fd 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -323,6 +323,16 @@ static int putreg(struct task_struct *child,
                 return set_flags(child, value);
 
 #ifdef CONFIG_X86_64
+        /*
+         * Orig_ax is really just a flag with small positive and
+         * negative values, so make sure to always sign-extend it
+         * from 32 bits so that it works correctly regardless of
+         * whether we come from a 32-bit environment or not.
+         */
+        case offsetof(struct user_regs_struct, orig_ax):
+                value = (long) (s32) value;
+                break;
+
         case offsetof(struct user_regs_struct,fs_base):
                 if (value >= TASK_SIZE_OF(child))
                         return -EIO;
@@ -544,6 +554,8 @@ static int ptrace_set_debugreg(struct task_struct *child,
         return 0;
 }
 
+#ifdef X86_BTS
+
 static int ptrace_bts_get_size(struct task_struct *child)
 {
         if (!child->thread.ds_area_msr)
@@ -826,6 +838,7 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk,
 
         ptrace_bts_write_record(tsk, &rec);
 }
+#endif /* X86_BTS */
 
 /*
  * Called by kernel/ptrace.c when detaching..
@@ -839,7 +852,9 @@ void ptrace_disable(struct task_struct *child)
         clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 #endif
         if (child->thread.ds_area_msr) {
+#ifdef X86_BTS
                 ptrace_bts_realloc(child, 0, 0);
+#endif
                 child->thread.debugctlmsr &= ~ds_debugctl_mask();
                 if (!child->thread.debugctlmsr)
                         clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
@@ -961,6 +976,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 break;
 #endif
 
+        /*
+         * These bits need more cooking - not enabled yet:
+         */
+#ifdef X86_BTS
         case PTRACE_BTS_CONFIG:
                 ret = ptrace_bts_config
                         (child, data, (struct ptrace_bts_config __user *)addr);
@@ -988,6 +1007,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 ret = ptrace_bts_drain
                         (child, data, (struct bts_struct __user *) addr);
                 break;
+#endif
 
         default:
                 ret = ptrace_request(child, request, addr, data);
@@ -1226,12 +1246,14 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
         case PTRACE_SETOPTIONS:
         case PTRACE_SET_THREAD_AREA:
         case PTRACE_GET_THREAD_AREA:
+#ifdef X86_BTS
         case PTRACE_BTS_CONFIG:
         case PTRACE_BTS_STATUS:
         case PTRACE_BTS_SIZE:
         case PTRACE_BTS_GET:
         case PTRACE_BTS_CLEAR:
         case PTRACE_BTS_DRAIN:
+#endif
                 return sys_ptrace(request, pid, addr, data);
 
         default:
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 7fd6ac43e4a1..55ceb8cdef75 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -326,6 +326,10 @@ static inline void kb_wait(void)
         }
 }
 
+void __attribute__((weak)) mach_reboot_fixups(void)
+{
+}
+
 static void native_machine_emergency_restart(void)
 {
         int i;
@@ -337,6 +341,8 @@ static void native_machine_emergency_restart(void)
         /* Could also try the reset bit in the Hammer NB */
         switch (reboot_type) {
         case BOOT_KBD:
+                mach_reboot_fixups(); /* for board specific fixups */
+
                 for (i = 0; i < 10; i++) {
                         kb_wait();
                         udelay(50);
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 6fd804f07821..7637dc91c79b 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -1021,7 +1021,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
         /* Clear all flags overriden by options */
         for (i = 0; i < NCAPINTS; i++)
-                c->x86_capability[i] ^= cleared_cpu_caps[i];
+                c->x86_capability[i] &= ~cleared_cpu_caps[i];
 
 #ifdef CONFIG_X86_MCE
         mcheck_init(c);
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index caee1f002fed..0157a6f0f41f 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -407,7 +407,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
          * The tracer may want to single-step inside the
          * handler too.
          */
-        regs->flags &= ~TF_MASK;
+        regs->flags &= ~(TF_MASK | X86_EFLAGS_DF);
         if (test_thread_flag(TIF_SINGLESTEP))
                 ptrace_notify(SIGTRAP);
 
@@ -500,7 +500,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
          * The tracer may want to single-step inside the
          * handler too.
          */
-        regs->flags &= ~TF_MASK;
+        regs->flags &= ~(TF_MASK | X86_EFLAGS_DF);
         if (test_thread_flag(TIF_SINGLESTEP))
                 ptrace_notify(SIGTRAP);
 
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 7347bb14e306..56b72fb67f9b 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -295,7 +295,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
            see include/asm-x86_64/uaccess.h for details. */
         set_fs(USER_DS);
 
-        regs->flags &= ~X86_EFLAGS_TF;
+        regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
         if (test_thread_flag(TIF_SINGLESTEP))
                 ptrace_notify(SIGTRAP);
 #ifdef DEBUG_SIG
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index d53bd6fcb428..0880f2c388a9 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -554,10 +554,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
         int timeout;
         unsigned long start_rip;
         struct create_idle c_idle = {
-                .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
                 .cpu = cpu,
                 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
         };
+        INIT_WORK(&c_idle.work, do_fork_idle);
 
         /* allocate memory for gdts of secondary cpus. Hotplug is considered */
         if (!cpu_gdt_descr[cpu].address &&
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 02f0f61f5b11..c28c342c162f 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -25,6 +25,8 @@ static int save_stack_stack(void *data, char *name)
 static void save_stack_address(void *data, unsigned long addr, int reliable)
 {
         struct stack_trace *trace = data;
+        if (!reliable)
+                return;
         if (trace->skip > 0) {
                 trace->skip--;
                 return;
@@ -37,6 +39,8 @@ static void
 save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 {
         struct stack_trace *trace = (struct stack_trace *)data;
+        if (!reliable)
+                return;
         if (in_sched_functions(addr))
                 return;
         if (trace->skip > 0) {
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 2ef1a5f8d675..9d406cdc847f 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -166,7 +166,7 @@ static void enable_step(struct task_struct *child, bool block)
                           child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
         } else {
                 write_debugctlmsr(child,
-                          child->thread.debugctlmsr & ~TIF_DEBUGCTLMSR);
+                          child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
 
                 if (!child->thread.debugctlmsr)
                         clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
@@ -189,7 +189,7 @@ void user_disable_single_step(struct task_struct *child)
          * Make sure block stepping (BTF) is disabled.
          */
         write_debugctlmsr(child,
-                  child->thread.debugctlmsr & ~TIF_DEBUGCTLMSR);
+                  child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
 
         if (!child->thread.debugctlmsr)
                 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 6dfd4e76661a..022bcaa3b42e 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -91,7 +91,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
 
 asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
 {
-        return do_set_thread_area(current, -1, u_info, 1);
+        int ret = do_set_thread_area(current, -1, u_info, 1);
+        prevent_tail_call(ret);
+        return ret;
 }
 
 
@@ -139,7 +141,9 @@ int do_get_thread_area(struct task_struct *p, int idx,
 
 asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
 {
-        return do_get_thread_area(current, -1, u_info);
+        int ret = do_get_thread_area(current, -1, u_info);
+        prevent_tail_call(ret);
+        return ret;
 }
 
 int regset_tls_active(struct task_struct *target,
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index 43517e324be8..f14cfd9d1f94 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -28,7 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz);
 static int __init tsc_setup(char *str)
 {
         printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-                                "cannot disable TSC.\n");
+                                "cannot disable TSC completely.\n");
+        mark_tsc_unstable("user disabled TSC");
         return 1;
 }
 #else
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 3f8242774580..edff4c985485 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -44,11 +44,6 @@
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
 #define __syscall_clobber "r11","cx","memory"
-#define __pa_vsymbol(x)                 \
-        ({unsigned long v;              \
-        extern char __vsyscall_0;       \
-        asm("" : "=r" (v) : "0" (x));   \
-        ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })
 
 /*
  * vsyscall_gtod_data contains data that is :
@@ -102,7 +97,7 @@ static __always_inline void do_get_tz(struct timezone * tz)
 static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 {
         int ret;
-        asm volatile("vsysc2: syscall"
+        asm volatile("syscall"
                 : "=a" (ret)
                 : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
                 : __syscall_clobber );
@@ -112,7 +107,7 @@ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 static __always_inline long time_syscall(long *t)
 {
         long secs;
-        asm volatile("vsysc1: syscall"
+        asm volatile("syscall"
                 : "=a" (secs)
                 : "0" (__NR_time),"D" (t) : __syscall_clobber);
         return secs;
@@ -228,42 +223,11 @@ long __vsyscall(3) venosys_1(void)
 
 #ifdef CONFIG_SYSCTL
 
-#define SYSCALL 0x050f
-#define NOP2    0x9090
-
-/*
- * NOP out syscall in vsyscall page when not needed.
- */
-static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
-                        void __user *buffer, size_t *lenp, loff_t *ppos)
+static int
+vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-        extern u16 vsysc1, vsysc2;
-        u16 __iomem *map1;
-        u16 __iomem *map2;
-        int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-        if (!write)
-                return ret;
-        /* gcc has some trouble with __va(__pa()), so just do it this
-           way. */
-        map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
-        if (!map1)
-                return -ENOMEM;
-        map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
-        if (!map2) {
-                ret = -ENOMEM;
-                goto out;
-        }
-        if (!vsyscall_gtod_data.sysctl_enabled) {
-                writew(SYSCALL, map1);
-                writew(SYSCALL, map2);
-        } else {
-                writew(NOP2, map1);
-                writew(NOP2, map2);
-        }
-        iounmap(map2);
-out:
-        iounmap(map1);
-        return ret;
+        return proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
 }
 
 static ctl_table kernel_table2[] = {
@@ -279,7 +243,6 @@ static ctl_table kernel_root_table2[] = {
           .child = kernel_table2 },
         {}
 };
-
 #endif
 
 /* Assume __initcall executes before all user space. Hopefully kmod