Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--   arch/i386/kernel/alternative.c           |  6
-rw-r--r--   arch/i386/kernel/cpu/intel_cacheinfo.c   |  4
-rw-r--r--   arch/i386/kernel/cpu/perfctr-watchdog.c  | 28
-rw-r--r--   arch/i386/kernel/nmi.c                   |  2
-rw-r--r--   arch/i386/kernel/ptrace.c                |  1
5 files changed, 28 insertions, 13 deletions
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 9f4ac8b02de4..bd72d94e713e 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -445,8 +445,6 @@ void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
 {
 	memcpy(addr, opcode, len);
 	sync_core();
-	/* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
-	   case. */
-	if (cpu_has_clflush)
-		asm("clflush (%0) " :: "r" (addr) : "memory");
+	/* Could also do a CLFLUSH here to speed up CPU recovery; but
+	   that causes hangs on some VIA CPUs. */
 }
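For reference, text_poke() as it reads once this hunk is applied: the CLFLUSH and its cpu_has_clflush guard are gone, leaving only the copy and the serializing sync_core(). This is reassembled from the hunk above, not from the full source file:

void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
	memcpy(addr, opcode, len);
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
}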
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index d5a456d27d82..db6c25aa5776 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -515,7 +515,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 
 	cpuid4_info[cpu] = kzalloc(
 		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (unlikely(cpuid4_info[cpu] == NULL))
+	if (cpuid4_info[cpu] == NULL)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
@@ -748,6 +748,8 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
+	if (cpuid4_info[cpu] == NULL)
+		return;
 	for (i = 0; i < num_cache_leaves; i++) {
 		cache_remove_shared_cpu_map(cpu, i);
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
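The two hunks work as a pair: if the kzalloc() in detect_cache_attributes() fails, cpuid4_info[cpu] stays NULL and the function returns -ENOMEM, so cache_remove_dev() must not walk the cache leaves for that CPU. A minimal standalone sketch of the setup/teardown pattern, with hypothetical names (per_cpu_table, setup_cpu, teardown_cpu), not the kernel code:

#include <stdlib.h>

#define NCPUS 4
static int *per_cpu_table[NCPUS];

static int setup_cpu(int cpu)
{
	per_cpu_table[cpu] = calloc(16, sizeof(int));
	if (per_cpu_table[cpu] == NULL)
		return -1;	/* setup failed; the table entry stays NULL */
	return 0;
}

static void teardown_cpu(int cpu)
{
	if (per_cpu_table[cpu] == NULL)	/* mirror of the new guard above */
		return;
	free(per_cpu_table[cpu]);
	per_cpu_table[cpu] = NULL;
}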
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
index 4be488e73bee..93fecd4b03de 100644
--- a/arch/i386/kernel/cpu/perfctr-watchdog.c
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -263,8 +263,8 @@ static int setup_k7_watchdog(unsigned nmi_hz)
 	unsigned int evntsel;
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
-	perfctr_msr = MSR_K7_PERFCTR0;
-	evntsel_msr = MSR_K7_EVNTSEL0;
+	perfctr_msr = wd_ops->perfctr;
+	evntsel_msr = wd_ops->evntsel;
 
 	wrmsrl(perfctr_msr, 0UL);
 
@@ -343,8 +343,8 @@ static int setup_p6_watchdog(unsigned nmi_hz)
 	unsigned int evntsel;
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
-	perfctr_msr = MSR_P6_PERFCTR0;
-	evntsel_msr = MSR_P6_EVNTSEL0;
+	perfctr_msr = wd_ops->perfctr;
+	evntsel_msr = wd_ops->evntsel;
 
 	/* KVM doesn't implement this MSR */
 	if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
@@ -569,8 +569,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
 		return 0;
 
-	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
-	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;
+	perfctr_msr = wd_ops->perfctr;
+	evntsel_msr = wd_ops->evntsel;
 
 	wrmsrl(perfctr_msr, 0UL);
 
@@ -605,6 +605,16 @@ static struct wd_ops intel_arch_wd_ops = {
 	.evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
 };
 
+static struct wd_ops coreduo_wd_ops = {
+	.reserve = single_msr_reserve,
+	.unreserve = single_msr_unreserve,
+	.setup = setup_intel_arch_watchdog,
+	.rearm = p6_rearm,
+	.stop = single_msr_stop_watchdog,
+	.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+	.evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
+};
+
 static void probe_nmi_watchdog(void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -615,6 +625,12 @@ static void probe_nmi_watchdog(void)
 		wd_ops = &k7_wd_ops;
 		break;
 	case X86_VENDOR_INTEL:
+		/* Work around Core Duo (Yonah) errata AE49 where perfctr1
+		   doesn't have a working enable bit. */
+		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
+			wd_ops = &coreduo_wd_ops;
+			break;
+		}
 		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 			wd_ops = &intel_arch_wd_ops;
 			break;
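All of the watchdog hunks serve one fix: setup_k7_watchdog(), setup_p6_watchdog() and setup_intel_arch_watchdog() now read their MSR numbers from the active wd_ops instead of hardcoding them, which lets the new coreduo_wd_ops reuse setup_intel_arch_watchdog() with counter 0 rather than the default counter 1 that errata AE49 breaks on Core Duo. A toy sketch of that ops-table dispatch, with illustrative names and plain integers standing in for the MSR constants:

#include <stdio.h>

struct wd_ops_sketch {
	unsigned perfctr;	/* which counter register to program */
	unsigned evntsel;	/* which event-select register to program */
};

static const struct wd_ops_sketch arch_ops    = { .perfctr = 1, .evntsel = 1 };
static const struct wd_ops_sketch coreduo_ops = { .perfctr = 0, .evntsel = 0 };

static const struct wd_ops_sketch *ops;

static void setup_watchdog(void)
{
	/* Reads ops->perfctr rather than a hardcoded constant, so the
	   same setup routine serves both table entries. */
	printf("programming perfctr %u, evntsel %u\n", ops->perfctr, ops->evntsel);
}

int main(void)
{
	int family = 6, model = 14;	/* pretend CPUID results */
	ops = (family == 6 && model == 14) ? &coreduo_ops : &arch_ops;
	setup_watchdog();		/* same code path either way */
	return 0;
}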
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 8c1c965eb2a8..c7227e2180f8 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -115,12 +115,12 @@ static int __init check_nmi_watchdog(void)
 			atomic_dec(&nmi_active);
 		}
 	}
+	endflag = 1;
 	if (!atomic_read(&nmi_active)) {
 		kfree(prev_nmi_count);
 		atomic_set(&nmi_active, -1);
 		return -1;
 	}
-	endflag = 1;
 	printk("OK.\n");
 
 	/* now that we know it works we can reduce NMI frequency to
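The nmi.c change is an ordering fix: endflag is the variable the other CPUs busy-wait on during the watchdog self-test, and the old placement set it only on the success path, so the early return on failure could leave those CPUs spinning forever. A toy sketch of the release-before-early-return pattern (pthreads, illustrative names, not the kernel code):

#include <pthread.h>
#include <stdio.h>

static volatile int endflag;

static void *busy(void *arg)
{
	(void)arg;
	while (endflag == 0)	/* the helper spins until released */
		;
	return NULL;
}

static int check(int ok)
{
	pthread_t t;
	pthread_create(&t, NULL, busy, NULL);
	/* Release the spinner BEFORE any early return, as the patch does. */
	endflag = 1;
	pthread_join(t, NULL);
	return ok ? 0 : -1;	/* failure path no longer strands busy() */
}

int main(void)
{
	printf("check(0) = %d\n", check(0));	/* terminates either way */
	return 0;
}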
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 0c8f00e69c4d..7c1b92522e95 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -274,7 +274,6 @@ static void clear_singlestep(struct task_struct *child)
 void ptrace_disable(struct task_struct *child)
 {
 	clear_singlestep(child);
-	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 }
 