Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/iomap.h             |  4
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd.c     |  4
-rw-r--r--	arch/x86/kernel/cpu/mcheck/therm_throt.c |  9
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c         | 59
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c   | 15
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p4.c      |  2
-rw-r--r--	arch/x86/kernel/trampoline.c             |  3
-rw-r--r--	arch/x86/mm/iomap_32.c                   |  6
-rw-r--r--	arch/x86/oprofile/nmi_int.c              | 22
9 files changed, 86 insertions(+), 38 deletions(-)
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index f35eb45d6576..c4191b3b7056 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
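
The __iomem qualifier is invisible to the compiler and only meaningful to sparse, which uses it to keep I/O-space pointers from mixing with ordinary kernel pointers. A minimal sketch of how such annotations are wired up, modelled on include/linux/compiler.h (the example mapper function is purely illustrative):

    #ifdef __CHECKER__                 /* defined when sparse, not gcc, parses the code */
    # define __iomem __attribute__((noderef, address_space(2)))
    # define __force __attribute__((force))
    #else
    # define __iomem
    # define __force
    #endif

    void __iomem *map_device(unsigned long pfn);   /* hypothetical mapper */

    static unsigned int bad_read(unsigned long pfn)
    {
        void __iomem *mmio = map_device(pfn);
        /* sparse warns: cast removes address space of expression.
         * Correct code goes through readl()/writel() accessors. */
        return *(volatile unsigned int __force *)mmio;
    }

With the prototypes above annotated, sparse can flag callers that dereference the returned pointer directly or hand it to a function expecting a plain void *.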
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 224392d8fe8c..5e975298fa81 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		err = -ENOMEM;
 		goto out;
 	}
-	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
 		kfree(b);
 		err = -ENOMEM;
 		goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
 	cpumask_setall(b->cpus);
 #else
-	cpumask_copy(b->cpus, c->llc_shared_map);
+	cpumask_set_cpu(cpu, b->cpus);
 #endif
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
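
The switch from alloc_cpumask_var() to zalloc_cpumask_var() matters when CONFIG_CPUMASK_OFFSTACK puts the mask in heap memory: the alloc variant returns it uninitialized, so following it with a single cpumask_set_cpu() leaves whatever stale bits were already there. A user-space analogue of the difference, with a plain bitmap standing in for struct cpumask (names are illustrative):

    #include <stdlib.h>

    #define BITS_PER_WORD (8 * sizeof(unsigned long))

    /* calloc() plays the role of zalloc_cpumask_var(): all bits start
     * clear.  With malloc() (the alloc_cpumask_var() analogue) the words
     * below would contain garbage, and mask_set() would merely add one
     * bit on top of it. */
    static unsigned long *mask_zalloc(size_t nbits)
    {
        return calloc((nbits + BITS_PER_WORD - 1) / BITS_PER_WORD,
                      sizeof(unsigned long));
    }

    static void mask_set(unsigned long *mask, unsigned int bit)
    {
        mask[bit / BITS_PER_WORD] |= 1UL << (bit % BITS_PER_WORD);
    }

    int main(void)
    {
        unsigned long *cpus = mask_zalloc(64);
        if (!cpus)
            return 1;
        mask_set(cpus, 3);   /* exactly one bit set, rest known-zero */
        free(cpus);
        return 0;
    }

The second hunk is the other half of the fix: the bank mask now starts from a known-empty state and records just the creating CPU, instead of copying the LLC-shared map.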
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index c2a8b26d4fea..d9368eeda309 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+				unsigned int cpu)
 {
 	int err;
-	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
 	if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		mutex_lock(&therm_cpu_lock);
-		err = thermal_throttle_add_dev(sys_dev);
+		err = thermal_throttle_add_dev(sys_dev, cpu);
 		mutex_unlock(&therm_cpu_lock);
 		WARN_ON(err);
 		break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
 	/* connect live CPUs to sysfs */
 	for_each_online_cpu(cpu) {
-		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
 		WARN_ON(err);
 	}
 #ifdef CONFIG_HOTPLUG_CPU
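
The old code derived the CPU from smp_processor_id(), but a CPU_UP_PREPARE notifier runs on the CPU driving the hotplug operation, not on the CPU being brought online, so the wrong cpu_data() entry could be consulted. The notifier already knows the target CPU and now passes it down explicitly. For reference, in the hotplug notifier API of this era the target arrives as the opaque hcpu argument (sketch; the dispatch body is elided):

    static int __cpuinit thermal_throttle_cpu_callback(struct notifier_block *nfb,
                                                       unsigned long action,
                                                       void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;  /* CPU being plugged,
                                                  * not the one running
                                                  * this callback */

        /* ... switch (action) { case CPU_UP_PREPARE: ... }, passing
         * 'cpu' down to thermal_throttle_add_dev() ... */
        return NOTIFY_OK;
    }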
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20fda02d..3efdf2870a35 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		/*
 		 * event overflow
 		 */
-		handled = 1;
+		handled++;
 		data.period	= event->hw.last_period;
 
 		if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+	unsigned int	marked;
+	int		handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
 			 unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
-	struct pt_regs *regs;
+	unsigned int this_nmi;
+	int handled;
 
 	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
 	case DIE_NMI:
 	case DIE_NMI_IPI:
 		break;
-
+	case DIE_NMIUNKNOWN:
+		this_nmi = percpu_read(irq_stat.__nmi_count);
+		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+			/* let the kernel handle the unknown nmi */
+			return NOTIFY_DONE;
+		/*
+		 * This one is a PMU back-to-back nmi. Two events
+		 * trigger 'simultaneously' raising two back-to-back
+		 * NMIs. If the first NMI handles both, the latter
+		 * will be empty and daze the CPU. So, we drop it to
+		 * avoid false-positive 'unknown nmi' messages.
+		 */
+		return NOTIFY_STOP;
 	default:
 		return NOTIFY_DONE;
 	}
 
-	regs = args->regs;
-
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	/*
-	 * Can't rely on the handled return value to say it was our NMI, two
-	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
-	 *
-	 * If the first NMI handles both, the latter will be empty and daze
-	 * the CPU.
-	 */
-	x86_pmu.handle_irq(regs);
+
+	handled = x86_pmu.handle_irq(args->regs);
+	if (!handled)
+		return NOTIFY_DONE;
+
+	this_nmi = percpu_read(irq_stat.__nmi_count);
+	if ((handled > 1) ||
+		/* the next nmi could be a back-to-back nmi */
+	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+		/*
+		 * We could have two subsequent back-to-back nmis: The
+		 * first handles more than one counter, the 2nd
+		 * handles only one counter and the 3rd handles no
+		 * counter.
+		 *
+		 * This is the 2nd nmi because the previous was
+		 * handling more than one counter. We will mark the
+		 * next (3rd) and then drop it if unhandled.
+		 */
+		__get_cpu_var(pmu_nmi).marked	= this_nmi + 1;
+		__get_cpu_var(pmu_nmi).handled	= handled;
+	}
 
 	return NOTIFY_STOP;
 }
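
The bookkeeping above is a small state machine: after a PMU NMI that handled more than one counter, the next NMI number is remembered in pmu_nmi.marked; if that next NMI then turns out to be unknown, it is assumed to be the empty second half of a back-to-back pair and is swallowed instead of raising an 'unknown NMI' warning. A user-space model of the logic (assumptions: single CPU, the caller bumps nmi_count once per NMI; names mirror the diff):

    #include <stdio.h>

    struct pmu_nmi_state { unsigned int marked; int handled; };

    static struct pmu_nmi_state pmu_nmi;
    static unsigned int nmi_count;      /* models irq_stat.__nmi_count */

    /* Called when an NMI is claimed by the PMU; 'handled' counters fired. */
    static void pmu_nmi_seen(int handled)
    {
        if (handled > 1 ||
            (pmu_nmi.marked == nmi_count && pmu_nmi.handled > 1)) {
            pmu_nmi.marked  = nmi_count + 1;  /* next NMI may be empty */
            pmu_nmi.handled = handled;
        }
    }

    /* Called for an otherwise-unknown NMI: drop it only if it was marked. */
    static int swallow_unknown_nmi(void)
    {
        return nmi_count == pmu_nmi.marked;
    }

    int main(void)
    {
        nmi_count = 1; pmu_nmi_seen(2);  /* first NMI handles two counters */
        nmi_count = 2;                   /* back-to-back NMI, nothing left */
        printf("drop: %d\n", swallow_unknown_nmi());  /* prints drop: 1 */
        return 0;
    }

This is also why the handlers below stop returning a bare 0/1: the count of serviced overflows is what lets the notifier predict a possible empty follow-up NMI.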
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d014008..ee05c90012d2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	int bit, loops;
-	u64 ack, status;
+	u64 status;
+	int handled = 0;
 
 	perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 	loops = 0;
 again:
+	intel_pmu_ack_status(status);
 	if (++loops > 100) {
 		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
 		perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
 	}
 
 	inc_irq_stat(apic_perf_irqs);
-	ack = status;
 
 	intel_pmu_lbr_read();
 
 	/*
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
-	if (__test_and_clear_bit(62, (unsigned long *)&status))
+	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+		handled++;
 		x86_pmu.drain_pebs(regs);
+	}
 
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
+		handled++;
+
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
@@ -761,8 +766,6 @@ again:
 		x86_pmu_stop(event);
 	}
 
-	intel_pmu_ack_status(ack);
-
 	/*
 	 * Repeat if there is more work to be done:
 	 */
@@ -772,7 +775,7 @@ again:
 
 done:
 	intel_pmu_enable_all(0);
-	return 1;
+	return handled;
 }
 
 static struct event_constraint *
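
Two related changes in this handler: GLOBAL_STATUS is now acked at the top of the loop rather than after processing, so an overflow that lands while counters are being drained raises a fresh status bit that the closing re-read picks up instead of being acked away; and the handler counts how many overflow conditions it serviced, feeding the back-to-back NMI logic above. The resulting loop shape, reduced to a sketch (helper names follow the file's conventions; error handling and counter servicing elided):

    /* Skeleton of the reworked handler; a sketch, not the full code. */
    static int intel_pmu_handle_irq_sketch(struct pt_regs *regs)
    {
        int handled = 0, loops = 0;
        u64 status = intel_pmu_get_status();

    again:
        intel_pmu_ack_status(status);   /* ack first: overflows arriving
                                         * while we work set fresh bits */
        if (++loops > 100)
            goto done;

        /* ... drain PEBS and service each set counter bit,
         * incrementing 'handled' for every overflow seen ... */

        status = intel_pmu_get_status();
        if (status)
            goto again;                 /* new bits arrived meanwhile */
    done:
        return handled;                 /* a count, not just 0/1 */
    }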
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7e578e9cc58b..b560db3305be 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}
 
-	return handled > 0;
+	return handled;
 }
 
 /*
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a874495b3673..e2a595257390 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
 	/* Copy kernel address range */
 	clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
 			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			min_t(unsigned long, KERNEL_PGD_PTRS,
-			      KERNEL_PGD_BOUNDARY));
+			KERNEL_PGD_PTRS);
 
 	/* Initialize low mappings */
 	clone_pgd_range(trampoline_pg_dir,
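
The removed min_t() clamped a count (KERNEL_PGD_PTRS, the number of kernel pgd entries) against an index (KERNEL_PGD_BOUNDARY, the slot where kernel space begins), which only works when the kernel half of the address space is the smaller one. A small demonstration of the arithmetic, under assumed 32-bit non-PAE values (PTRS_PER_PGD = 1024, boundary = pgd_index(PAGE_OFFSET)):

    #include <stdio.h>

    static void show(unsigned int boundary)
    {
        unsigned int ptrs = 1024 - boundary;              /* KERNEL_PGD_PTRS */
        unsigned int old  = ptrs < boundary ? ptrs : boundary;  /* the min_t */

        printf("boundary=%4u  need=%4u  old code copied=%4u%s\n",
               boundary, ptrs, old, old < ptrs ? "  <-- truncated!" : "");
    }

    int main(void)
    {
        show(768);  /* default 3G/1G split: min_t happens to be harmless */
        show(256);  /* 1G/3G split (e.g. CONFIG_VMSPLIT_1G): kernel
                     * mappings would be silently truncated */
        return 0;
    }

The fix simply says what it means: copy all KERNEL_PGD_PTRS kernel entries.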
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 84e236ce76ba..72fc70cf6184 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
 	/*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
 		prot = PAGE_KERNEL_UC_MINUS;
 
-	return kmap_atomic_prot_pfn(pfn, type, prot);
+	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
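
The __force __iomem cast is the one sanctioned way to move a pointer across sparse address spaces: kmap_atomic_prot_pfn() hands back a plain void *, and the cast documents that treating it as I/O memory is intentional rather than an accident sparse should flag. On the consumer side, the annotated API would be used roughly like this (a sketch; pfn, offset and val are illustrative):

    static void poke_mmio(unsigned long pfn, unsigned long offset, u32 val)
    {
        void __iomem *va;

        va = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_WC);
        writel(val, va + offset);      /* always go through accessors;
                                        * never dereference __iomem */
        iounmap_atomic(va, KM_USER0);  /* must match the km_type used
                                        * for the map */
    }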
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f6b48f6c5951..cfe4faabb0f6 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
 	int error;
 
 	error = sysdev_class_register(&oprofile_sysclass);
-	if (!error)
-		error = sysdev_register(&device_oprofile);
+	if (error)
+		return error;
+
+	error = sysdev_register(&device_oprofile);
+	if (error)
+		sysdev_class_unregister(&oprofile_sysclass);
+
 	return error;
 }
 
575 580
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	char *cpu_type = NULL;
 	int ret = 0;
 
+	using_nmi = 0;
+
 	if (!cpu_has_apic)
 		return -ENODEV;
 
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
 	mux_init(ops);
 
-	init_sysfs();
+	ret = init_sysfs();
+	if (ret)
+		return ret;
+
 	using_nmi = 1;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
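
Three small fixes land together here: init_sysfs() now unwinds the class registration when device registration fails, op_nmi_init() propagates the error instead of ignoring it, and using_nmi is cleared on entry and only set once every step has succeeded. The stub rewrite is what makes the error propagation possible at all: a static inline is type-checked and usable in an expression, while the old do-while macro has no value, so 'ret = init_sysfs();' would not even compile in the !CONFIG_PM configuration. The pattern in isolation:

    /* Stub pattern for config-dependent helpers: keep one call site,
     * let the disabled flavour report trivial success. */
    #ifdef CONFIG_PM
    int init_sysfs(void);          /* real registration path */
    void exit_sysfs(void);
    #else
    static inline int init_sysfs(void) { return 0; }
    static inline void exit_sysfs(void) { }
    #endif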