author     Ingo Molnar <mingo@kernel.org>  2014-04-14 10:44:42 -0400
committer  Ingo Molnar <mingo@kernel.org>  2014-04-14 10:44:42 -0400
commit     740c699a8d316c8bf8593f19e2ca47795e690622
tree       a78886955770a477945c5d84e06b2e7678733b54  /arch/x86/kernel/cpu
parent     e69af4657e7764d03ad555f0b583d9c4217bcefa
parent     c9eaa447e77efe77b7fa4c953bd62de8297fd6c5
Merge tag 'v3.15-rc1' into perf/urgent
Pick up the latest fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/intel.c                      4
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c           13
-rw-r--r--  arch/x86/kernel/cpu/match.c                     42
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c                26
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c          19
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c        18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c         6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_uncore.c      7
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c      9
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c    6
10 files changed, 68 insertions, 82 deletions
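
Note: most of the hunks below convert open-coded get_online_cpus()/register_cpu_notifier() (or register_hotcpu_notifier()) sequences to the cpu_notifier_register_begin()/__register_cpu_notifier()/cpu_notifier_register_done() API, which closes the race between initializing already-online CPUs and registering the hotplug notifier. The following is a minimal illustrative sketch of the resulting init pattern; driver_init(), driver_add_cpu() and driver_cpu_notifier are hypothetical names, not code from this merge:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical per-CPU setup and hotplug callback, for illustration only. */
static int driver_add_cpu(int cpu)
{
	return 0;
}

static int driver_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
		driver_add_cpu((long)hcpu);
	return NOTIFY_OK;
}

static struct notifier_block driver_cpu_notifier = {
	.notifier_call = driver_cpu_callback,
};

static int __init driver_init(void)
{
	int cpu, err = 0;

	/* Blocks CPU hotplug and serializes notifier registration. */
	cpu_notifier_register_begin();

	/* Set up the CPUs that are already online. */
	for_each_online_cpu(cpu) {
		err = driver_add_cpu(cpu);
		if (err)
			goto out;
	}

	/* __ variant: caller already holds the registration lock. */
	__register_hotcpu_notifier(&driver_cpu_notifier);
out:
	cpu_notifier_register_done();
	return err;
}
device_initcall(driver_init);
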
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 897d6201ef10..a80029035bf2 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -274,10 +274,6 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	}
 #endif
 
-#ifdef CONFIG_X86_NUMAQ
-	numaq_tsc_disable();
-#endif
-
 	intel_smp_check(c);
 }
 #else
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 0641113e2965..a952e9c85b6f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -1225,21 +1225,24 @@ static struct notifier_block cacheinfo_cpu_notifier = {
 
 static int __init cache_sysfs_init(void)
 {
-	int i;
+	int i, err = 0;
 
 	if (num_cache_leaves == 0)
 		return 0;
 
+	cpu_notifier_register_begin();
 	for_each_online_cpu(i) {
-		int err;
 		struct device *dev = get_cpu_device(i);
 
 		err = cache_add_dev(dev);
 		if (err)
-			return err;
+			goto out;
 	}
-	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-	return 0;
+	__register_hotcpu_notifier(&cacheinfo_cpu_notifier);
+
+out:
+	cpu_notifier_register_done();
+	return err;
 }
 
 device_initcall(cache_sysfs_init);
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index 36565373af87..afa9f0d487ea 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -47,45 +47,3 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
 	return NULL;
 }
 EXPORT_SYMBOL(x86_match_cpu);
-
-ssize_t arch_print_cpu_modalias(struct device *dev,
-				struct device_attribute *attr,
-				char *bufptr)
-{
-	int size = PAGE_SIZE;
-	int i, n;
-	char *buf = bufptr;
-
-	n = snprintf(buf, size, "x86cpu:vendor:%04X:family:%04X:"
-		     "model:%04X:feature:",
-		boot_cpu_data.x86_vendor,
-		boot_cpu_data.x86,
-		boot_cpu_data.x86_model);
-	size -= n;
-	buf += n;
-	size -= 1;
-	for (i = 0; i < NCAPINTS*32; i++) {
-		if (boot_cpu_has(i)) {
-			n = snprintf(buf, size, ",%04X", i);
-			if (n >= size) {
-				WARN(1, "x86 features overflow page\n");
-				break;
-			}
-			size -= n;
-			buf += n;
-		}
-	}
-	*buf++ = '\n';
-	return buf - bufptr;
-}
-
-int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (buf) {
-		arch_print_cpu_modalias(NULL, NULL, buf);
-		add_uevent_var(env, "MODALIAS=%s", buf);
-		kfree(buf);
-	}
-	return 0;
-}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 4d5419b249da..eeee23ff75ef 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -89,6 +89,9 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
 static DEFINE_PER_CPU(struct mce, mces_seen);
 static int cpu_missing;
 
+/* CMCI storm detection filter */
+static DEFINE_PER_CPU(unsigned long, mce_polled_error);
+
 /*
  * MCA banks polled by the period polling timer for corrected events.
  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
@@ -595,6 +598,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
 	struct mce m;
 	int i;
+	unsigned long *v;
 
 	this_cpu_inc(mce_poll_count);
 
@@ -614,6 +618,8 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;
 
+		v = &get_cpu_var(mce_polled_error);
+		set_bit(0, v);
 		/*
 		 * Uncorrected or signalled events are handled by the exception
 		 * handler when it is enabled, so don't process those here.
@@ -1278,10 +1284,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 	mce_adjust_timer_default;
 
+static int cmc_error_seen(void)
+{
+	unsigned long *v = &__get_cpu_var(mce_polled_error);
+
+	return test_and_clear_bit(0, v);
+}
+
 static void mce_timer_fn(unsigned long data)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
 	unsigned long iv;
+	int notify;
 
 	WARN_ON(smp_processor_id() != data);
 
@@ -1296,7 +1310,9 @@ static void mce_timer_fn(unsigned long data)
 	 * polling interval, otherwise increase the polling interval.
 	 */
 	iv = __this_cpu_read(mce_next_interval);
-	if (mce_notify_irq()) {
+	notify = mce_notify_irq();
+	notify |= cmc_error_seen();
+	if (notify) {
 		iv = max(iv / 2, (unsigned long) HZ/100);
 	} else {
 		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
@@ -2434,14 +2450,18 @@ static __init int mcheck_init_device(void)
 	if (err)
 		return err;
 
+	cpu_notifier_register_begin();
 	for_each_online_cpu(i) {
 		err = mce_device_create(i);
-		if (err)
+		if (err) {
+			cpu_notifier_register_done();
 			return err;
+		}
 	}
 
 	register_syscore_ops(&mce_syscore_ops);
-	register_hotcpu_notifier(&mce_cpu_notifier);
+	__register_hotcpu_notifier(&mce_cpu_notifier);
+	cpu_notifier_register_done();
 
 	/* register character device /dev/mcelog */
 	misc_register(&mce_chrdev_device);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index fb6156fee6f7..3bdb95ae8c43 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -137,6 +138,22 @@ unsigned long mce_intel_adjust_timer(unsigned long interval)
 	}
 }
 
+static void cmci_storm_disable_banks(void)
+{
+	unsigned long flags, *owned;
+	int bank;
+	u64 val;
+
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	owned = __get_cpu_var(mce_banks_owned);
+	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+		val &= ~MCI_CTL2_CMCI_EN;
+		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+	}
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
 static bool cmci_storm_detect(void)
 {
 	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
@@ -158,7 +175,7 @@ static bool cmci_storm_detect(void)
 	if (cnt <= CMCI_STORM_THRESHOLD)
 		return false;
 
-	cmci_clear();
+	cmci_storm_disable_banks();
 	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
 	r = atomic_add_return(1, &cmci_storm_on_cpus);
 	mce_timer_kick(CMCI_POLL_INTERVAL);
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 3eec7de76efb..d921b7ee6595 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -271,9 +271,6 @@ static void thermal_throttle_remove_dev(struct device *dev)
 	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
 }
 
-/* Mutex protecting device creation against CPU hotplug: */
-static DEFINE_MUTEX(therm_cpu_lock);
-
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
 static int
 thermal_throttle_cpu_callback(struct notifier_block *nfb,
@@ -289,18 +286,14 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&therm_cpu_lock);
 		err = thermal_throttle_add_dev(dev, cpu);
-		mutex_unlock(&therm_cpu_lock);
 		WARN_ON(err);
 		break;
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		mutex_lock(&therm_cpu_lock);
 		thermal_throttle_remove_dev(dev);
-		mutex_unlock(&therm_cpu_lock);
 		break;
 	}
 	return notifier_from_errno(err);
@@ -319,19 +312,16 @@ static __init int thermal_throttle_init_device(void)
 	if (!atomic_read(&therm_throt_en))
 		return 0;
 
-	register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
+	cpu_notifier_register_begin();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	mutex_lock(&therm_cpu_lock);
-#endif
 	/* connect live CPUs to sysfs */
 	for_each_online_cpu(cpu) {
 		err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
 		WARN_ON(err);
 	}
-#ifdef CONFIG_HOTPLUG_CPU
-	mutex_unlock(&therm_cpu_lock);
-#endif
+
+	__register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
+	cpu_notifier_register_done();
 
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 4b8e4d3cd6ea..4c36bbe3173a 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -926,13 +926,13 @@ static __init int amd_ibs_init(void)
 		goto out;
 
 	perf_ibs_pm_init();
-	get_online_cpus();
+	cpu_notifier_register_begin();
 	ibs_caps = caps;
 	/* make ibs_caps visible to other cpus: */
 	smp_mb();
-	perf_cpu_notifier(perf_ibs_cpu_notifier);
 	smp_call_function(setup_APIC_ibs, NULL, 1);
-	put_online_cpus();
+	__perf_cpu_notifier(perf_ibs_cpu_notifier);
+	cpu_notifier_register_done();
 
 	ret = perf_event_ibs_init();
 out:
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 754291adec33..3bbdf4cd38b9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -531,15 +531,16 @@ static int __init amd_uncore_init(void)
 	if (ret)
 		return -ENODEV;
 
-	get_online_cpus();
+	cpu_notifier_register_begin();
+
 	/* init cpus already online before registering for hotplug notifier */
 	for_each_online_cpu(cpu) {
 		amd_uncore_cpu_up_prepare(cpu);
 		smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
 	}
 
-	register_cpu_notifier(&amd_uncore_cpu_notifier_block);
-	put_online_cpus();
+	__register_cpu_notifier(&amd_uncore_cpu_notifier_block);
+	cpu_notifier_register_done();
 
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 3cec947e3b98..4b9a9e9466bd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -673,19 +673,20 @@ static int __init rapl_pmu_init(void)
 		/* unsupported */
 		return 0;
 	}
-	get_online_cpus();
+
+	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
 		rapl_cpu_prepare(cpu);
 		rapl_cpu_init(cpu);
 	}
 
-	perf_cpu_notifier(rapl_cpu_notifier);
+	__perf_cpu_notifier(rapl_cpu_notifier);
 
 	ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
 	if (WARN_ON(ret)) {
 		pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
-		put_online_cpus();
+		cpu_notifier_register_done();
 		return -1;
 	}
 
@@ -699,7 +700,7 @@ static int __init rapl_pmu_init(void)
 		hweight32(rapl_cntr_mask),
 		ktime_to_ms(pmu->timer_interval));
 
-	put_online_cpus();
+	cpu_notifier_register_done();
 
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index bd2253d40cff..65bbbea38b9c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -4244,7 +4244,7 @@ static void __init uncore_cpumask_init(void)
 	if (!cpumask_empty(&uncore_cpu_mask))
 		return;
 
-	get_online_cpus();
+	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
 		int i, phys_id = topology_physical_package_id(cpu);
@@ -4263,9 +4263,9 @@ static void __init uncore_cpumask_init(void)
 	}
 	on_each_cpu(uncore_cpu_setup, NULL, 1);
 
-	register_cpu_notifier(&uncore_cpu_nb);
+	__register_cpu_notifier(&uncore_cpu_nb);
 
-	put_online_cpus();
+	cpu_notifier_register_done();
 }
 
 