Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c                 | 18
-rw-r--r--  arch/x86/kernel/apm_32.c                      |  1
-rw-r--r--  arch/x86/kernel/cpu/intel.c                   | 22
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c         | 12
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c              | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c              |  3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h              | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c        | 78
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c     |  6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 11
-rw-r--r--  arch/x86/kernel/entry_32.S                    | 13
-rw-r--r--  arch/x86/kernel/espfix_64.c                   |  5
-rw-r--r--  arch/x86/kernel/kprobes/core.c                |  3
-rw-r--r--  arch/x86/kernel/signal.c                      |  2
-rw-r--r--  arch/x86/kernel/traps.c                       |  7
-rw-r--r--  arch/x86/kernel/tsc.c                         |  4
16 files changed, 162 insertions, 45 deletions
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index c3fcb5de5083..6a1e71bde323 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -33,31 +33,41 @@ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 /* "in progress" flag of arch_trigger_all_cpu_backtrace */
 static unsigned long backtrace_flag;
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
 	int i;
+	int cpu = get_cpu();
 
-	if (test_and_set_bit(0, &backtrace_flag))
+	if (test_and_set_bit(0, &backtrace_flag)) {
 		/*
 		 * If there is already a trigger_all_cpu_backtrace() in progress
 		 * (backtrace_flag == 1), don't output double cpu dump infos.
 		 */
+		put_cpu();
 		return;
+	}
 
 	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+	if (!include_self)
+		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
-	printk(KERN_INFO "sending NMI to all CPUs:\n");
-	apic->send_IPI_all(NMI_VECTOR);
+	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+		pr_info("sending NMI to %s CPUs:\n",
+			(include_self ? "all" : "other"));
+		apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
+	}
 
 	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
 	for (i = 0; i < 10 * 1000; i++) {
 		if (cpumask_empty(to_cpumask(backtrace_mask)))
 			break;
 		mdelay(1);
+		touch_softlockup_watchdog();
 	}
 
 	clear_bit(0, &backtrace_flag);
 	smp_mb__after_atomic();
+	put_cpu();
 }
 
 static int
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f3a1f04ed4cb..584874451414 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
 	u32 eax;
 	u8 ret = 0;
 	int idled = 0;
-	int polling;
 	int err = 0;
 
 	if (!need_resched()) {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a80029035bf2..f9e4fdd3b877 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
 	 */
 	detect_extended_topology(c);
 
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
-
 	/* Work around errata */
 	srat_detect_node(c);
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c85b6f..9c8f7394c612 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
 	}
 
+#ifdef CONFIG_X86_HT
+	/*
+	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+	 * turns means that the only possibility is SMT (as indicated in
+	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+	 * c->phys_proc_id.
+	 */
+	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bb92f38153b2..9a79c8dbd8e8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
 	for_each_online_cpu(i) {
 		err = mce_device_create(i);
 		if (err) {
+			/*
+			 * Register notifier anyway (and do not unreg it) so
+			 * that we don't leave undeleted timers, see notifier
+			 * callback above.
+			 */
+			__register_hotcpu_notifier(&mce_cpu_notifier);
 			cpu_notifier_register_done();
 			goto err_device_create;
 		}
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
 err_register:
 	unregister_syscore_ops(&mce_syscore_ops);
 
-	cpu_notifier_register_begin();
-	__unregister_hotcpu_notifier(&mce_cpu_notifier);
-	cpu_notifier_register_done();
-
 err_device_create:
 	/*
 	 * We didn't keep track of which devices were created above, but
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bdfbff8a4f6..2879ecdaac43 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
 			continue;
 		if (event->attr.config1 & ~er->valid_mask)
 			return -EINVAL;
+		/* Check if the extra msrs can be safely accessed*/
+		if (!er->extra_msr_access)
+			return -ENXIO;
 
 		reg->idx = er->idx;
 		reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3b2f9bdd974b..8ade93111e03 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -295,14 +295,16 @@ struct extra_reg {
 	u64		config_mask;
 	u64		valid_mask;
 	int		idx;  /* per_xxx->regs[] reg index */
+	bool		extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
 	.event = (e),		\
 	.msr = (ms),		\
 	.config_mask = (m),	\
 	.valid_mask = (vm),	\
 	.idx = EXTRA_REG_##i,	\
+	.extra_msr_access = true,	\
 	}
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index adb02aa62af5..2502d0d9d246 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1382,6 +1382,15 @@ again:
 	intel_pmu_lbr_read();
 
 	/*
+	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
+	 * and clear the bit.
+	 */
+	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+		if (!status)
+			goto done;
+	}
+
+	/*
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
@@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void)
 	}
 }
 
+/*
+ * Under certain circumstances, access certain MSR may cause #GP.
+ * The function tests if the input MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+	u64 val_old, val_new, val_tmp;
+
+	/*
+	 * Read the current value, change it and read it back to see if it
+	 * matches, this is needed to detect certain hardware emulators
+	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+	 */
+	if (rdmsrl_safe(msr, &val_old))
+		return false;
+
+	/*
+	 * Only change the bits which can be updated by wrmsrl.
+	 */
+	val_tmp = val_old ^ mask;
+	if (wrmsrl_safe(msr, val_tmp) ||
+	    rdmsrl_safe(msr, &val_new))
+		return false;
+
+	if (val_new != val_tmp)
+		return false;
+
+	/* Here it's sure that the MSR can be safely accessed.
+	 * Restore the old value and return.
+	 */
+	wrmsrl(msr, val_old);
+
+	return true;
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
 	x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void)
 	union cpuid10_ebx ebx;
 	struct event_constraint *c;
 	unsigned int unused;
-	int version;
+	struct extra_reg *er;
+	int version, i;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 		switch (boot_cpu_data.x86) {
@@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void)
 	case 62: /* IvyBridge EP */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
+		/* dTLB-load-misses on IVB is different than SNB */
+		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
@@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void)
 		}
 	}
 
+	/*
+	 * Access LBR MSR may cause #GP under certain circumstances.
+	 * E.g. KVM doesn't support LBR MSR
+	 * Check all LBT MSR here.
+	 * Disable LBR access if any LBR MSRs can not be accessed.
+	 */
+	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+		x86_pmu.lbr_nr = 0;
+	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+			x86_pmu.lbr_nr = 0;
+	}
+
+	/*
+	 * Access extra MSR may cause #GP under certain circumstances.
+	 * E.g. KVM doesn't support offcore event
+	 * Check all extra_regs here.
+	 */
+	if (x86_pmu.extra_regs) {
+		for (er = x86_pmu.extra_regs; er->msr; er++) {
+			er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+			/* Disable LBR select mapping */
+			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+				x86_pmu.lbr_sel_map = NULL;
+		}
+	}
+
 	/* Support full width counters using alternative MSR range */
 	if (x86_pmu.intel_cap.full_width_write) {
 		x86_pmu.max_period = x86_pmu.cntval_mask;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970cb744d..696ade311ded 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
 	if (!x86_pmu.bts)
 		return 0;
 
-	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
-	if (unlikely(!buffer))
+	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+	if (unlikely(!buffer)) {
+		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
 		return -ENOMEM;
+	}
 
 	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
 	thresh = max / 16;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea38b9c..ae6552a0701f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f0da82b8e634..0d0c9d4ab6d5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -423,8 +423,9 @@ sysenter_past_esp:
 	jnz sysenter_audit
 sysenter_do_call:
 	cmpl $(NR_syscalls), %eax
-	jae syscall_badsys
+	jae sysenter_badsys
 	call *sys_call_table(,%eax,4)
+sysenter_after_call:
 	movl %eax,PT_EAX(%esp)
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
@@ -501,6 +502,7 @@ ENTRY(system_call)
 	jae syscall_badsys
 syscall_call:
 	call *sys_call_table(,%eax,4)
+syscall_after_call:
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
 	LOCKDEP_SYS_EXIT
@@ -674,8 +676,13 @@ syscall_fault:
 END(syscall_fault)
 
 syscall_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
-	jmp resume_userspace
+	movl $-ENOSYS,%eax
+	jmp syscall_after_call
+END(syscall_badsys)
+
+sysenter_badsys:
+	movl $-ENOSYS,%eax
+	jmp sysenter_after_call
 END(syscall_badsys)
 	CFI_ENDPROC
 
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6afbb16e9b79..94d857fb1033 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
 	if (!pud_present(pud)) {
 		pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
 		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
 			set_pud(&pud_p[n], pud);
 	}
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
 	if (!pmd_present(pmd)) {
 		pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
 		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
 			set_pmd(&pmd_p[n], pmd);
 	}
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
 	pte_p = pte_offset_kernel(&pmd, addr);
 	stack_page = (void *)__get_free_page(GFP_KERNEL);
 	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
-	paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
 	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
 		set_pte(&pte_p[n*PTE_STRIDE], pte);
 
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7596df664901..67e6d19ef1be 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
 	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
 
+	if (user_mode_vm(regs))
+		return 0;
+
 	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
 	/*
 	 * We don't want to be preempted for the entire
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index a0da58db43a8..2851d63c1202 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
 	/* Set up to return from userspace. */
 	restorer = current->mm->context.vdso +
-		selected_vdso32->sym___kernel_sigreturn;
+		selected_vdso32->sym___kernel_rt_sigreturn;
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
 		restorer = ksig->ka.sa.sa_restorer;
 	put_user_ex(restorer, &frame->pretcode);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c6eb418c5627..0d0e922fafc1 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -343,6 +343,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 	if (poke_int3_handler(regs))
 		return;
 
+	prev_state = exception_enter();
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 				SIGTRAP) == NOTIFY_STOP)
@@ -351,9 +352,8 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 
 #ifdef CONFIG_KPROBES
 	if (kprobe_int3_handler(regs))
-		return;
+		goto exit;
 #endif
-	prev_state = exception_enter();
 
 	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 			SIGTRAP) == NOTIFY_STOP)
@@ -433,6 +433,8 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 	unsigned long dr6;
 	int si_code;
 
+	prev_state = exception_enter();
+
 	get_debugreg(dr6, 6);
 
 	/* Filter out all the reserved bits which are preset to 1 */
@@ -465,7 +467,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 	if (kprobe_debug_handler(regs))
 		goto exit;
 #endif
-	prev_state = exception_enter();
 
 	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
 			SIGTRAP) == NOTIFY_STOP)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57e5ce126d5a..ea030319b321 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 			mark_tsc_unstable("cpufreq changes");
-	}
 
-	set_cyc2ns_scale(tsc_khz, freq->cpu);
+		set_cyc2ns_scale(tsc_khz, freq->cpu);
+	}
 
 	return 0;
 }