path: root/arch/x86/kernel
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/sleep.c                 |  2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c               |  7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c             |  4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c       | 18
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c     |  4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/threshold.c       |  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c       |  1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c  | 46
-rw-r--r--  arch/x86/kernel/cpu/rdrand.c                 |  1
-rw-r--r--  arch/x86/kernel/early-quirks.c               | 18
-rw-r--r--  arch/x86/kernel/head32.c                     |  2
-rw-r--r--  arch/x86/kernel/head64.c                     |  2
-rw-r--r--  arch/x86/kernel/hpet.c                       |  2
-rw-r--r--  arch/x86/kernel/kprobes/core.c               | 16
-rw-r--r--  arch/x86/kernel/ldt.c                        |  4
-rw-r--r--  arch/x86/kernel/process_64.c                 |  2
-rw-r--r--  arch/x86/kernel/reboot.c                     | 82
-rw-r--r--  arch/x86/kernel/smp.c                        |  2
-rw-r--r--  arch/x86/kernel/traps.c                      |  6
-rw-r--r--  arch/x86/kernel/vsmp_64.c                    | 17
-rw-r--r--  arch/x86/kernel/vsyscall_gtod.c              |  2
21 files changed, 158 insertions(+), 84 deletions(-)
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 3a2ae4c88948..31368207837c 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -31,7 +31,7 @@ static char temp_stack[4096];
  *
  * Wrapper around acpi_enter_sleep_state() to be called by assmebly.
  */
-acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state)
 {
 	return acpi_enter_sleep_state(state);
 }
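
The `asmlinkage __visible` annotation recurs throughout this series: it marks functions that are called only from assembly, so that link-time optimization (LTO) cannot conclude they are unused and discard or localize them. A rough sketch of what the kernel's gcc compiler headers define (the exact header and version check vary by kernel release):

    /* include/linux/compiler-gcc4.h, approximately: tell the optimizer
     * that something outside the visible call graph uses this symbol. */
    #if GCC_VERSION >= 40600
    # define __visible	__attribute__((externally_visible))
    #endif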
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6ad4658de705..992060e09897 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2189,7 +2189,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
 	cfg->move_in_progress = 0;
 }
 
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 {
 	unsigned vector, me;
 
@@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void)
 	return nr_irqs_gsi;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+	return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
 int __init arch_probe_nr_irqs(void)
 {
 	int nr;
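
The new arch_dynirq_lower_bound() hook clamps dynamically allocated interrupt numbers above the GSI range, so they can never collide with IO-APIC pins. The generic descriptor allocator consults the hook before searching its bitmap; a sketch of the call site (names from memory, not part of this diff):

    /* kernel/irq/irqdesc.c, __irq_alloc_descs(), approximately: */
    from = arch_dynirq_lower_bound(from);	/* never allocate inside GSI space */
    start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
				       from, cnt, 0);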
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index eeee23ff75ef..68317c80de7f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -598,7 +598,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
 	struct mce m;
 	int i;
-	unsigned long *v;
 
 	this_cpu_inc(mce_poll_count);
 
@@ -618,8 +617,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;
 
-		v = &get_cpu_var(mce_polled_error);
-		set_bit(0, v);
+		this_cpu_write(mce_polled_error, 1);
 		/*
 		 * Uncorrected or signalled events are handled by the exception
 		 * handler when it is enabled, so don't process those here.
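
The mce_polled_error change is a preemption fix as much as a cleanup: get_cpu_var() disables preemption and must be paired with put_cpu_var(), which the removed code never did. this_cpu_write() needs no such pairing because it compiles to a single %gs-relative store on x86. A minimal contrast:

    /* Old pattern - disables preemption and leaks it without put_cpu_var(): */
    v = &get_cpu_var(mce_polled_error);
    set_bit(0, v);
    put_cpu_var(mce_polled_error);	/* this was the missing half */

    /* New pattern - one per-CPU store, preemption-safe, no pairing needed: */
    this_cpu_write(mce_polled_error, 1);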
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..9a316b21df8b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
  * cmci_discover_lock protects against parallel discovery attempts
  * which could race against each other.
  */
-static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
+static DEFINE_SPINLOCK(cmci_discover_lock);
 
 #define CMCI_THRESHOLD		1
 #define CMCI_POLL_INTERVAL	(30 * HZ)
@@ -144,14 +144,14 @@ static void cmci_storm_disable_banks(void)
 	int bank;
 	u64 val;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	owned = __get_cpu_var(mce_banks_owned);
 	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
 		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 		val &= ~MCI_CTL2_CMCI_EN;
 		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
 	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static bool cmci_storm_detect(void)
@@ -211,7 +211,7 @@ static void cmci_discover(int banks)
 	int i;
 	int bios_wrong_thresh = 0;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 		int bios_zero_thresh = 0;
@@ -266,7 +266,7 @@ static void cmci_discover(int banks)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
 		pr_info_once(
 			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
@@ -316,10 +316,10 @@ void cmci_clear(void)
 
 	if (!cmci_supported(&banks))
 		return;
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++)
 		__cmci_disable_bank(i);
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void cmci_rediscover_work_func(void *arg)
@@ -360,9 +360,9 @@ void cmci_disable_bank(int bank)
 	if (!cmci_supported(&banks))
 		return;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	__cmci_disable_bank(bank);
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void intel_init_cmci(void)
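
The raw_spinlock_t to spinlock_t conversion matters mostly under PREEMPT_RT, where the two types diverge: a spinlock_t becomes a sleeping lock, while a raw_spinlock_t always busy-waits and may be taken from hard atomic context. Demoting cmci_discover_lock signals that every path taking it runs in a context where sleeping is permissible on RT:

    static DEFINE_SPINLOCK(lock);	/* may become a sleeping lock on PREEMPT_RT */
    static DEFINE_RAW_SPINLOCK(rlock);	/* always spins, RT or not */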
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d921b7ee6595..36a1bb6d1ee0 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -429,14 +429,14 @@ static inline void __smp_thermal_interrupt(void)
 	smp_thermal_vector();
 }
 
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	__smp_thermal_interrupt();
 	exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index fe6b1c86645b..7245980186ee 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -24,14 +24,14 @@ static inline void __smp_threshold_interrupt(void)
 	mce_threshold_vector();
 }
 
-asmlinkage void smp_threshold_interrupt(void)
+asmlinkage __visible void smp_threshold_interrupt(void)
 {
 	entering_irq();
 	__smp_threshold_interrupt();
 	exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_threshold_interrupt(void)
+asmlinkage __visible void smp_trace_threshold_interrupt(void)
 {
 	entering_irq();
 	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index aa333d966886..adb02aa62af5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 059218ed5208..619f7699487a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -59,7 +59,7 @@
 #define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
 #define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
 #define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
-#define RAPL_IDX_PP1_NRG_STAT	3	/* DRAM */
+#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
 #define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
 
 /* Clients have PP0, PKG */
@@ -72,6 +72,12 @@
 	 1<<RAPL_IDX_PKG_NRG_STAT|\
 	 1<<RAPL_IDX_RAM_NRG_STAT)
 
+/* Servers have PP0, PKG, RAM, PP1 */
+#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
+			 1<<RAPL_IDX_PKG_NRG_STAT|\
+			 1<<RAPL_IDX_RAM_NRG_STAT|\
+			 1<<RAPL_IDX_PP1_NRG_STAT)
+
 /*
  * event code: LSB 8 bits, passed in attr->config
  * any other bit is reserved
@@ -425,6 +431,24 @@ static struct attribute *rapl_events_cln_attr[] = {
 	NULL,
 };
 
+static struct attribute *rapl_events_hsw_attr[] = {
+	EVENT_PTR(rapl_cores),
+	EVENT_PTR(rapl_pkg),
+	EVENT_PTR(rapl_gpu),
+	EVENT_PTR(rapl_ram),
+
+	EVENT_PTR(rapl_cores_unit),
+	EVENT_PTR(rapl_pkg_unit),
+	EVENT_PTR(rapl_gpu_unit),
+	EVENT_PTR(rapl_ram_unit),
+
+	EVENT_PTR(rapl_cores_scale),
+	EVENT_PTR(rapl_pkg_scale),
+	EVENT_PTR(rapl_gpu_scale),
+	EVENT_PTR(rapl_ram_scale),
+	NULL,
+};
+
 static struct attribute_group rapl_pmu_events_group = {
 	.name = "events",
 	.attrs = NULL, /* patched at runtime */
@@ -511,6 +535,7 @@ static int rapl_cpu_prepare(int cpu)
 	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
 	int phys_id = topology_physical_package_id(cpu);
 	u64 ms;
+	u64 msr_rapl_power_unit_bits;
 
 	if (pmu)
 		return 0;
@@ -518,6 +543,10 @@ static int rapl_cpu_prepare(int cpu)
 	if (phys_id < 0)
 		return -1;
 
+	/* protect rdmsrl() to handle virtualization */
+	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+		return -1;
+
 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
 	if (!pmu)
 		return -1;
@@ -531,8 +560,7 @@ static int rapl_cpu_prepare(int cpu)
 	 *
 	 * we cache in local PMU instance
 	 */
-	rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
-	pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+	pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
 	pmu->pmu = &rapl_pmu_class;
 
 	/*
@@ -631,11 +659,14 @@ static int __init rapl_pmu_init(void)
 	switch (boot_cpu_data.x86_model) {
 	case 42: /* Sandy Bridge */
 	case 58: /* Ivy Bridge */
-	case 60: /* Haswell */
-	case 69: /* Haswell-Celeron */
 		rapl_cntr_mask = RAPL_IDX_CLN;
 		rapl_pmu_events_group.attrs = rapl_events_cln_attr;
 		break;
+	case 60: /* Haswell */
+	case 69: /* Haswell-Celeron */
+		rapl_cntr_mask = RAPL_IDX_HSW;
+		rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
+		break;
 	case 45: /* Sandy Bridge-EP */
 	case 62: /* IvyTown */
 		rapl_cntr_mask = RAPL_IDX_SRV;
@@ -650,7 +681,9 @@ static int __init rapl_pmu_init(void)
 	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
-		rapl_cpu_prepare(cpu);
+		ret = rapl_cpu_prepare(cpu);
+		if (ret)
+			goto out;
 		rapl_cpu_init(cpu);
 	}
 
@@ -673,6 +706,7 @@ static int __init rapl_pmu_init(void)
 		hweight32(rapl_cntr_mask),
 		ktime_to_ms(pmu->timer_interval));
 
+out:
 	cpu_notifier_register_done();
 
 	return 0;
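
rdmsrl_safe() is the fault-tolerant variant of rdmsrl(): an RDMSR of a nonexistent MSR raises #GP, which rdmsrl_safe() traps and reports as a nonzero return instead of crashing. That matters under hypervisors that do not emulate MSR_RAPL_POWER_UNIT. The cached hw_unit value is the Energy Status Unit exponent from bits 12:8 of that MSR; per the Intel SDM (RAPL section), one raw counter tick is 1/2^ESU joules:

    /* bits 12:8 of MSR_RAPL_POWER_UNIT = Energy Status Unit (ESU);
     * one RAPL counter tick = 1 / 2^ESU joules.  A typical ESU of 16
     * gives 2^-16 J, roughly 15.3 uJ per tick.                      */
    pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;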
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 384df5105fbc..136ac74dee82 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -27,6 +27,7 @@
 static int __init x86_rdrand_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_RDRAND);
+	setup_clear_cpu_cap(X86_FEATURE_RDSEED);
 	return 1;
 }
 __setup("nordrand", x86_rdrand_setup);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index b0cc3809723d..6cda0baeac9d 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -17,6 +17,7 @@
 #include <asm/dma.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
+#include <asm/hpet.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
@@ -240,7 +241,7 @@ static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_s
 	return base;
 }
 
-#define KB(x)	((x) * 1024)
+#define KB(x)	((x) * 1024UL)
 #define MB(x)	(KB (KB (x)))
 #define GB(x)	(MB (KB (x)))
 
@@ -530,6 +531,15 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
 	}
 }
 
+static void __init force_disable_hpet(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+	boot_hpet_disable = 1;
+	pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
+#endif
+}
+
+
 #define QFLAG_APPLY_ONCE 	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -567,6 +577,12 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
 	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
+	/*
+	 * HPET on current version of Baytrail platform has accuracy
+	 * problems, disable it for now:
+	 */
+	{ PCI_VENDOR_ID_INTEL, 0x0f00,
+		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
 	{}
 };
 
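
The `1024UL` change is a sign-extension fix: with plain int arithmetic, MB() and GB() of even modest sizes overflow into a negative int, which then sign-extends when widened to a 64-bit size_t. A worked case on x86-64, where unsigned long is 64 bits (KB_BAD is illustrative only):

    #define KB_BAD(x)	((x) * 1024)	/* int arithmetic */
    #define KB(x)	((x) * 1024UL)	/* unsigned long arithmetic */

    /* KB_BAD(KB_BAD(2048)) = 2048 * 1024 * 1024 overflows int to
     * -2147483648; widened to u64 it sign-extends to 0xffffffff80000000.
     * KB(KB(2048)) stays 0x0000000080000000 as intended.                */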
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index c61a14a4a310..d6c1b9836995 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
 	reserve_ebda_region();
 }
 
-asmlinkage void __init i386_start_kernel(void)
+asmlinkage __visible void __init i386_start_kernel(void)
 {
 	sanitize_boot_params(&boot_params);
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 85126ccbdf6b..068054f4bf20 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
 	}
 }
 
-asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
+asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 {
 	int i;
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 8d80ae011603..4177bfbc80b0 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -88,7 +88,7 @@ static inline void hpet_clear_mapping(void)
 /*
  * HPET command line enable / disable
  */
-static int boot_hpet_disable;
+int boot_hpet_disable;
 int hpet_force_user;
 static int hpet_verbose;
 
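
Dropping `static` here is what lets the Baytrail quirk in early-quirks.c flip the flag before the HPET is initialized; the matching declaration lives in the header that file now includes (sketch from memory, not part of this diff):

    /* arch/x86/include/asm/hpet.h */
    extern int boot_hpet_disable;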
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 79a3f9682871..61b17dc2c277 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -897,9 +897,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	switch (kcb->kprobe_status) {
-	case KPROBE_HIT_SS:
-	case KPROBE_REENTER:
+	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
+		/* This must happen on single-stepping */
+		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
+			kcb->kprobe_status != KPROBE_REENTER);
 		/*
 		 * We are here because the instruction being single
 		 * stepped caused a page fault. We reset the current
@@ -914,9 +915,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		else
 			reset_current_kprobe();
 		preempt_enable_no_resched();
-		break;
-	case KPROBE_HIT_ACTIVE:
-	case KPROBE_HIT_SSDONE:
+	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
+		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
 		/*
 		 * We increment the nmissed count for accounting,
 		 * we can also use npre/npostfault count for accounting
@@ -945,10 +945,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * fixup routine could not handle it,
 		 * Let do_page_fault() fix it.
 		 */
-		break;
-	default:
-		break;
 	}
+
 	return 0;
 }
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index af1d14a9ebda..dcbbaa165bde 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -20,6 +20,8 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
+int sysctl_ldt16 = 0;
+
 #ifdef CONFIG_SMP
 static void flush_ldt(void *current_mm)
 {
@@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	 * IRET leaking the high bits of the kernel stack address.
 	 */
#ifdef CONFIG_X86_64
-	if (!ldt_info.seg_32bit) {
+	if (!ldt_info.seg_32bit && !sysctl_ldt16) {
 		error = -EINVAL;
 		goto out_unlock;
 	}
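
sysctl_ldt16 reintroduces 16-bit LDT segments (needed by Wine for 16-bit applications) as an explicit opt-in; the default stays off because of the IRET stack-address leak the comment above describes. The knob itself is registered outside arch/x86/kernel, so it does not appear in this diffstat; its /proc/sys/abi/ldt16 entry looks roughly like this (field values are assumptions recalled from the companion patch, not from this diff):

    {
    	.procname	= "ldt16",
    	.data		= &sysctl_ldt16,
    	.maxlen		= sizeof(int),
    	.mode		= 0644,
    	.proc_handler	= proc_dointvec
    },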
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9c0280f93d05..898d077617a9 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, old_rsp);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 654b46574b91..52b1157c53eb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -114,8 +114,8 @@ EXPORT_SYMBOL(machine_real_restart);
  */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
-	if (reboot_type != BOOT_CF9) {
-		reboot_type = BOOT_CF9;
+	if (reboot_type != BOOT_CF9_FORCE) {
+		reboot_type = BOOT_CF9_FORCE;
 		pr_info("%s series board detected. Selecting %s-method for reboots.\n",
 			d->ident, "PCI");
 	}
@@ -191,6 +191,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 		},
 	},
 
+	/* Certec */
+	{	/* Handle problems with rebooting on Certec BPC600 */
+		.callback = set_pci_reboot,
+		.ident = "Certec BPC600",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
+		},
+	},
+
 	/* Dell */
 	{	/* Handle problems with rebooting on Dell DXP061 */
 		.callback = set_bios_reboot,
@@ -458,20 +468,23 @@ void __attribute__((weak)) mach_reboot_fixups(void)
 }
 
 /*
- * Windows compatible x86 hardware expects the following on reboot:
+ * To the best of our knowledge Windows compatible x86 hardware expects
+ * the following on reboot:
  *
  * 1) If the FADT has the ACPI reboot register flag set, try it
 * 2) If still alive, write to the keyboard controller
 * 3) If still alive, write to the ACPI reboot register again
 * 4) If still alive, write to the keyboard controller again
 * 5) If still alive, call the EFI runtime service to reboot
- * 6) If still alive, write to the PCI IO port 0xCF9 to reboot
- * 7) If still alive, inform BIOS to do a proper reboot
+ * 6) If no EFI runtime service, call the BIOS to do a reboot
+ *
+ * We default to following the same pattern. We also have
+ * two other reboot methods: 'triple fault' and 'PCI', which
+ * can be triggered via the reboot= kernel boot option or
+ * via quirks.
 *
- * If the machine is still alive at this stage, it gives up. We default to
- * following the same pattern, except that if we're still alive after (7) we'll
- * try to force a triple fault and then cycle between hitting the keyboard
- * controller and doing that
+ * This means that this function can never return, it can misbehave
+ * by not rebooting properly and hanging.
 */
 static void native_machine_emergency_restart(void)
 {
@@ -492,6 +505,11 @@ static void native_machine_emergency_restart(void)
 	for (;;) {
 		/* Could also try the reset bit in the Hammer NB */
 		switch (reboot_type) {
+		case BOOT_ACPI:
+			acpi_reboot();
+			reboot_type = BOOT_KBD;
+			break;
+
 		case BOOT_KBD:
 			mach_reboot_fixups(); /* For board specific fixups */
 
@@ -509,43 +527,29 @@ static void native_machine_emergency_restart(void)
 			}
 			break;
 
-		case BOOT_TRIPLE:
-			load_idt(&no_idt);
-			__asm__ __volatile__("int3");
-
-			/* We're probably dead after this, but... */
-			reboot_type = BOOT_KBD;
-			break;
-
-		case BOOT_BIOS:
-			machine_real_restart(MRR_BIOS);
-
-			/* We're probably dead after this, but... */
-			reboot_type = BOOT_TRIPLE;
-			break;
-
-		case BOOT_ACPI:
-			acpi_reboot();
-			reboot_type = BOOT_KBD;
-			break;
-
 		case BOOT_EFI:
 			if (efi_enabled(EFI_RUNTIME_SERVICES))
 				efi.reset_system(reboot_mode == REBOOT_WARM ?
 						 EFI_RESET_WARM :
 						 EFI_RESET_COLD,
 						 EFI_SUCCESS, 0, NULL);
-			reboot_type = BOOT_CF9_COND;
+			reboot_type = BOOT_BIOS;
+			break;
+
+		case BOOT_BIOS:
+			machine_real_restart(MRR_BIOS);
+
+			/* We're probably dead after this, but... */
+			reboot_type = BOOT_CF9_SAFE;
 			break;
 
-		case BOOT_CF9:
+		case BOOT_CF9_FORCE:
 			port_cf9_safe = true;
 			/* Fall through */
 
-		case BOOT_CF9_COND:
+		case BOOT_CF9_SAFE:
 			if (port_cf9_safe) {
-				u8 reboot_code = reboot_mode == REBOOT_WARM ?
-					0x06 : 0x0E;
+				u8 reboot_code = reboot_mode == REBOOT_WARM ? 0x06 : 0x0E;
 				u8 cf9 = inb(0xcf9) & ~reboot_code;
 				outb(cf9|2, 0xcf9); /* Request hard reset */
 				udelay(50);
@@ -553,7 +557,15 @@ static void native_machine_emergency_restart(void)
 				outb(cf9|reboot_code, 0xcf9);
 				udelay(50);
 			}
-			reboot_type = BOOT_BIOS;
+			reboot_type = BOOT_TRIPLE;
+			break;
+
+		case BOOT_TRIPLE:
+			load_idt(&no_idt);
+			__asm__ __volatile__("int3");
+
+			/* We're probably dead after this, but... */
+			reboot_type = BOOT_KBD;
 			break;
 		}
 	}
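
Read together, the `reboot_type = ...` assignments visible above encode the reordered fall-through chain, each case arming the next method in case the current one leaves the machine alive:

    /* BOOT_ACPI -> BOOT_KBD -> ... -> BOOT_EFI -> BOOT_BIOS
     *           -> BOOT_CF9_SAFE -> BOOT_TRIPLE -> BOOT_KBD (and around) */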
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 7c3a5a61f2e4..be8e1bde07aa 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -168,7 +168,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-asmlinkage void smp_reboot_interrupt(void)
+asmlinkage __visible void smp_reboot_interrupt(void)
 {
 	ack_APIC_irq();
 	irq_enter();
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 57409f6b8c62..f73b5d435bdc 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -357,7 +357,7 @@ exit:
  * for scheduling or signal handling. The actual stack switch is done in
  * entry.S
  */
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = eregs;
 	/* Did already sync */
@@ -601,11 +601,11 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 #endif
 }
 
-asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
 
-asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
 {
 }
 
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index f6584a90aba3..b99b9ad8540c 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,6 +26,9 @@
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
+/* Flag below is initialized once during vSMP PCI initialization. */
+static int irq_routing_comply = 1;
+
 #if defined CONFIG_PCI && defined CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
@@ -33,7 +36,7 @@
  * and vice versa.
  */
 
-asmlinkage unsigned long vsmp_save_fl(void)
+asmlinkage __visible unsigned long vsmp_save_fl(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -53,7 +56,7 @@ __visible void vsmp_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-asmlinkage void vsmp_irq_disable(void)
+asmlinkage __visible void vsmp_irq_disable(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -61,7 +64,7 @@ asmlinkage void vsmp_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-asmlinkage void vsmp_irq_enable(void)
+asmlinkage __visible void vsmp_irq_enable(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void)
 #ifdef CONFIG_SMP
 	if (cap & ctl & BIT(8)) {
 		ctl &= ~BIT(8);
+
+		/* Interrupt routing set to ignore */
+		irq_routing_comply = 0;
+
 #ifdef CONFIG_PROC_FS
 		/* Don't let users change irq affinity via procfs */
 		no_irq_affinity = 1;
@@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void)
 {
 	/* need to update phys_pkg_id */
 	apic->phys_pkg_id = apicid_phys_pkg_id;
-	apic->vector_allocation_domain = fill_vector_allocation_domain;
+
+	if (!irq_routing_comply)
+		apic->vector_allocation_domain = fill_vector_allocation_domain;
 }
 
 void __init vsmp_init(void)
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index f9c6e56e14b5..9531fbb123ba 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -43,7 +43,7 @@ void update_vsyscall(struct timekeeper *tk)
 	vdata->monotonic_time_sec	= tk->xtime_sec
 					+ tk->wall_to_monotonic.tv_sec;
 	vdata->monotonic_time_snsec	= tk->xtime_nsec
-					+ (tk->wall_to_monotonic.tv_nsec
+					+ ((u64)tk->wall_to_monotonic.tv_nsec
 					<< tk->shift);
 	while (vdata->monotonic_time_snsec >=
 		(((u64)NSEC_PER_SEC) << tk->shift)) {
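
The (u64) cast fixes a 32-bit truncation: on 32-bit kernels wall_to_monotonic.tv_nsec is a 32-bit long, and shifting it left happens in 32-bit arithmetic before the addition ever widens the result. With tv_nsec near its 999999999 maximum (about 2^30) and a shift of, say, 8, the shifted value needs roughly 38 bits:

    /* 32-bit long: 999999999 << 8 needs ~38 bits -> high bits are lost.
     * Casting to u64 first performs the shift in 64-bit arithmetic:    */
    snsec += (u64)tk->wall_to_monotonic.tv_nsec << tk->shift;	/* snsec illustrative */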