Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 90
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 11
-rw-r--r--  arch/x86/kernel/acpi/sleep.h | 2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S | 2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 11
-rw-r--r--  arch/x86/kernel/apb_timer.c | 10
-rw-r--r--  arch/x86/kernel/apic/apic.c | 8
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 72
-rw-r--r--  arch/x86/kernel/asm-offsets.c | 1
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 6
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 8
-rw-r--r--  arch/x86/kernel/cpu/common.c | 17
-rw-r--r--  arch/x86/kernel/cpu/cpu.h | 20
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 12
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-apei.c | 3
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c | 27
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 31
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 83
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 204
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c | 31
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 144
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 15
-rw-r--r--  arch/x86/kernel/cpu/umc.c | 4
-rw-r--r--  arch/x86/kernel/crash.c | 2
-rw-r--r--  arch/x86/kernel/devicetree.c | 51
-rw-r--r--  arch/x86/kernel/dumpstack.c | 11
-rw-r--r--  arch/x86/kernel/early-quirks.c | 12
-rw-r--r--  arch/x86/kernel/early_printk.c | 9
-rw-r--r--  arch/x86/kernel/entry_32.S | 17
-rw-r--r--  arch/x86/kernel/entry_64.S | 36
-rw-r--r--  arch/x86/kernel/head32.c | 4
-rw-r--r--  arch/x86/kernel/head64.c | 2
-rw-r--r--  arch/x86/kernel/i386_ksyms_32.c | 7
-rw-r--r--  arch/x86/kernel/i387.c | 2
-rw-r--r--  arch/x86/kernel/i8259.c | 3
-rw-r--r--  arch/x86/kernel/irq_32.c | 34
-rw-r--r--  arch/x86/kernel/irq_64.c | 21
-rw-r--r--  arch/x86/kernel/jump_label.c | 25
-rw-r--r--  arch/x86/kernel/kvm.c | 21
-rw-r--r--  arch/x86/kernel/kvmclock.c | 1
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 3
-rw-r--r--  arch/x86/kernel/module.c | 2
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/nmi.c | 4
-rw-r--r--  arch/x86/kernel/preempt.S | 25
-rw-r--r--  arch/x86/kernel/process.c | 6
-rw-r--r--  arch/x86/kernel/process_32.c | 12
-rw-r--r--  arch/x86/kernel/process_64.c | 12
-rw-r--r--  arch/x86/kernel/pvclock.c | 13
-rw-r--r--  arch/x86/kernel/reboot.c | 285
-rw-r--r--  arch/x86/kernel/rtc.c | 12
-rw-r--r--  arch/x86/kernel/setup.c | 10
-rw-r--r--  arch/x86/kernel/smpboot.c | 57
-rw-r--r--  arch/x86/kernel/sysfb_simplefb.c | 4
-rw-r--r--  arch/x86/kernel/topology.c | 11
-rw-r--r--  arch/x86/kernel/traps.c | 34
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 9
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c | 7
-rw-r--r--  arch/x86/kernel/x86_init.c | 10
62 files changed, 930 insertions(+), 668 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index a5408b965c9d..9b0a34e2cd79 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -36,6 +36,8 @@ obj-y += tsc.o io_delay.o rtc.o
36obj-y += pci-iommu_table.o 36obj-y += pci-iommu_table.o
37obj-y += resource.o 37obj-y += resource.o
38 38
39obj-$(CONFIG_PREEMPT) += preempt.o
40
39obj-y += process.o 41obj-y += process.o
40obj-y += i387.o xsave.o 42obj-y += i387.o xsave.o
41obj-y += ptrace.o 43obj-y += ptrace.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 40c76604199f..6c0b43bd024b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -189,24 +189,31 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
189 return 0; 189 return 0;
190} 190}
191 191
192static void acpi_register_lapic(int id, u8 enabled) 192/**
193 * acpi_register_lapic - register a local apic and generates a logic cpu number
194 * @id: local apic id to register
195 * @enabled: this cpu is enabled or not
196 *
197 * Returns the logic cpu number which maps to the local apic
198 */
199static int acpi_register_lapic(int id, u8 enabled)
193{ 200{
194 unsigned int ver = 0; 201 unsigned int ver = 0;
195 202
196 if (id >= MAX_LOCAL_APIC) { 203 if (id >= MAX_LOCAL_APIC) {
197 printk(KERN_INFO PREFIX "skipped apicid that is too big\n"); 204 printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
198 return; 205 return -EINVAL;
199 } 206 }
200 207
201 if (!enabled) { 208 if (!enabled) {
202 ++disabled_cpus; 209 ++disabled_cpus;
203 return; 210 return -EINVAL;
204 } 211 }
205 212
206 if (boot_cpu_physical_apicid != -1U) 213 if (boot_cpu_physical_apicid != -1U)
207 ver = apic_version[boot_cpu_physical_apicid]; 214 ver = apic_version[boot_cpu_physical_apicid];
208 215
209 generic_processor_info(id, ver); 216 return generic_processor_info(id, ver);
210} 217}
211 218
212static int __init 219static int __init
@@ -614,84 +621,27 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
614#endif 621#endif
615} 622}
616 623
617static int _acpi_map_lsapic(acpi_handle handle, int *pcpu) 624static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
618{ 625{
619 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
620 union acpi_object *obj;
621 struct acpi_madt_local_apic *lapic;
622 cpumask_var_t tmp_map, new_map;
623 u8 physid;
624 int cpu; 626 int cpu;
625 int retval = -ENOMEM;
626
627 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
628 return -EINVAL;
629
630 if (!buffer.length || !buffer.pointer)
631 return -EINVAL;
632
633 obj = buffer.pointer;
634 if (obj->type != ACPI_TYPE_BUFFER ||
635 obj->buffer.length < sizeof(*lapic)) {
636 kfree(buffer.pointer);
637 return -EINVAL;
638 }
639 627
640 lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; 628 cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
641 629 if (cpu < 0) {
642 if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || 630 pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
643 !(lapic->lapic_flags & ACPI_MADT_ENABLED)) { 631 return cpu;
644 kfree(buffer.pointer);
645 return -EINVAL;
646 }
647
648 physid = lapic->id;
649
650 kfree(buffer.pointer);
651 buffer.length = ACPI_ALLOCATE_BUFFER;
652 buffer.pointer = NULL;
653 lapic = NULL;
654
655 if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
656 goto out;
657
658 if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
659 goto free_tmp_map;
660
661 cpumask_copy(tmp_map, cpu_present_mask);
662 acpi_register_lapic(physid, ACPI_MADT_ENABLED);
663
664 /*
665 * If acpi_register_lapic successfully generates a new logical cpu
666 * number, then the following will get us exactly what was mapped
667 */
668 cpumask_andnot(new_map, cpu_present_mask, tmp_map);
669 if (cpumask_empty(new_map)) {
670 printk ("Unable to map lapic to logical cpu number\n");
671 retval = -EINVAL;
672 goto free_new_map;
673 } 632 }
674 633
675 acpi_processor_set_pdc(handle); 634 acpi_processor_set_pdc(handle);
676
677 cpu = cpumask_first(new_map);
678 acpi_map_cpu2node(handle, cpu, physid); 635 acpi_map_cpu2node(handle, cpu, physid);
679 636
680 *pcpu = cpu; 637 *pcpu = cpu;
681 retval = 0; 638 return 0;
682
683free_new_map:
684 free_cpumask_var(new_map);
685free_tmp_map:
686 free_cpumask_var(tmp_map);
687out:
688 return retval;
689} 639}
690 640
691/* wrapper to silence section mismatch warning */ 641/* wrapper to silence section mismatch warning */
692int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu) 642int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
693{ 643{
694 return _acpi_map_lsapic(handle, pcpu); 644 return _acpi_map_lsapic(handle, physid, pcpu);
695} 645}
696EXPORT_SYMBOL(acpi_map_lsapic); 646EXPORT_SYMBOL(acpi_map_lsapic);
697 647
@@ -745,7 +695,7 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
745#ifdef CONFIG_HPET_TIMER 695#ifdef CONFIG_HPET_TIMER
746#include <asm/hpet.h> 696#include <asm/hpet.h>
747 697
748static struct __initdata resource *hpet_res; 698static struct resource *hpet_res __initdata;
749 699
750static int __init acpi_parse_hpet(struct acpi_table_header *table) 700static int __init acpi_parse_hpet(struct acpi_table_header *table)
751{ 701{
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 33120100ff5e..3a2ae4c88948 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -26,6 +26,17 @@ static char temp_stack[4096];
26#endif 26#endif
27 27
28/** 28/**
29 * x86_acpi_enter_sleep_state - enter sleep state
30 * @state: Sleep state to enter.
31 *
32 * Wrapper around acpi_enter_sleep_state() to be called by assembly.
33 */
34acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
35{
36 return acpi_enter_sleep_state(state);
37}
38
39/**
29 * x86_acpi_suspend_lowlevel - save kernel state 40 * x86_acpi_suspend_lowlevel - save kernel state
30 * 41 *
31 * Create an identity mapped page table and copy the wakeup routine to 42 * Create an identity mapped page table and copy the wakeup routine to
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index c9c2c982d5e4..65c7b606b606 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -17,3 +17,5 @@ extern void wakeup_long64(void);
17extern void do_suspend_lowlevel(void); 17extern void do_suspend_lowlevel(void);
18 18
19extern int x86_acpi_suspend_lowlevel(void); 19extern int x86_acpi_suspend_lowlevel(void);
20
21acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index d1daa66ab162..665c6b7d2ea9 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -73,7 +73,7 @@ ENTRY(do_suspend_lowlevel)
73 call save_processor_state 73 call save_processor_state
74 call save_registers 74 call save_registers
75 pushl $3 75 pushl $3
76 call acpi_enter_sleep_state 76 call x86_acpi_enter_sleep_state
77 addl $4, %esp 77 addl $4, %esp
78 78
79# In case of S3 failure, we'll emerge here. Jump 79# In case of S3 failure, we'll emerge here. Jump
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164cbd04..ae693b51ed8e 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -73,7 +73,7 @@ ENTRY(do_suspend_lowlevel)
73 addq $8, %rsp 73 addq $8, %rsp
74 movl $3, %edi 74 movl $3, %edi
75 xorl %eax, %eax 75 xorl %eax, %eax
76 call acpi_enter_sleep_state 76 call x86_acpi_enter_sleep_state
77 /* in case something went wrong, restore the machine status and go on */ 77 /* in case something went wrong, restore the machine status and go on */
78 jmp resume_point 78 jmp resume_point
79 79
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 15e8563e5c24..df94598ad05a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -402,17 +402,6 @@ void alternatives_enable_smp(void)
402{ 402{
403 struct smp_alt_module *mod; 403 struct smp_alt_module *mod;
404 404
405#ifdef CONFIG_LOCKDEP
406 /*
407 * Older binutils section handling bug prevented
408 * alternatives-replacement from working reliably.
409 *
410 * If this still occurs then you should see a hang
411 * or crash shortly after this line:
412 */
413 pr_info("lockdep: fixing up alternatives\n");
414#endif
415
416 /* Why bother if there are no other CPUs? */ 405 /* Why bother if there are no other CPUs? */
417 BUG_ON(num_possible_cpus() == 1); 406 BUG_ON(num_possible_cpus() == 1);
418 407
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index c9876efecafb..af5b08ab3b71 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -40,7 +40,7 @@
40 40
41#include <asm/fixmap.h> 41#include <asm/fixmap.h>
42#include <asm/apb_timer.h> 42#include <asm/apb_timer.h>
43#include <asm/mrst.h> 43#include <asm/intel-mid.h>
44#include <asm/time.h> 44#include <asm/time.h>
45 45
46#define APBT_CLOCKEVENT_RATING 110 46#define APBT_CLOCKEVENT_RATING 110
@@ -157,13 +157,13 @@ static int __init apbt_clockevent_register(void)
157 157
158 adev->num = smp_processor_id(); 158 adev->num = smp_processor_id();
159 adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0", 159 adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
160 mrst_timer_options == MRST_TIMER_LAPIC_APBT ? 160 intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
161 APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING, 161 APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
162 adev_virt_addr(adev), 0, apbt_freq); 162 adev_virt_addr(adev), 0, apbt_freq);
163 /* Firmware does EOI handling for us. */ 163 /* Firmware does EOI handling for us. */
164 adev->timer->eoi = NULL; 164 adev->timer->eoi = NULL;
165 165
166 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { 166 if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
167 global_clock_event = &adev->timer->ced; 167 global_clock_event = &adev->timer->ced;
168 printk(KERN_DEBUG "%s clockevent registered as global\n", 168 printk(KERN_DEBUG "%s clockevent registered as global\n",
169 global_clock_event->name); 169 global_clock_event->name);
@@ -253,7 +253,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
253 253
254static __init int apbt_late_init(void) 254static __init int apbt_late_init(void)
255{ 255{
256 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT || 256 if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
257 !apb_timer_block_enabled) 257 !apb_timer_block_enabled)
258 return 0; 258 return 0;
259 /* This notifier should be called after workqueue is ready */ 259 /* This notifier should be called after workqueue is ready */
@@ -340,7 +340,7 @@ void __init apbt_time_init(void)
340 } 340 }
341#ifdef CONFIG_SMP 341#ifdef CONFIG_SMP
342 /* kernel cmdline disable apb timer, so we will use lapic timers */ 342 /* kernel cmdline disable apb timer, so we will use lapic timers */
343 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { 343 if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
344 printk(KERN_INFO "apbt: disabled per cpu timer\n"); 344 printk(KERN_INFO "apbt: disabled per cpu timer\n");
345 return; 345 return;
346 } 346 }
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a7eb82d9b012..ed165d657380 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
2107 apic_write(APIC_LVT1, value); 2107 apic_write(APIC_LVT1, value);
2108} 2108}
2109 2109
2110void generic_processor_info(int apicid, int version) 2110int generic_processor_info(int apicid, int version)
2111{ 2111{
2112 int cpu, max = nr_cpu_ids; 2112 int cpu, max = nr_cpu_ids;
2113 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, 2113 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2127,7 +2127,7 @@ void generic_processor_info(int apicid, int version)
2127 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); 2127 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2128 2128
2129 disabled_cpus++; 2129 disabled_cpus++;
2130 return; 2130 return -ENODEV;
2131 } 2131 }
2132 2132
2133 if (num_processors >= nr_cpu_ids) { 2133 if (num_processors >= nr_cpu_ids) {
@@ -2138,7 +2138,7 @@ void generic_processor_info(int apicid, int version)
2138 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); 2138 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2139 2139
2140 disabled_cpus++; 2140 disabled_cpus++;
2141 return; 2141 return -EINVAL;
2142 } 2142 }
2143 2143
2144 num_processors++; 2144 num_processors++;
@@ -2183,6 +2183,8 @@ void generic_processor_info(int apicid, int version)
2183#endif 2183#endif
2184 set_cpu_possible(cpu, true); 2184 set_cpu_possible(cpu, true);
2185 set_cpu_present(cpu, true); 2185 set_cpu_present(cpu, true);
2186
2187 return cpu;
2186} 2188}
2187 2189
2188int hard_smp_processor_id(void) 2190int hard_smp_processor_id(void)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1191ac1c9d25..ad0dc0428baf 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -39,12 +39,6 @@
39#include <asm/x86_init.h> 39#include <asm/x86_init.h>
40#include <asm/nmi.h> 40#include <asm/nmi.h>
41 41
42/* BMC sets a bit this MMR non-zero before sending an NMI */
43#define UVH_NMI_MMR UVH_SCRATCH5
44#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8)
45#define UV_NMI_PENDING_MASK (1UL << 63)
46DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
47
48DEFINE_PER_CPU(int, x2apic_extra_bits); 42DEFINE_PER_CPU(int, x2apic_extra_bits);
49 43
50#define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args) 44#define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args)
@@ -58,7 +52,6 @@ int uv_min_hub_revision_id;
58EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); 52EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
59unsigned int uv_apicid_hibits; 53unsigned int uv_apicid_hibits;
60EXPORT_SYMBOL_GPL(uv_apicid_hibits); 54EXPORT_SYMBOL_GPL(uv_apicid_hibits);
61static DEFINE_SPINLOCK(uv_nmi_lock);
62 55
63static struct apic apic_x2apic_uv_x; 56static struct apic apic_x2apic_uv_x;
64 57
@@ -113,7 +106,7 @@ static int __init early_get_pnodeid(void)
113 break; 106 break;
114 case UV3_HUB_PART_NUMBER: 107 case UV3_HUB_PART_NUMBER:
115 case UV3_HUB_PART_NUMBER_X: 108 case UV3_HUB_PART_NUMBER_X:
116 uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1; 109 uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
117 break; 110 break;
118 } 111 }
119 112
@@ -847,68 +840,6 @@ void uv_cpu_init(void)
847 set_x2apic_extra_bits(uv_hub_info->pnode); 840 set_x2apic_extra_bits(uv_hub_info->pnode);
848} 841}
849 842
850/*
851 * When NMI is received, print a stack trace.
852 */
853int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
854{
855 unsigned long real_uv_nmi;
856 int bid;
857
858 /*
859 * Each blade has an MMR that indicates when an NMI has been sent
860 * to cpus on the blade. If an NMI is detected, atomically
861 * clear the MMR and update a per-blade NMI count used to
862 * cause each cpu on the blade to notice a new NMI.
863 */
864 bid = uv_numa_blade_id();
865 real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
866
867 if (unlikely(real_uv_nmi)) {
868 spin_lock(&uv_blade_info[bid].nmi_lock);
869 real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
870 if (real_uv_nmi) {
871 uv_blade_info[bid].nmi_count++;
872 uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
873 }
874 spin_unlock(&uv_blade_info[bid].nmi_lock);
875 }
876
877 if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
878 return NMI_DONE;
879
880 __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
881
882 /*
883 * Use a lock so only one cpu prints at a time.
884 * This prevents intermixed output.
885 */
886 spin_lock(&uv_nmi_lock);
887 pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
888 dump_stack();
889 spin_unlock(&uv_nmi_lock);
890
891 return NMI_HANDLED;
892}
893
894void uv_register_nmi_notifier(void)
895{
896 if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
897 printk(KERN_WARNING "UV NMI handler failed to register\n");
898}
899
900void uv_nmi_init(void)
901{
902 unsigned int value;
903
904 /*
905 * Unmask NMI on all cpus
906 */
907 value = apic_read(APIC_LVT1) | APIC_DM_NMI;
908 value &= ~APIC_LVT_MASKED;
909 apic_write(APIC_LVT1, value);
910}
911
912void __init uv_system_init(void) 843void __init uv_system_init(void)
913{ 844{
914 union uvh_rh_gam_config_mmr_u m_n_config; 845 union uvh_rh_gam_config_mmr_u m_n_config;
@@ -1046,6 +977,7 @@ void __init uv_system_init(void)
1046 map_mmr_high(max_pnode); 977 map_mmr_high(max_pnode);
1047 map_mmioh_high(min_pnode, max_pnode); 978 map_mmioh_high(min_pnode, max_pnode);
1048 979
980 uv_nmi_setup();
1049 uv_cpu_init(); 981 uv_cpu_init();
1050 uv_scir_register_cpu_notifier(); 982 uv_scir_register_cpu_notifier();
1051 uv_register_nmi_notifier(); 983 uv_register_nmi_notifier();
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 28610822fb3c..9f6b9341950f 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -32,7 +32,6 @@ void common(void) {
32 OFFSET(TI_flags, thread_info, flags); 32 OFFSET(TI_flags, thread_info, flags);
33 OFFSET(TI_status, thread_info, status); 33 OFFSET(TI_status, thread_info, status);
34 OFFSET(TI_addr_limit, thread_info, addr_limit); 34 OFFSET(TI_addr_limit, thread_info, addr_limit);
35 OFFSET(TI_preempt_count, thread_info, preempt_count);
36 35
37 BLANK(); 36 BLANK();
38 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); 37 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 903a264af981..3daece79a142 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -823,8 +823,8 @@ static const struct cpu_dev amd_cpu_dev = {
823 .c_vendor = "AMD", 823 .c_vendor = "AMD",
824 .c_ident = { "AuthenticAMD" }, 824 .c_ident = { "AuthenticAMD" },
825#ifdef CONFIG_X86_32 825#ifdef CONFIG_X86_32
826 .c_models = { 826 .legacy_models = {
827 { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = 827 { .family = 4, .model_names =
828 { 828 {
829 [3] = "486 DX/2", 829 [3] = "486 DX/2",
830 [7] = "486 DX/2-WB", 830 [7] = "486 DX/2-WB",
@@ -835,7 +835,7 @@ static const struct cpu_dev amd_cpu_dev = {
835 } 835 }
836 }, 836 },
837 }, 837 },
838 .c_size_cache = amd_size_cache, 838 .legacy_cache_size = amd_size_cache,
839#endif 839#endif
840 .c_early_init = early_init_amd, 840 .c_early_init = early_init_amd,
841 .c_detect_tlb = cpu_detect_tlb_amd, 841 .c_detect_tlb = cpu_detect_tlb_amd,
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index fbf6c3bc2400..8d5652dc99dd 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -468,10 +468,10 @@ static void init_centaur(struct cpuinfo_x86 *c)
468#endif 468#endif
469} 469}
470 470
471#ifdef CONFIG_X86_32
471static unsigned int 472static unsigned int
472centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) 473centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
473{ 474{
474#ifdef CONFIG_X86_32
475 /* VIA C3 CPUs (670-68F) need further shifting. */ 475 /* VIA C3 CPUs (670-68F) need further shifting. */
476 if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) 476 if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
477 size >>= 8; 477 size >>= 8;
@@ -484,16 +484,18 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
484 if ((c->x86 == 6) && (c->x86_model == 9) && 484 if ((c->x86 == 6) && (c->x86_model == 9) &&
485 (c->x86_mask == 1) && (size == 65)) 485 (c->x86_mask == 1) && (size == 65))
486 size -= 1; 486 size -= 1;
487#endif
488 return size; 487 return size;
489} 488}
489#endif
490 490
491static const struct cpu_dev centaur_cpu_dev = { 491static const struct cpu_dev centaur_cpu_dev = {
492 .c_vendor = "Centaur", 492 .c_vendor = "Centaur",
493 .c_ident = { "CentaurHauls" }, 493 .c_ident = { "CentaurHauls" },
494 .c_early_init = early_init_centaur, 494 .c_early_init = early_init_centaur,
495 .c_init = init_centaur, 495 .c_init = init_centaur,
496 .c_size_cache = centaur_size_cache, 496#ifdef CONFIG_X86_32
497 .legacy_cache_size = centaur_size_cache,
498#endif
497 .c_x86_vendor = X86_VENDOR_CENTAUR, 499 .c_x86_vendor = X86_VENDOR_CENTAUR,
498}; 500};
499 501
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2793d1f095a2..6abc172b8258 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -346,7 +346,8 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
346/* Look up CPU names by table lookup. */ 346/* Look up CPU names by table lookup. */
347static const char *table_lookup_model(struct cpuinfo_x86 *c) 347static const char *table_lookup_model(struct cpuinfo_x86 *c)
348{ 348{
349 const struct cpu_model_info *info; 349#ifdef CONFIG_X86_32
350 const struct legacy_cpu_model_info *info;
350 351
351 if (c->x86_model >= 16) 352 if (c->x86_model >= 16)
352 return NULL; /* Range check */ 353 return NULL; /* Range check */
@@ -354,13 +355,14 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
354 if (!this_cpu) 355 if (!this_cpu)
355 return NULL; 356 return NULL;
356 357
357 info = this_cpu->c_models; 358 info = this_cpu->legacy_models;
358 359
359 while (info && info->family) { 360 while (info->family) {
360 if (info->family == c->x86) 361 if (info->family == c->x86)
361 return info->model_names[c->x86_model]; 362 return info->model_names[c->x86_model];
362 info++; 363 info++;
363 } 364 }
365#endif
364 return NULL; /* Not found */ 366 return NULL; /* Not found */
365} 367}
366 368
@@ -450,8 +452,8 @@ void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
450 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); 452 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
451#else 453#else
452 /* do processor-specific cache resizing */ 454 /* do processor-specific cache resizing */
453 if (this_cpu->c_size_cache) 455 if (this_cpu->legacy_cache_size)
454 l2size = this_cpu->c_size_cache(c, l2size); 456 l2size = this_cpu->legacy_cache_size(c, l2size);
455 457
456 /* Allow user to override all this if necessary. */ 458 /* Allow user to override all this if necessary. */
457 if (cachesize_override != -1) 459 if (cachesize_override != -1)
@@ -1095,6 +1097,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
1095 1097
1096DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; 1098DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1097 1099
1100DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1101EXPORT_PER_CPU_SYMBOL(__preempt_count);
1102
1098DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); 1103DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
1099 1104
1100/* 1105/*
@@ -1169,6 +1174,8 @@ void debug_stack_reset(void)
1169 1174
1170DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 1175DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1171EXPORT_PER_CPU_SYMBOL(current_task); 1176EXPORT_PER_CPU_SYMBOL(current_task);
1177DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1178EXPORT_PER_CPU_SYMBOL(__preempt_count);
1172DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); 1179DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
1173 1180
1174#ifdef CONFIG_CC_STACKPROTECTOR 1181#ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 4041c24ae7db..c37dc37e8317 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -1,12 +1,6 @@
1#ifndef ARCH_X86_CPU_H 1#ifndef ARCH_X86_CPU_H
2#define ARCH_X86_CPU_H 2#define ARCH_X86_CPU_H
3 3
4struct cpu_model_info {
5 int vendor;
6 int family;
7 const char *model_names[16];
8};
9
10/* attempt to consolidate cpu attributes */ 4/* attempt to consolidate cpu attributes */
11struct cpu_dev { 5struct cpu_dev {
12 const char *c_vendor; 6 const char *c_vendor;
@@ -14,15 +8,23 @@ struct cpu_dev {
14 /* some have two possibilities for cpuid string */ 8 /* some have two possibilities for cpuid string */
15 const char *c_ident[2]; 9 const char *c_ident[2];
16 10
17 struct cpu_model_info c_models[4];
18
19 void (*c_early_init)(struct cpuinfo_x86 *); 11 void (*c_early_init)(struct cpuinfo_x86 *);
20 void (*c_bsp_init)(struct cpuinfo_x86 *); 12 void (*c_bsp_init)(struct cpuinfo_x86 *);
21 void (*c_init)(struct cpuinfo_x86 *); 13 void (*c_init)(struct cpuinfo_x86 *);
22 void (*c_identify)(struct cpuinfo_x86 *); 14 void (*c_identify)(struct cpuinfo_x86 *);
23 void (*c_detect_tlb)(struct cpuinfo_x86 *); 15 void (*c_detect_tlb)(struct cpuinfo_x86 *);
24 unsigned int (*c_size_cache)(struct cpuinfo_x86 *, unsigned int);
25 int c_x86_vendor; 16 int c_x86_vendor;
17#ifdef CONFIG_X86_32
18 /* Optional vendor specific routine to obtain the cache size. */
19 unsigned int (*legacy_cache_size)(struct cpuinfo_x86 *,
20 unsigned int);
21
22 /* Family/stepping-based lookup table for model names. */
23 struct legacy_cpu_model_info {
24 int family;
25 const char *model_names[16];
26 } legacy_models[5];
27#endif
26}; 28};
27 29
28struct _tlb_table { 30struct _tlb_table {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index ec7299566f79..dc1ec0dff939 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -665,8 +665,8 @@ static const struct cpu_dev intel_cpu_dev = {
665 .c_vendor = "Intel", 665 .c_vendor = "Intel",
666 .c_ident = { "GenuineIntel" }, 666 .c_ident = { "GenuineIntel" },
667#ifdef CONFIG_X86_32 667#ifdef CONFIG_X86_32
668 .c_models = { 668 .legacy_models = {
669 { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = 669 { .family = 4, .model_names =
670 { 670 {
671 [0] = "486 DX-25/33", 671 [0] = "486 DX-25/33",
672 [1] = "486 DX-50", 672 [1] = "486 DX-50",
@@ -679,7 +679,7 @@ static const struct cpu_dev intel_cpu_dev = {
679 [9] = "486 DX/4-WB" 679 [9] = "486 DX/4-WB"
680 } 680 }
681 }, 681 },
682 { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = 682 { .family = 5, .model_names =
683 { 683 {
684 [0] = "Pentium 60/66 A-step", 684 [0] = "Pentium 60/66 A-step",
685 [1] = "Pentium 60/66", 685 [1] = "Pentium 60/66",
@@ -690,7 +690,7 @@ static const struct cpu_dev intel_cpu_dev = {
690 [8] = "Mobile Pentium MMX" 690 [8] = "Mobile Pentium MMX"
691 } 691 }
692 }, 692 },
693 { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = 693 { .family = 6, .model_names =
694 { 694 {
695 [0] = "Pentium Pro A-step", 695 [0] = "Pentium Pro A-step",
696 [1] = "Pentium Pro", 696 [1] = "Pentium Pro",
@@ -704,7 +704,7 @@ static const struct cpu_dev intel_cpu_dev = {
704 [11] = "Pentium III (Tualatin)", 704 [11] = "Pentium III (Tualatin)",
705 } 705 }
706 }, 706 },
707 { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names = 707 { .family = 15, .model_names =
708 { 708 {
709 [0] = "Pentium 4 (Unknown)", 709 [0] = "Pentium 4 (Unknown)",
710 [1] = "Pentium 4 (Willamette)", 710 [1] = "Pentium 4 (Willamette)",
@@ -714,7 +714,7 @@ static const struct cpu_dev intel_cpu_dev = {
714 } 714 }
715 }, 715 },
716 }, 716 },
717 .c_size_cache = intel_size_cache, 717 .legacy_cache_size = intel_size_cache,
718#endif 718#endif
719 .c_detect_tlb = intel_detect_tlb, 719 .c_detect_tlb = intel_detect_tlb,
720 .c_early_init = early_init_intel, 720 .c_early_init = early_init_intel,
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index cd8b166a1735..de8b60a53f69 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -42,8 +42,7 @@ void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
42 struct mce m; 42 struct mce m;
43 43
44 /* Only corrected MC is reported */ 44 /* Only corrected MC is reported */
45 if (!corrected || !(mem_err->validation_bits & 45 if (!corrected || !(mem_err->validation_bits & CPER_MEM_VALID_PA))
46 CPER_MEM_VALID_PHYSICAL_ADDRESS))
47 return; 46 return;
48 47
49 mce_setup(&m); 48 mce_setup(&m);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 71a39f3621ba..9f7ca266864a 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -15,6 +15,7 @@
15#include <linux/clocksource.h> 15#include <linux/clocksource.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
18#include <linux/efi.h>
18#include <linux/interrupt.h> 19#include <linux/interrupt.h>
19#include <asm/processor.h> 20#include <asm/processor.h>
20#include <asm/hypervisor.h> 21#include <asm/hypervisor.h>
@@ -23,6 +24,8 @@
23#include <asm/desc.h> 24#include <asm/desc.h>
24#include <asm/idle.h> 25#include <asm/idle.h>
25#include <asm/irq_regs.h> 26#include <asm/irq_regs.h>
27#include <asm/i8259.h>
28#include <asm/apic.h>
26 29
27struct ms_hyperv_info ms_hyperv; 30struct ms_hyperv_info ms_hyperv;
28EXPORT_SYMBOL_GPL(ms_hyperv); 31EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -76,6 +79,30 @@ static void __init ms_hyperv_init_platform(void)
76 printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", 79 printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
77 ms_hyperv.features, ms_hyperv.hints); 80 ms_hyperv.features, ms_hyperv.hints);
78 81
82#ifdef CONFIG_X86_LOCAL_APIC
83 if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
84 /*
85 * Get the APIC frequency.
86 */
87 u64 hv_lapic_frequency;
88
89 rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
90 hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
91 lapic_timer_frequency = hv_lapic_frequency;
92 printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n",
93 lapic_timer_frequency);
94
95 /*
96 * On Hyper-V, when we are booting off an EFI firmware stack,
97 * we do not have many legacy devices including PIC, PIT etc.
98 */
99 if (efi_enabled(EFI_BOOT)) {
100 printk(KERN_INFO "HyperV: Using null_legacy_pic\n");
101 legacy_pic = &null_legacy_pic;
102 }
103 }
104#endif
105
79 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) 106 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
80 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); 107 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
81} 108}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8355c84b9729..8e132931614d 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1276,16 +1276,16 @@ void perf_events_lapic_init(void)
1276static int __kprobes 1276static int __kprobes
1277perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) 1277perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1278{ 1278{
1279 int ret;
1280 u64 start_clock; 1279 u64 start_clock;
1281 u64 finish_clock; 1280 u64 finish_clock;
1281 int ret;
1282 1282
1283 if (!atomic_read(&active_events)) 1283 if (!atomic_read(&active_events))
1284 return NMI_DONE; 1284 return NMI_DONE;
1285 1285
1286 start_clock = local_clock(); 1286 start_clock = sched_clock();
1287 ret = x86_pmu.handle_irq(regs); 1287 ret = x86_pmu.handle_irq(regs);
1288 finish_clock = local_clock(); 1288 finish_clock = sched_clock();
1289 1289
1290 perf_sample_event_took(finish_clock - start_clock); 1290 perf_sample_event_took(finish_clock - start_clock);
1291 1291
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
1506 err = amd_pmu_init(); 1506 err = amd_pmu_init();
1507 break; 1507 break;
1508 default: 1508 default:
1509 return 0; 1509 err = -ENOTSUPP;
1510 } 1510 }
1511 if (err != 0) { 1511 if (err != 0) {
1512 pr_cont("no PMU driver, software events only.\n"); 1512 pr_cont("no PMU driver, software events only.\n");
@@ -1883,26 +1883,21 @@ static struct pmu pmu = {
1883 1883
1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1885{ 1885{
1886 userpg->cap_usr_time = 0; 1886 userpg->cap_user_time = 0;
1887 userpg->cap_usr_time_zero = 0; 1887 userpg->cap_user_time_zero = 0;
1888 userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; 1888 userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
1889 userpg->pmc_width = x86_pmu.cntval_bits; 1889 userpg->pmc_width = x86_pmu.cntval_bits;
1890 1890
1891 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 1891 if (!sched_clock_stable)
1892 return; 1892 return;
1893 1893
1894 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 1894 userpg->cap_user_time = 1;
1895 return;
1896
1897 userpg->cap_usr_time = 1;
1898 userpg->time_mult = this_cpu_read(cyc2ns); 1895 userpg->time_mult = this_cpu_read(cyc2ns);
1899 userpg->time_shift = CYC2NS_SCALE_FACTOR; 1896 userpg->time_shift = CYC2NS_SCALE_FACTOR;
1900 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; 1897 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
1901 1898
1902 if (sched_clock_stable && !check_tsc_disabled()) { 1899 userpg->cap_user_time_zero = 1;
1903 userpg->cap_usr_time_zero = 1; 1900 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1904 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1905 }
1906} 1901}
1907 1902
1908/* 1903/*
@@ -1994,7 +1989,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1994 frame.return_address = 0; 1989 frame.return_address = 0;
1995 1990
1996 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame)); 1991 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1997 if (bytes != sizeof(frame)) 1992 if (bytes != 0)
1998 break; 1993 break;
1999 1994
2000 if (!valid_user_frame(fp, sizeof(frame))) 1995 if (!valid_user_frame(fp, sizeof(frame)))
@@ -2046,7 +2041,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
2046 frame.return_address = 0; 2041 frame.return_address = 0;
2047 2042
2048 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame)); 2043 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
2049 if (bytes != sizeof(frame)) 2044 if (bytes != 0)
2050 break; 2045 break;
2051 2046
2052 if (!valid_user_frame(fp, sizeof(frame))) 2047 if (!valid_user_frame(fp, sizeof(frame)))
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index cc16faae0538..fd00bb29425d 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -164,6 +164,11 @@ struct cpu_hw_events {
164 struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX]; 164 struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
165 165
166 /* 166 /*
167 * Intel checkpoint mask
168 */
169 u64 intel_cp_status;
170
171 /*
167 * manage shared (per-core, per-cpu) registers 172 * manage shared (per-core, per-cpu) registers
168 * used on Intel NHM/WSM/SNB 173 * used on Intel NHM/WSM/SNB
169 */ 174 */
@@ -440,6 +445,7 @@ struct x86_pmu {
440 int lbr_nr; /* hardware stack size */ 445 int lbr_nr; /* hardware stack size */
441 u64 lbr_sel_mask; /* LBR_SELECT valid bits */ 446 u64 lbr_sel_mask; /* LBR_SELECT valid bits */
442 const int *lbr_sel_map; /* lbr_select mappings */ 447 const int *lbr_sel_map; /* lbr_select mappings */
448 bool lbr_double_abort; /* duplicated lbr aborts */
443 449
444 /* 450 /*
445 * Extra registers for events 451 * Extra registers for events
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index c62d88396ad5..0fa4f242f050 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -190,9 +190,9 @@ static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
190 EVENT_EXTRA_END 190 EVENT_EXTRA_END
191}; 191};
192 192
193EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); 193EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
194EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); 194EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
195EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2"); 195EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
196 196
197struct attribute *nhm_events_attrs[] = { 197struct attribute *nhm_events_attrs[] = {
198 EVENT_PTR(mem_ld_nhm), 198 EVENT_PTR(mem_ld_nhm),
@@ -899,8 +899,8 @@ static __initconst const u64 atom_hw_cache_event_ids
899static struct extra_reg intel_slm_extra_regs[] __read_mostly = 899static struct extra_reg intel_slm_extra_regs[] __read_mostly =
900{ 900{
901 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 901 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
902 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffff, RSP_0), 902 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
903 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffff, RSP_1), 903 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
904 EVENT_EXTRA_END 904 EVENT_EXTRA_END
905}; 905};
906 906
@@ -1184,6 +1184,11 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
1184 wrmsrl(hwc->config_base, ctrl_val); 1184 wrmsrl(hwc->config_base, ctrl_val);
1185} 1185}
1186 1186
1187static inline bool event_is_checkpointed(struct perf_event *event)
1188{
1189 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
1190}
1191
1187static void intel_pmu_disable_event(struct perf_event *event) 1192static void intel_pmu_disable_event(struct perf_event *event)
1188{ 1193{
1189 struct hw_perf_event *hwc = &event->hw; 1194 struct hw_perf_event *hwc = &event->hw;
@@ -1197,6 +1202,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
1197 1202
1198 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); 1203 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
1199 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); 1204 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
1205 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
1200 1206
1201 /* 1207 /*
1202 * must disable before any actual event 1208 * must disable before any actual event
@@ -1271,6 +1277,9 @@ static void intel_pmu_enable_event(struct perf_event *event)
1271 if (event->attr.exclude_guest) 1277 if (event->attr.exclude_guest)
1272 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx); 1278 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
1273 1279
1280 if (unlikely(event_is_checkpointed(event)))
1281 cpuc->intel_cp_status |= (1ull << hwc->idx);
1282
1274 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 1283 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1275 intel_pmu_enable_fixed(hwc); 1284 intel_pmu_enable_fixed(hwc);
1276 return; 1285 return;
@@ -1289,6 +1298,17 @@ static void intel_pmu_enable_event(struct perf_event *event)
1289int intel_pmu_save_and_restart(struct perf_event *event) 1298int intel_pmu_save_and_restart(struct perf_event *event)
1290{ 1299{
1291 x86_perf_event_update(event); 1300 x86_perf_event_update(event);
1301 /*
1302 * For a checkpointed counter always reset back to 0. This
1303 * avoids a situation where the counter overflows, aborts the
1304 * transaction and is then set back to shortly before the
1305 * overflow, and overflows and aborts again.
1306 */
1307 if (unlikely(event_is_checkpointed(event))) {
1308 /* No race with NMIs because the counter should not be armed */
1309 wrmsrl(event->hw.event_base, 0);
1310 local64_set(&event->hw.prev_count, 0);
1311 }
1292 return x86_perf_event_set_period(event); 1312 return x86_perf_event_set_period(event);
1293} 1313}
1294 1314
@@ -1372,6 +1392,13 @@ again:
1372 x86_pmu.drain_pebs(regs); 1392 x86_pmu.drain_pebs(regs);
1373 } 1393 }
1374 1394
1395 /*
1396 * Checkpointed counters can lead to 'spurious' PMIs because the
1397 * rollback caused by the PMI will have cleared the overflow status
1398 * bit. Therefore always force probe these counters.
1399 */
1400 status |= cpuc->intel_cp_status;
1401
1375 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 1402 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1376 struct perf_event *event = cpuc->events[bit]; 1403 struct perf_event *event = cpuc->events[bit];
1377 1404
@@ -1837,6 +1864,20 @@ static int hsw_hw_config(struct perf_event *event)
1837 event->attr.precise_ip > 0)) 1864 event->attr.precise_ip > 0))
1838 return -EOPNOTSUPP; 1865 return -EOPNOTSUPP;
1839 1866
1867 if (event_is_checkpointed(event)) {
1868 /*
1869 * Sampling of checkpointed events can cause situations where
1870 * the CPU constantly aborts because of a overflow, which is
1871 * then checkpointed back and ignored. Forbid checkpointing
1872 * for sampling.
1873 *
1874 * But still allow a long sampling period, so that perf stat
1875 * from KVM works.
1876 */
1877 if (event->attr.sample_period > 0 &&
1878 event->attr.sample_period < 0x7fffffff)
1879 return -EOPNOTSUPP;
1880 }
1840 return 0; 1881 return 0;
1841} 1882}
1842 1883
@@ -2182,10 +2223,36 @@ static __init void intel_nehalem_quirk(void)
2182 } 2223 }
2183} 2224}
2184 2225
2185EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3"); 2226EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
2186EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82") 2227EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
2228
2229/* Haswell special events */
2230EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
2231EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
2232EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
2233EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
2234EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
2235EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
2236EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
2237EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
2238EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
2239EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
2240EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
2241EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
2187 2242
2188static struct attribute *hsw_events_attrs[] = { 2243static struct attribute *hsw_events_attrs[] = {
2244 EVENT_PTR(tx_start),
2245 EVENT_PTR(tx_commit),
2246 EVENT_PTR(tx_abort),
2247 EVENT_PTR(tx_capacity),
2248 EVENT_PTR(tx_conflict),
2249 EVENT_PTR(el_start),
2250 EVENT_PTR(el_commit),
2251 EVENT_PTR(el_abort),
2252 EVENT_PTR(el_capacity),
2253 EVENT_PTR(el_conflict),
2254 EVENT_PTR(cycles_t),
2255 EVENT_PTR(cycles_ct),
2189 EVENT_PTR(mem_ld_hsw), 2256 EVENT_PTR(mem_ld_hsw),
2190 EVENT_PTR(mem_st_hsw), 2257 EVENT_PTR(mem_st_hsw),
2191 NULL 2258 NULL
@@ -2325,6 +2392,7 @@ __init int intel_pmu_init(void)
2325 break; 2392 break;
2326 2393
2327 case 55: /* Atom 22nm "Silvermont" */ 2394 case 55: /* Atom 22nm "Silvermont" */
2395 case 77: /* Avoton "Silvermont" */
2328 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 2396 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2329 sizeof(hw_cache_event_ids)); 2397 sizeof(hw_cache_event_ids));
2330 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, 2398 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -2451,6 +2519,7 @@ __init int intel_pmu_init(void)
2451 x86_pmu.hw_config = hsw_hw_config; 2519 x86_pmu.hw_config = hsw_hw_config;
2452 x86_pmu.get_event_constraints = hsw_get_event_constraints; 2520 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2453 x86_pmu.cpu_events = hsw_events_attrs; 2521 x86_pmu.cpu_events = hsw_events_attrs;
2522 x86_pmu.lbr_double_abort = true;
2454 pr_cont("Haswell events, "); 2523 pr_cont("Haswell events, ");
2455 break; 2524 break;
2456 2525
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 63438aad177f..ae96cfa5eddd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -12,6 +12,7 @@
12 12
13#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) 13#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
14#define PEBS_BUFFER_SIZE PAGE_SIZE 14#define PEBS_BUFFER_SIZE PAGE_SIZE
15#define PEBS_FIXUP_SIZE PAGE_SIZE
15 16
16/* 17/*
17 * pebs_record_32 for p4 and core not supported 18 * pebs_record_32 for p4 and core not supported
@@ -182,18 +183,32 @@ struct pebs_record_nhm {
182 * Same as pebs_record_nhm, with two additional fields. 183 * Same as pebs_record_nhm, with two additional fields.
183 */ 184 */
184struct pebs_record_hsw { 185struct pebs_record_hsw {
185 struct pebs_record_nhm nhm; 186 u64 flags, ip;
186 /* 187 u64 ax, bx, cx, dx;
187 * Real IP of the event. In the Intel documentation this 188 u64 si, di, bp, sp;
188 * is called eventingrip. 189 u64 r8, r9, r10, r11;
189 */ 190 u64 r12, r13, r14, r15;
190 u64 real_ip; 191 u64 status, dla, dse, lat;
191 /* 192 u64 real_ip, tsx_tuning;
192 * TSX tuning information field: abort cycles and abort flags. 193};
193 */ 194
194 u64 tsx_tuning; 195union hsw_tsx_tuning {
196 struct {
197 u32 cycles_last_block : 32,
198 hle_abort : 1,
199 rtm_abort : 1,
200 instruction_abort : 1,
201 non_instruction_abort : 1,
202 retry : 1,
203 data_conflict : 1,
204 capacity_writes : 1,
205 capacity_reads : 1;
206 };
207 u64 value;
195}; 208};
196 209
210#define PEBS_HSW_TSX_FLAGS 0xff00000000ULL
211
197void init_debug_store_on_cpu(int cpu) 212void init_debug_store_on_cpu(int cpu)
198{ 213{
199 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 214 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -214,12 +229,14 @@ void fini_debug_store_on_cpu(int cpu)
214 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); 229 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
215} 230}
216 231
232static DEFINE_PER_CPU(void *, insn_buffer);
233
217static int alloc_pebs_buffer(int cpu) 234static int alloc_pebs_buffer(int cpu)
218{ 235{
219 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 236 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
220 int node = cpu_to_node(cpu); 237 int node = cpu_to_node(cpu);
221 int max, thresh = 1; /* always use a single PEBS record */ 238 int max, thresh = 1; /* always use a single PEBS record */
222 void *buffer; 239 void *buffer, *ibuffer;
223 240
224 if (!x86_pmu.pebs) 241 if (!x86_pmu.pebs)
225 return 0; 242 return 0;
@@ -228,6 +245,19 @@ static int alloc_pebs_buffer(int cpu)
228 if (unlikely(!buffer)) 245 if (unlikely(!buffer))
229 return -ENOMEM; 246 return -ENOMEM;
230 247
248 /*
249 * HSW+ already provides us the eventing ip; no need to allocate this
250 * buffer then.
251 */
252 if (x86_pmu.intel_cap.pebs_format < 2) {
253 ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
254 if (!ibuffer) {
255 kfree(buffer);
256 return -ENOMEM;
257 }
258 per_cpu(insn_buffer, cpu) = ibuffer;
259 }
260
231 max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size; 261 max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
232 262
233 ds->pebs_buffer_base = (u64)(unsigned long)buffer; 263 ds->pebs_buffer_base = (u64)(unsigned long)buffer;
@@ -248,6 +278,9 @@ static void release_pebs_buffer(int cpu)
248 if (!ds || !x86_pmu.pebs) 278 if (!ds || !x86_pmu.pebs)
249 return; 279 return;
250 280
281 kfree(per_cpu(insn_buffer, cpu));
282 per_cpu(insn_buffer, cpu) = NULL;
283
251 kfree((void *)(unsigned long)ds->pebs_buffer_base); 284 kfree((void *)(unsigned long)ds->pebs_buffer_base);
252 ds->pebs_buffer_base = 0; 285 ds->pebs_buffer_base = 0;
253} 286}
@@ -584,6 +617,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
584 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 617 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
585 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 618 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
586 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 619 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
620 INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
587 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ 621 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
588 EVENT_CONSTRAINT_END 622 EVENT_CONSTRAINT_END
589}; 623};
@@ -714,6 +748,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
714 unsigned long old_to, to = cpuc->lbr_entries[0].to; 748 unsigned long old_to, to = cpuc->lbr_entries[0].to;
715 unsigned long ip = regs->ip; 749 unsigned long ip = regs->ip;
716 int is_64bit = 0; 750 int is_64bit = 0;
751 void *kaddr;
717 752
718 /* 753 /*
719 * We don't need to fixup if the PEBS assist is fault like 754 * We don't need to fixup if the PEBS assist is fault like
@@ -737,7 +772,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
737 * unsigned math, either ip is before the start (impossible) or 772 * unsigned math, either ip is before the start (impossible) or
738 * the basic block is larger than 1 page (sanity) 773 * the basic block is larger than 1 page (sanity)
739 */ 774 */
740 if ((ip - to) > PAGE_SIZE) 775 if ((ip - to) > PEBS_FIXUP_SIZE)
741 return 0; 776 return 0;
742 777
743 /* 778 /*
@@ -748,29 +783,33 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
748 return 1; 783 return 1;
749 } 784 }
750 785
786 if (!kernel_ip(ip)) {
787 int size, bytes;
788 u8 *buf = this_cpu_read(insn_buffer);
789
790 size = ip - to; /* Must fit our buffer, see above */
791 bytes = copy_from_user_nmi(buf, (void __user *)to, size);
792 if (bytes != 0)
793 return 0;
794
795 kaddr = buf;
796 } else {
797 kaddr = (void *)to;
798 }
799
751 do { 800 do {
752 struct insn insn; 801 struct insn insn;
753 u8 buf[MAX_INSN_SIZE];
754 void *kaddr;
755 802
756 old_to = to; 803 old_to = to;
757 if (!kernel_ip(ip)) {
758 int bytes, size = MAX_INSN_SIZE;
759
760 bytes = copy_from_user_nmi(buf, (void __user *)to, size);
761 if (bytes != size)
762 return 0;
763
764 kaddr = buf;
765 } else
766 kaddr = (void *)to;
767 804
768#ifdef CONFIG_X86_64 805#ifdef CONFIG_X86_64
769 is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32); 806 is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
770#endif 807#endif
771 insn_init(&insn, kaddr, is_64bit); 808 insn_init(&insn, kaddr, is_64bit);
772 insn_get_length(&insn); 809 insn_get_length(&insn);
810
773 to += insn.length; 811 to += insn.length;
812 kaddr += insn.length;
774 } while (to < ip); 813 } while (to < ip);
775 814
776 if (to == ip) { 815 if (to == ip) {
@@ -785,16 +824,34 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
785 return 0; 824 return 0;
786} 825}
787 826
827static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs)
828{
829 if (pebs->tsx_tuning) {
830 union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
831 return tsx.cycles_last_block;
832 }
833 return 0;
834}
835
836static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
837{
838 u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
839
840 /* For RTM XABORTs also log the abort code from AX */
841 if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
842 txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
843 return txn;
844}
845
788static void __intel_pmu_pebs_event(struct perf_event *event, 846static void __intel_pmu_pebs_event(struct perf_event *event,
789 struct pt_regs *iregs, void *__pebs) 847 struct pt_regs *iregs, void *__pebs)
790{ 848{
791 /* 849 /*
792 * We cast to pebs_record_nhm to get the load latency data 850 * We cast to the biggest pebs_record but are careful not to
793 * if extra_reg MSR_PEBS_LD_LAT_THRESHOLD used 851 * unconditionally access the 'extra' entries.
794 */ 852 */
795 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 853 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
796 struct pebs_record_nhm *pebs = __pebs; 854 struct pebs_record_hsw *pebs = __pebs;
797 struct pebs_record_hsw *pebs_hsw = __pebs;
798 struct perf_sample_data data; 855 struct perf_sample_data data;
799 struct pt_regs regs; 856 struct pt_regs regs;
800 u64 sample_type; 857 u64 sample_type;
@@ -853,7 +910,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
853 regs.sp = pebs->sp; 910 regs.sp = pebs->sp;
854 911
855 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { 912 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
856 regs.ip = pebs_hsw->real_ip; 913 regs.ip = pebs->real_ip;
857 regs.flags |= PERF_EFLAGS_EXACT; 914 regs.flags |= PERF_EFLAGS_EXACT;
858 } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs)) 915 } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
859 regs.flags |= PERF_EFLAGS_EXACT; 916 regs.flags |= PERF_EFLAGS_EXACT;
@@ -861,9 +918,18 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
861 regs.flags &= ~PERF_EFLAGS_EXACT; 918 regs.flags &= ~PERF_EFLAGS_EXACT;
862 919
863 if ((event->attr.sample_type & PERF_SAMPLE_ADDR) && 920 if ((event->attr.sample_type & PERF_SAMPLE_ADDR) &&
864 x86_pmu.intel_cap.pebs_format >= 1) 921 x86_pmu.intel_cap.pebs_format >= 1)
865 data.addr = pebs->dla; 922 data.addr = pebs->dla;
866 923
924 if (x86_pmu.intel_cap.pebs_format >= 2) {
925 /* Only set the TSX weight when no memory weight. */
926 if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll)
927 data.weight = intel_hsw_weight(pebs);
928
929 if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION)
930 data.txn = intel_hsw_transaction(pebs);
931 }
932
867 if (has_branch_stack(event)) 933 if (has_branch_stack(event))
868 data.br_stack = &cpuc->lbr_stack; 934 data.br_stack = &cpuc->lbr_stack;
869 935
@@ -912,17 +978,34 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
912 __intel_pmu_pebs_event(event, iregs, at); 978 __intel_pmu_pebs_event(event, iregs, at);
913} 979}
914 980
915static void __intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, void *at, 981static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
916 void *top)
917{ 982{
918 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 983 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
919 struct debug_store *ds = cpuc->ds; 984 struct debug_store *ds = cpuc->ds;
920 struct perf_event *event = NULL; 985 struct perf_event *event = NULL;
986 void *at, *top;
921 u64 status = 0; 987 u64 status = 0;
922 int bit; 988 int bit;
923 989
990 if (!x86_pmu.pebs_active)
991 return;
992
993 at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
994 top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
995
924 ds->pebs_index = ds->pebs_buffer_base; 996 ds->pebs_index = ds->pebs_buffer_base;
925 997
998 if (unlikely(at > top))
999 return;
1000
1001 /*
1002 * Should not happen, we program the threshold at 1 and do not
1003 * set a reset value.
1004 */
1005 WARN_ONCE(top - at > x86_pmu.max_pebs_events * x86_pmu.pebs_record_size,
1006 "Unexpected number of pebs records %ld\n",
1007 (long)(top - at) / x86_pmu.pebs_record_size);
1008
926 for (; at < top; at += x86_pmu.pebs_record_size) { 1009 for (; at < top; at += x86_pmu.pebs_record_size) {
927 struct pebs_record_nhm *p = at; 1010 struct pebs_record_nhm *p = at;
928 1011
@@ -950,61 +1033,6 @@ static void __intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, void *at,
950 } 1033 }
951} 1034}
952 1035
953static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
954{
955 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
956 struct debug_store *ds = cpuc->ds;
957 struct pebs_record_nhm *at, *top;
958 int n;
959
960 if (!x86_pmu.pebs_active)
961 return;
962
963 at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
964 top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
965
966 ds->pebs_index = ds->pebs_buffer_base;
967
968 n = top - at;
969 if (n <= 0)
970 return;
971
972 /*
973 * Should not happen, we program the threshold at 1 and do not
974 * set a reset value.
975 */
976 WARN_ONCE(n > x86_pmu.max_pebs_events,
977 "Unexpected number of pebs records %d\n", n);
978
979 return __intel_pmu_drain_pebs_nhm(iregs, at, top);
980}
981
982static void intel_pmu_drain_pebs_hsw(struct pt_regs *iregs)
983{
984 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
985 struct debug_store *ds = cpuc->ds;
986 struct pebs_record_hsw *at, *top;
987 int n;
988
989 if (!x86_pmu.pebs_active)
990 return;
991
992 at = (struct pebs_record_hsw *)(unsigned long)ds->pebs_buffer_base;
993 top = (struct pebs_record_hsw *)(unsigned long)ds->pebs_index;
994
995 n = top - at;
996 if (n <= 0)
997 return;
998 /*
999 * Should not happen, we program the threshold at 1 and do not
1000 * set a reset value.
1001 */
1002 WARN_ONCE(n > x86_pmu.max_pebs_events,
1003 "Unexpected number of pebs records %d\n", n);
1004
1005 return __intel_pmu_drain_pebs_nhm(iregs, at, top);
1006}
1007
1008/* 1036/*
1009 * BTS, PEBS probe and setup 1037 * BTS, PEBS probe and setup
1010 */ 1038 */
@@ -1039,7 +1067,7 @@ void intel_ds_init(void)
1039 case 2: 1067 case 2:
1040 pr_cont("PEBS fmt2%c, ", pebs_type); 1068 pr_cont("PEBS fmt2%c, ", pebs_type);
1041 x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw); 1069 x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
1042 x86_pmu.drain_pebs = intel_pmu_drain_pebs_hsw; 1070 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
1043 break; 1071 break;
1044 1072
1045 default: 1073 default:
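
With format 2 now routed to intel_pmu_drain_pebs_nhm(), a single drain loop walks the DS area in x86_pmu.pebs_record_size steps, so the same code covers the Nehalem and Haswell record layouts; the WARN_ONCE only fires if the buffer somehow holds more records than max_pebs_events, which the threshold of 1 should prevent. A self-contained sketch of that variable-stride walk, with a made-up record layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical record layout: only the status word matters for the walk. */
struct demo_record {
	uint64_t status;	/* bitmask of counters that overflowed */
	uint64_t payload[6];	/* remainder; real size depends on the PEBS format */
};

static void demo_drain(unsigned char *base, unsigned char *index,
		       size_t record_size, long max_events)
{
	unsigned char *at = base, *top = index;
	long n = (long)((top - at) / (long)record_size);

	if (at >= top)
		return;
	if (n > max_events)
		fprintf(stderr, "unexpected number of records: %ld\n", n);

	/* step by the per-format record size instead of a fixed struct size */
	for (; at < top; at += record_size) {
		struct demo_record *p = (struct demo_record *)at;

		printf("record status=%#llx\n", (unsigned long long)p->status);
	}
}

int main(void)
{
	struct demo_record buf[2] = { { .status = 0x1 }, { .status = 0x2 } };

	demo_drain((unsigned char *)buf, (unsigned char *)(buf + 2),
		   sizeof(buf[0]), 4);
	return 0;
}
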
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index d5be06a5005e..d82d155aca8c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -284,6 +284,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
284 int lbr_format = x86_pmu.intel_cap.lbr_format; 284 int lbr_format = x86_pmu.intel_cap.lbr_format;
285 u64 tos = intel_pmu_lbr_tos(); 285 u64 tos = intel_pmu_lbr_tos();
286 int i; 286 int i;
287 int out = 0;
287 288
288 for (i = 0; i < x86_pmu.lbr_nr; i++) { 289 for (i = 0; i < x86_pmu.lbr_nr; i++) {
289 unsigned long lbr_idx = (tos - i) & mask; 290 unsigned long lbr_idx = (tos - i) & mask;
@@ -306,15 +307,27 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
306 } 307 }
307 from = (u64)((((s64)from) << skip) >> skip); 308 from = (u64)((((s64)from) << skip) >> skip);
308 309
309 cpuc->lbr_entries[i].from = from; 310 /*
310 cpuc->lbr_entries[i].to = to; 311 * Some CPUs report duplicated abort records,
311 cpuc->lbr_entries[i].mispred = mis; 312 * with the second entry not having an abort bit set.
312 cpuc->lbr_entries[i].predicted = pred; 313 * Skip them here. This loop runs backwards,
313 cpuc->lbr_entries[i].in_tx = in_tx; 314 * so we need to undo the previous record.
314 cpuc->lbr_entries[i].abort = abort; 315 * If the abort just happened outside the window
315 cpuc->lbr_entries[i].reserved = 0; 316 * the extra entry cannot be removed.
317 */
318 if (abort && x86_pmu.lbr_double_abort && out > 0)
319 out--;
320
321 cpuc->lbr_entries[out].from = from;
322 cpuc->lbr_entries[out].to = to;
323 cpuc->lbr_entries[out].mispred = mis;
324 cpuc->lbr_entries[out].predicted = pred;
325 cpuc->lbr_entries[out].in_tx = in_tx;
326 cpuc->lbr_entries[out].abort = abort;
327 cpuc->lbr_entries[out].reserved = 0;
328 out++;
316 } 329 }
317 cpuc->lbr_stack.nr = i; 330 cpuc->lbr_stack.nr = out;
318} 331}
319 332
320void intel_pmu_lbr_read(void) 333void intel_pmu_lbr_read(void)
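
The new out index is what lets the read loop drop the duplicated entry some CPUs emit around a transaction abort: when an abort record is seen and an earlier entry was already copied, the write position is rewound by one so the duplicate (which lacks the abort bit) is overwritten. A standalone model of that compaction over a plain array, with a hypothetical entry layout:

#include <stdbool.h>
#include <stdio.h>

struct demo_branch {
	unsigned long from, to;
	bool abort;
};

/* Compact in place: an abort entry replaces the duplicate recorded before it. */
static int demo_compact(struct demo_branch *e, int nr, bool double_abort_quirk)
{
	int i, out = 0;

	for (i = 0; i < nr; i++) {
		if (e[i].abort && double_abort_quirk && out > 0)
			out--;		/* undo the previous (duplicated) record */
		e[out++] = e[i];
	}
	return out;
}

int main(void)
{
	struct demo_branch stack[] = {
		{ 0x1000, 0x2000, false },
		{ 0x3000, 0x4000, false },	/* duplicate without the abort bit */
		{ 0x3000, 0x4000, true },	/* the real abort entry */
	};
	int nr = demo_compact(stack, 3, true);

	printf("%d entries kept, last abort=%d\n", nr, stack[nr - 1].abort);
	return 0;
}
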
@@ -478,7 +491,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
478 491
479 /* may fail if text not present */ 492 /* may fail if text not present */
480 bytes = copy_from_user_nmi(buf, (void __user *)from, size); 493 bytes = copy_from_user_nmi(buf, (void __user *)from, size);
481 if (bytes != size) 494 if (bytes != 0)
482 return X86_BR_NONE; 495 return X86_BR_NONE;
483 496
484 addr = buf; 497 addr = buf;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 8ed44589b0e4..29c248799ced 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -997,6 +997,20 @@ static int snbep_pci2phy_map_init(int devid)
997 } 997 }
998 } 998 }
999 999
1000 if (!err) {
1001 /*
1002 * For PCI bus with no UBOX device, find the next bus
1003 * that has UBOX device and use its mapping.
1004 */
1005 i = -1;
1006 for (bus = 255; bus >= 0; bus--) {
1007 if (pcibus_to_physid[bus] >= 0)
1008 i = pcibus_to_physid[bus];
1009 else
1010 pcibus_to_physid[bus] = i;
1011 }
1012 }
1013
1000 if (ubox_dev) 1014 if (ubox_dev)
1001 pci_dev_put(ubox_dev); 1015 pci_dev_put(ubox_dev);
1002 1016
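
The added fallback handles sockets whose uncore devices sit on a PCI bus with no UBOX: walking the bus numbers from 255 down to 0, each unmapped bus inherits the physical-node id of the closest higher-numbered bus that does have a mapping. A small sketch of that backward fill (array size and ids are invented for the example):

#include <stdio.h>

#define DEMO_NR_BUS 8

int main(void)
{
	/* -1 means "no UBOX device seen on this bus" */
	int bus_to_node[DEMO_NR_BUS] = { -1, 0, -1, -1, 1, -1, -1, -1 };
	int bus, node = -1;

	/* walk from the highest bus down, propagating the last known node id */
	for (bus = DEMO_NR_BUS - 1; bus >= 0; bus--) {
		if (bus_to_node[bus] >= 0)
			node = bus_to_node[bus];
		else
			bus_to_node[bus] = node;
	}

	for (bus = 0; bus < DEMO_NR_BUS; bus++)
		printf("bus %d -> node %d\n", bus, bus_to_node[bus]);
	return 0;
}
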
@@ -1099,6 +1113,24 @@ static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1099 &format_attr_umask.attr, 1113 &format_attr_umask.attr,
1100 &format_attr_edge.attr, 1114 &format_attr_edge.attr,
1101 &format_attr_thresh8.attr, 1115 &format_attr_thresh8.attr,
1116 &format_attr_match_rds.attr,
1117 &format_attr_match_rnid30.attr,
1118 &format_attr_match_rnid4.attr,
1119 &format_attr_match_dnid.attr,
1120 &format_attr_match_mc.attr,
1121 &format_attr_match_opc.attr,
1122 &format_attr_match_vnw.attr,
1123 &format_attr_match0.attr,
1124 &format_attr_match1.attr,
1125 &format_attr_mask_rds.attr,
1126 &format_attr_mask_rnid30.attr,
1127 &format_attr_mask_rnid4.attr,
1128 &format_attr_mask_dnid.attr,
1129 &format_attr_mask_mc.attr,
1130 &format_attr_mask_opc.attr,
1131 &format_attr_mask_vnw.attr,
1132 &format_attr_mask0.attr,
1133 &format_attr_mask1.attr,
1102 NULL, 1134 NULL,
1103}; 1135};
1104 1136
@@ -1312,17 +1344,83 @@ static struct intel_uncore_type ivt_uncore_imc = {
1312 IVT_UNCORE_PCI_COMMON_INIT(), 1344 IVT_UNCORE_PCI_COMMON_INIT(),
1313}; 1345};
1314 1346
1347/* registers in IRP boxes are not properly aligned */
1348static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1349static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1350
1351static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1352{
1353 struct pci_dev *pdev = box->pci_dev;
1354 struct hw_perf_event *hwc = &event->hw;
1355
1356 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1357 hwc->config | SNBEP_PMON_CTL_EN);
1358}
1359
1360static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1361{
1362 struct pci_dev *pdev = box->pci_dev;
1363 struct hw_perf_event *hwc = &event->hw;
1364
1365 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1366}
1367
1368static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1369{
1370 struct pci_dev *pdev = box->pci_dev;
1371 struct hw_perf_event *hwc = &event->hw;
1372 u64 count = 0;
1373
1374 pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1375 pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1376
1377 return count;
1378}
1379
1380static struct intel_uncore_ops ivt_uncore_irp_ops = {
1381 .init_box = ivt_uncore_pci_init_box,
1382 .disable_box = snbep_uncore_pci_disable_box,
1383 .enable_box = snbep_uncore_pci_enable_box,
1384 .disable_event = ivt_uncore_irp_disable_event,
1385 .enable_event = ivt_uncore_irp_enable_event,
1386 .read_counter = ivt_uncore_irp_read_counter,
1387};
1388
1389static struct intel_uncore_type ivt_uncore_irp = {
1390 .name = "irp",
1391 .num_counters = 4,
1392 .num_boxes = 1,
1393 .perf_ctr_bits = 48,
1394 .event_mask = IVT_PMON_RAW_EVENT_MASK,
1395 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1396 .ops = &ivt_uncore_irp_ops,
1397 .format_group = &ivt_uncore_format_group,
1398};
1399
1400static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1401 .init_box = ivt_uncore_pci_init_box,
1402 .disable_box = snbep_uncore_pci_disable_box,
1403 .enable_box = snbep_uncore_pci_enable_box,
1404 .disable_event = snbep_uncore_pci_disable_event,
1405 .enable_event = snbep_qpi_enable_event,
1406 .read_counter = snbep_uncore_pci_read_counter,
1407 .hw_config = snbep_qpi_hw_config,
1408 .get_constraint = uncore_get_constraint,
1409 .put_constraint = uncore_put_constraint,
1410};
1411
1315static struct intel_uncore_type ivt_uncore_qpi = { 1412static struct intel_uncore_type ivt_uncore_qpi = {
1316 .name = "qpi", 1413 .name = "qpi",
1317 .num_counters = 4, 1414 .num_counters = 4,
1318 .num_boxes = 3, 1415 .num_boxes = 3,
1319 .perf_ctr_bits = 48, 1416 .perf_ctr_bits = 48,
1320 .perf_ctr = SNBEP_PCI_PMON_CTR0, 1417 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1321 .event_ctl = SNBEP_PCI_PMON_CTL0, 1418 .event_ctl = SNBEP_PCI_PMON_CTL0,
1322 .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK, 1419 .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1323 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 1420 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1324 .ops = &ivt_uncore_pci_ops, 1421 .num_shared_regs = 1,
1325 .format_group = &ivt_uncore_qpi_format_group, 1422 .ops = &ivt_uncore_qpi_ops,
1423 .format_group = &ivt_uncore_qpi_format_group,
1326}; 1424};
1327 1425
1328static struct intel_uncore_type ivt_uncore_r2pcie = { 1426static struct intel_uncore_type ivt_uncore_r2pcie = {
@@ -1346,6 +1444,7 @@ static struct intel_uncore_type ivt_uncore_r3qpi = {
1346enum { 1444enum {
1347 IVT_PCI_UNCORE_HA, 1445 IVT_PCI_UNCORE_HA,
1348 IVT_PCI_UNCORE_IMC, 1446 IVT_PCI_UNCORE_IMC,
1447 IVT_PCI_UNCORE_IRP,
1349 IVT_PCI_UNCORE_QPI, 1448 IVT_PCI_UNCORE_QPI,
1350 IVT_PCI_UNCORE_R2PCIE, 1449 IVT_PCI_UNCORE_R2PCIE,
1351 IVT_PCI_UNCORE_R3QPI, 1450 IVT_PCI_UNCORE_R3QPI,
@@ -1354,6 +1453,7 @@ enum {
1354static struct intel_uncore_type *ivt_pci_uncores[] = { 1453static struct intel_uncore_type *ivt_pci_uncores[] = {
1355 [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha, 1454 [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
1356 [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc, 1455 [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
1456 [IVT_PCI_UNCORE_IRP] = &ivt_uncore_irp,
1357 [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi, 1457 [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
1358 [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie, 1458 [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1359 [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi, 1459 [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
@@ -1401,6 +1501,10 @@ static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1401 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), 1501 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1402 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7), 1502 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1403 }, 1503 },
1504 { /* IRP */
1505 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1506 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1507 },
1404 { /* QPI0 Port 0 */ 1508 { /* QPI0 Port 0 */
1405 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), 1509 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1406 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0), 1510 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
@@ -1429,6 +1533,16 @@ static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1429 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), 1533 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1430 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2), 1534 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1431 }, 1535 },
1536 { /* QPI Port 0 filter */
1537 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1538 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1539 SNBEP_PCI_QPI_PORT0_FILTER),
1540 },
1541 { /* QPI Port 0 filter */
1542 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1543 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1544 SNBEP_PCI_QPI_PORT1_FILTER),
1545 },
1432 { /* end: all zeroes */ } 1546 { /* end: all zeroes */ }
1433}; 1547};
1434 1548
@@ -2706,14 +2820,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2706 box->hrtimer.function = uncore_pmu_hrtimer; 2820 box->hrtimer.function = uncore_pmu_hrtimer;
2707} 2821}
2708 2822
2709struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu) 2823static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
2710{ 2824{
2711 struct intel_uncore_box *box; 2825 struct intel_uncore_box *box;
2712 int i, size; 2826 int i, size;
2713 2827
2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); 2828 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2715 2829
2716 box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); 2830 box = kzalloc_node(size, GFP_KERNEL, node);
2717 if (!box) 2831 if (!box)
2718 return NULL; 2832 return NULL;
2719 2833
@@ -3031,7 +3145,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3031 struct intel_uncore_box *fake_box; 3145 struct intel_uncore_box *fake_box;
3032 int ret = -EINVAL, n; 3146 int ret = -EINVAL, n;
3033 3147
3034 fake_box = uncore_alloc_box(pmu->type, smp_processor_id()); 3148 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3035 if (!fake_box) 3149 if (!fake_box)
3036 return -ENOMEM; 3150 return -ENOMEM;
3037 3151
@@ -3294,7 +3408,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
3294 } 3408 }
3295 3409
3296 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; 3410 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3297 box = uncore_alloc_box(type, 0); 3411 box = uncore_alloc_box(type, NUMA_NO_NODE);
3298 if (!box) 3412 if (!box)
3299 return -ENOMEM; 3413 return -ENOMEM;
3300 3414
@@ -3499,7 +3613,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
3499 if (pmu->func_id < 0) 3613 if (pmu->func_id < 0)
3500 pmu->func_id = j; 3614 pmu->func_id = j;
3501 3615
3502 box = uncore_alloc_box(type, cpu); 3616 box = uncore_alloc_box(type, cpu_to_node(cpu));
3503 if (!box) 3617 if (!box)
3504 return -ENOMEM; 3618 return -ENOMEM;
3505 3619
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index aee6317b902f..06fe3ed8b851 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -11,15 +11,12 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
11 unsigned int cpu) 11 unsigned int cpu)
12{ 12{
13#ifdef CONFIG_SMP 13#ifdef CONFIG_SMP
14 if (c->x86_max_cores * smp_num_siblings > 1) { 14 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
15 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 15 seq_printf(m, "siblings\t: %d\n", cpumask_weight(cpu_core_mask(cpu)));
16 seq_printf(m, "siblings\t: %d\n", 16 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
17 cpumask_weight(cpu_core_mask(cpu))); 17 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
18 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); 18 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
19 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 19 seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
20 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
21 seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
22 }
23#endif 20#endif
24} 21}
25 22
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index 202759a14121..75c5ad5d35cc 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -11,8 +11,8 @@
11static const struct cpu_dev umc_cpu_dev = { 11static const struct cpu_dev umc_cpu_dev = {
12 .c_vendor = "UMC", 12 .c_vendor = "UMC",
13 .c_ident = { "UMC UMC UMC" }, 13 .c_ident = { "UMC UMC UMC" },
14 .c_models = { 14 .legacy_models = {
15 { .vendor = X86_VENDOR_UMC, .family = 4, .model_names = 15 { .family = 4, .model_names =
16 { 16 {
17 [1] = "U5D", 17 [1] = "U5D",
18 [2] = "U5S", 18 [2] = "U5S",
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index e0e0841eef45..18677a90d6a3 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -127,12 +127,12 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
127 cpu_emergency_vmxoff(); 127 cpu_emergency_vmxoff();
128 cpu_emergency_svm_disable(); 128 cpu_emergency_svm_disable();
129 129
130 lapic_shutdown();
131#ifdef CONFIG_X86_IO_APIC 130#ifdef CONFIG_X86_IO_APIC
132 /* Prevent crash_kexec() from deadlocking on ioapic_lock. */ 131 /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
133 ioapic_zap_locks(); 132 ioapic_zap_locks();
134 disable_IO_APIC(); 133 disable_IO_APIC();
135#endif 134#endif
135 lapic_shutdown();
136#ifdef CONFIG_HPET_TIMER 136#ifdef CONFIG_HPET_TIMER
137 hpet_disable(); 137 hpet_disable();
138#endif 138#endif
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 376dc7873447..d35078ea1446 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -20,22 +20,13 @@
20#include <asm/hpet.h> 20#include <asm/hpet.h>
21#include <asm/apic.h> 21#include <asm/apic.h>
22#include <asm/pci_x86.h> 22#include <asm/pci_x86.h>
23#include <asm/setup.h>
23 24
24__initdata u64 initial_dtb; 25__initdata u64 initial_dtb;
25char __initdata cmd_line[COMMAND_LINE_SIZE]; 26char __initdata cmd_line[COMMAND_LINE_SIZE];
26 27
27int __initdata of_ioapic; 28int __initdata of_ioapic;
28 29
29unsigned long pci_address_to_pio(phys_addr_t address)
30{
31 /*
32 * The ioport address can be directly used by inX / outX
33 */
34 BUG_ON(address >= (1 << 16));
35 return (unsigned long)address;
36}
37EXPORT_SYMBOL_GPL(pci_address_to_pio);
38
39void __init early_init_dt_scan_chosen_arch(unsigned long node) 30void __init early_init_dt_scan_chosen_arch(unsigned long node)
40{ 31{
41 BUG(); 32 BUG();
@@ -51,15 +42,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
51 return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); 42 return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
52} 43}
53 44
54#ifdef CONFIG_BLK_DEV_INITRD
55void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
56{
57 initrd_start = (unsigned long)__va(start);
58 initrd_end = (unsigned long)__va(end);
59 initrd_below_start_ok = 1;
60}
61#endif
62
63void __init add_dtb(u64 data) 45void __init add_dtb(u64 data)
64{ 46{
65 initial_dtb = data + offsetof(struct setup_data, data); 47 initial_dtb = data + offsetof(struct setup_data, data);
@@ -105,7 +87,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
105 87
106static int x86_of_pci_irq_enable(struct pci_dev *dev) 88static int x86_of_pci_irq_enable(struct pci_dev *dev)
107{ 89{
108 struct of_irq oirq;
109 u32 virq; 90 u32 virq;
110 int ret; 91 int ret;
111 u8 pin; 92 u8 pin;
@@ -116,12 +97,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
116 if (!pin) 97 if (!pin)
117 return 0; 98 return 0;
118 99
119 ret = of_irq_map_pci(dev, &oirq); 100 virq = of_irq_parse_and_map_pci(dev, 0, 0);
120 if (ret)
121 return ret;
122
123 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
124 oirq.size);
125 if (virq == 0) 101 if (virq == 0)
126 return -EINVAL; 102 return -EINVAL;
127 dev->irq = virq; 103 dev->irq = virq;
@@ -230,7 +206,7 @@ static void __init dtb_apic_setup(void)
230static void __init x86_flattree_get_config(void) 206static void __init x86_flattree_get_config(void)
231{ 207{
232 u32 size, map_len; 208 u32 size, map_len;
233 void *new_dtb; 209 struct boot_param_header *dt;
234 210
235 if (!initial_dtb) 211 if (!initial_dtb)
236 return; 212 return;
@@ -238,24 +214,17 @@ static void __init x86_flattree_get_config(void)
238 map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), 214 map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK),
239 (u64)sizeof(struct boot_param_header)); 215 (u64)sizeof(struct boot_param_header));
240 216
241 initial_boot_params = early_memremap(initial_dtb, map_len); 217 dt = early_memremap(initial_dtb, map_len);
242 size = be32_to_cpu(initial_boot_params->totalsize); 218 size = be32_to_cpu(dt->totalsize);
243 if (map_len < size) { 219 if (map_len < size) {
244 early_iounmap(initial_boot_params, map_len); 220 early_iounmap(dt, map_len);
245 initial_boot_params = early_memremap(initial_dtb, size); 221 dt = early_memremap(initial_dtb, size);
246 map_len = size; 222 map_len = size;
247 } 223 }
248 224
249 new_dtb = alloc_bootmem(size); 225 initial_boot_params = dt;
250 memcpy(new_dtb, initial_boot_params, size); 226 unflatten_and_copy_device_tree();
251 early_iounmap(initial_boot_params, map_len); 227 early_iounmap(dt, map_len);
252
253 initial_boot_params = new_dtb;
254
255 /* root level address cells */
256 of_scan_flat_dt(early_init_dt_scan_root, NULL);
257
258 unflatten_device_tree();
259} 228}
260#else 229#else
261static inline void x86_flattree_get_config(void) { } 230static inline void x86_flattree_get_config(void) { }
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index deb6421c9e69..d9c12d3022a7 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -25,12 +25,17 @@ unsigned int code_bytes = 64;
25int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; 25int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
26static int die_counter; 26static int die_counter;
27 27
28void printk_address(unsigned long address, int reliable) 28static void printk_stack_address(unsigned long address, int reliable)
29{ 29{
30 pr_cont(" [<%p>] %s%pB\n", 30 pr_cont(" [<%p>] %s%pB\n",
31 (void *)address, reliable ? "" : "? ", (void *)address); 31 (void *)address, reliable ? "" : "? ", (void *)address);
32} 32}
33 33
34void printk_address(unsigned long address)
35{
36 pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
37}
38
34#ifdef CONFIG_FUNCTION_GRAPH_TRACER 39#ifdef CONFIG_FUNCTION_GRAPH_TRACER
35static void 40static void
36print_ftrace_graph_addr(unsigned long addr, void *data, 41print_ftrace_graph_addr(unsigned long addr, void *data,
@@ -151,7 +156,7 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
151{ 156{
152 touch_nmi_watchdog(); 157 touch_nmi_watchdog();
153 printk(data); 158 printk(data);
154 printk_address(addr, reliable); 159 printk_stack_address(addr, reliable);
155} 160}
156 161
157static const struct stacktrace_ops print_trace_ops = { 162static const struct stacktrace_ops print_trace_ops = {
@@ -281,7 +286,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
281#else 286#else
282 /* Executive summary in case the oops scrolled away */ 287 /* Executive summary in case the oops scrolled away */
283 printk(KERN_ALERT "RIP "); 288 printk(KERN_ALERT "RIP ");
284 printk_address(regs->ip, 1); 289 printk_address(regs->ip);
285 printk(" RSP <%016lx>\n", regs->sp); 290 printk(" RSP <%016lx>\n", regs->sp);
286#endif 291#endif
287 return 0; 292 return 0;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index b3cd3ebae077..96f958d8cd45 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -313,6 +313,16 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
313 return gmch_ctrl << 25; /* 32 MB units */ 313 return gmch_ctrl << 25; /* 32 MB units */
314} 314}
315 315
316static inline size_t gen8_stolen_size(int num, int slot, int func)
317{
318 u16 gmch_ctrl;
319
320 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
321 gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
322 gmch_ctrl &= BDW_GMCH_GMS_MASK;
323 return gmch_ctrl << 25; /* 32 MB units */
324}
325
316typedef size_t (*stolen_size_fn)(int num, int slot, int func); 326typedef size_t (*stolen_size_fn)(int num, int slot, int func);
317 327
318static struct pci_device_id intel_stolen_ids[] __initdata = { 328static struct pci_device_id intel_stolen_ids[] __initdata = {
@@ -336,6 +346,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
336 INTEL_IVB_D_IDS(gen6_stolen_size), 346 INTEL_IVB_D_IDS(gen6_stolen_size),
337 INTEL_HSW_D_IDS(gen6_stolen_size), 347 INTEL_HSW_D_IDS(gen6_stolen_size),
338 INTEL_HSW_M_IDS(gen6_stolen_size), 348 INTEL_HSW_M_IDS(gen6_stolen_size),
349 INTEL_BDW_M_IDS(gen8_stolen_size),
350 INTEL_BDW_D_IDS(gen8_stolen_size)
339}; 351};
340 352
341static void __init intel_graphics_stolen(int num, int slot, int func) 353static void __init intel_graphics_stolen(int num, int slot, int func)
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index d15f575a861b..01d1c187c9f9 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -14,9 +14,11 @@
14#include <xen/hvc-console.h> 14#include <xen/hvc-console.h>
15#include <asm/pci-direct.h> 15#include <asm/pci-direct.h>
16#include <asm/fixmap.h> 16#include <asm/fixmap.h>
17#include <asm/mrst.h> 17#include <asm/intel-mid.h>
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <linux/usb/ehci_def.h> 19#include <linux/usb/ehci_def.h>
20#include <linux/efi.h>
21#include <asm/efi.h>
20 22
21/* Simple VGA output */ 23/* Simple VGA output */
22#define VGABASE (__ISA_IO_base + 0xb8000) 24#define VGABASE (__ISA_IO_base + 0xb8000)
@@ -234,6 +236,11 @@ static int __init setup_early_printk(char *buf)
234 early_console_register(&early_hsu_console, keep); 236 early_console_register(&early_hsu_console, keep);
235 } 237 }
236#endif 238#endif
239#ifdef CONFIG_EARLY_PRINTK_EFI
240 if (!strncmp(buf, "efi", 3))
241 early_console_register(&early_efi_console, keep);
242#endif
243
237 buf++; 244 buf++;
238 } 245 }
239 return 0; 246 return 0;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f0dcb0ceb6a2..51e2988c5728 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -362,12 +362,9 @@ END(ret_from_exception)
362#ifdef CONFIG_PREEMPT 362#ifdef CONFIG_PREEMPT
363ENTRY(resume_kernel) 363ENTRY(resume_kernel)
364 DISABLE_INTERRUPTS(CLBR_ANY) 364 DISABLE_INTERRUPTS(CLBR_ANY)
365 cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
366 jnz restore_all
367need_resched: 365need_resched:
368 movl TI_flags(%ebp), %ecx # need_resched set ? 366 cmpl $0,PER_CPU_VAR(__preempt_count)
369 testb $_TIF_NEED_RESCHED, %cl 367 jnz restore_all
370 jz restore_all
371 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? 368 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
372 jz restore_all 369 jz restore_all
373 call preempt_schedule_irq 370 call preempt_schedule_irq
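
resume_kernel used to test TI_preempt_count and then TI_flags for NEED_RESCHED separately; with the per-CPU __preempt_count the reschedule request is folded in as an inverted bit, so a single compare against zero means "preemption enabled and a reschedule is pending". A hedged userspace model of that encoding (the bit position and helpers are illustrative, not the kernel's asm/preempt.h):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical layout: top bit is the inverted "need resched" flag. */
#define DEMO_NEED_RESCHED_INVERTED 0x80000000u

static unsigned int demo_count = DEMO_NEED_RESCHED_INVERTED; /* count 0, no resched */

static void demo_set_need_resched(bool need)
{
	if (need)
		demo_count &= ~DEMO_NEED_RESCHED_INVERTED; /* clearing the bit requests it */
	else
		demo_count |= DEMO_NEED_RESCHED_INVERTED;
}

/* The single test the entry code can now do: preempt iff the whole word is zero. */
static bool demo_should_preempt(void)
{
	return demo_count == 0;
}

int main(void)
{
	printf("idle, no resched   -> %d\n", demo_should_preempt());	/* 0 */
	demo_set_need_resched(true);
	printf("resched requested  -> %d\n", demo_should_preempt());	/* 1 */
	demo_count += 1;	/* models preempt_disable() */
	printf("inside preempt_off -> %d\n", demo_should_preempt());	/* 0 */
	return 0;
}
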
@@ -1247,6 +1244,16 @@ return_to_handler:
1247 */ 1244 */
1248 .pushsection .kprobes.text, "ax" 1245 .pushsection .kprobes.text, "ax"
1249 1246
1247#ifdef CONFIG_TRACING
1248ENTRY(trace_page_fault)
1249 RING0_EC_FRAME
1250 ASM_CLAC
1251 pushl_cfi $trace_do_page_fault
1252 jmp error_code
1253 CFI_ENDPROC
1254END(trace_page_fault)
1255#endif
1256
1250ENTRY(page_fault) 1257ENTRY(page_fault)
1251 RING0_EC_FRAME 1258 RING0_EC_FRAME
1252 ASM_CLAC 1259 ASM_CLAC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1b69951a81e2..e21b0785a85b 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -487,21 +487,6 @@ ENDPROC(native_usergs_sysret64)
487 TRACE_IRQS_OFF 487 TRACE_IRQS_OFF
488 .endm 488 .endm
489 489
490ENTRY(save_rest)
491 PARTIAL_FRAME 1 (REST_SKIP+8)
492 movq 5*8+16(%rsp), %r11 /* save return address */
493 movq_cfi rbx, RBX+16
494 movq_cfi rbp, RBP+16
495 movq_cfi r12, R12+16
496 movq_cfi r13, R13+16
497 movq_cfi r14, R14+16
498 movq_cfi r15, R15+16
499 movq %r11, 8(%rsp) /* return address */
500 FIXUP_TOP_OF_STACK %r11, 16
501 ret
502 CFI_ENDPROC
503END(save_rest)
504
505/* save complete stack frame */ 490/* save complete stack frame */
506 .pushsection .kprobes.text, "ax" 491 .pushsection .kprobes.text, "ax"
507ENTRY(save_paranoid) 492ENTRY(save_paranoid)
@@ -1118,10 +1103,8 @@ retint_signal:
1118 /* Returning to kernel space. Check if we need preemption */ 1103 /* Returning to kernel space. Check if we need preemption */
1119 /* rcx: threadinfo. interrupts off. */ 1104 /* rcx: threadinfo. interrupts off. */
1120ENTRY(retint_kernel) 1105ENTRY(retint_kernel)
1121 cmpl $0,TI_preempt_count(%rcx) 1106 cmpl $0,PER_CPU_VAR(__preempt_count)
1122 jnz retint_restore_args 1107 jnz retint_restore_args
1123 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
1124 jnc retint_restore_args
1125 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ 1108 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
1126 jnc retint_restore_args 1109 jnc retint_restore_args
1127 call preempt_schedule_irq 1110 call preempt_schedule_irq
@@ -1295,6 +1278,17 @@ ENTRY(\sym)
1295END(\sym) 1278END(\sym)
1296.endm 1279.endm
1297 1280
1281#ifdef CONFIG_TRACING
1282.macro trace_errorentry sym do_sym
1283errorentry trace(\sym) trace(\do_sym)
1284errorentry \sym \do_sym
1285.endm
1286#else
1287.macro trace_errorentry sym do_sym
1288errorentry \sym \do_sym
1289.endm
1290#endif
1291
1298 /* error code is on the stack already */ 1292 /* error code is on the stack already */
1299.macro paranoiderrorentry sym do_sym 1293.macro paranoiderrorentry sym do_sym
1300ENTRY(\sym) 1294ENTRY(\sym)
@@ -1357,7 +1351,7 @@ bad_gs:
1357 .previous 1351 .previous
1358 1352
1359/* Call softirq on interrupt stack. Interrupts are off. */ 1353/* Call softirq on interrupt stack. Interrupts are off. */
1360ENTRY(call_softirq) 1354ENTRY(do_softirq_own_stack)
1361 CFI_STARTPROC 1355 CFI_STARTPROC
1362 pushq_cfi %rbp 1356 pushq_cfi %rbp
1363 CFI_REL_OFFSET rbp,0 1357 CFI_REL_OFFSET rbp,0
@@ -1374,7 +1368,7 @@ ENTRY(call_softirq)
1374 decl PER_CPU_VAR(irq_count) 1368 decl PER_CPU_VAR(irq_count)
1375 ret 1369 ret
1376 CFI_ENDPROC 1370 CFI_ENDPROC
1377END(call_softirq) 1371END(do_softirq_own_stack)
1378 1372
1379#ifdef CONFIG_XEN 1373#ifdef CONFIG_XEN
1380zeroentry xen_hypervisor_callback xen_do_hypervisor_callback 1374zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
@@ -1497,7 +1491,7 @@ zeroentry xen_int3 do_int3
1497errorentry xen_stack_segment do_stack_segment 1491errorentry xen_stack_segment do_stack_segment
1498#endif 1492#endif
1499errorentry general_protection do_general_protection 1493errorentry general_protection do_general_protection
1500errorentry page_fault do_page_fault 1494trace_errorentry page_fault do_page_fault
1501#ifdef CONFIG_KVM_GUEST 1495#ifdef CONFIG_KVM_GUEST
1502errorentry async_page_fault do_async_page_fault 1496errorentry async_page_fault do_async_page_fault
1503#endif 1497#endif
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 06f87bece92a..c61a14a4a310 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -35,8 +35,8 @@ asmlinkage void __init i386_start_kernel(void)
35 35
36 /* Call the subarch specific early setup function */ 36 /* Call the subarch specific early setup function */
37 switch (boot_params.hdr.hardware_subarch) { 37 switch (boot_params.hdr.hardware_subarch) {
38 case X86_SUBARCH_MRST: 38 case X86_SUBARCH_INTEL_MID:
39 x86_mrst_early_setup(); 39 x86_intel_mid_early_setup();
40 break; 40 break;
41 case X86_SUBARCH_CE4100: 41 case X86_SUBARCH_CE4100:
42 x86_ce4100_early_setup(); 42 x86_ce4100_early_setup();
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1be8e43b669e..85126ccbdf6b 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -162,7 +162,7 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
162 clear_bss(); 162 clear_bss();
163 163
164 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) 164 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
165 set_intr_gate(i, &early_idt_handlers[i]); 165 set_intr_gate(i, early_idt_handlers[i]);
166 load_idt((const struct desc_ptr *)&idt_descr); 166 load_idt((const struct desc_ptr *)&idt_descr);
167 167
168 copy_bootdata(__va(real_mode_data)); 168 copy_bootdata(__va(real_mode_data));
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 0fa69127209a..05fd74f537d6 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -37,3 +37,10 @@ EXPORT_SYMBOL(strstr);
37 37
38EXPORT_SYMBOL(csum_partial); 38EXPORT_SYMBOL(csum_partial);
39EXPORT_SYMBOL(empty_zero_page); 39EXPORT_SYMBOL(empty_zero_page);
40
41#ifdef CONFIG_PREEMPT
42EXPORT_SYMBOL(___preempt_schedule);
43#ifdef CONFIG_CONTEXT_TRACKING
44EXPORT_SYMBOL(___preempt_schedule_context);
45#endif
46#endif
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 5d576ab34403..e8368c6dd2a2 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -100,7 +100,7 @@ void unlazy_fpu(struct task_struct *tsk)
100 __save_init_fpu(tsk); 100 __save_init_fpu(tsk);
101 __thread_fpu_end(tsk); 101 __thread_fpu_end(tsk);
102 } else 102 } else
103 tsk->fpu_counter = 0; 103 tsk->thread.fpu_counter = 0;
104 preempt_enable(); 104 preempt_enable();
105} 105}
106EXPORT_SYMBOL(unlazy_fpu); 106EXPORT_SYMBOL(unlazy_fpu);
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 9a5c460404dc..2e977b5d61dd 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -312,8 +312,7 @@ static void init_8259A(int auto_eoi)
312 */ 312 */
313 outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ 313 outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
314 314
315 /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64, 315 /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
316 to 0x20-0x27 on i386 */
317 outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR); 316 outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
318 317
319 /* 8259A-1 (the master) has a slave on IR2 */ 318 /* 8259A-1 (the master) has a slave on IR2 */
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 4186755f1d7c..d7fcbedc9c43 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -100,9 +100,6 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
100 irqctx->tinfo.task = curctx->tinfo.task; 100 irqctx->tinfo.task = curctx->tinfo.task;
101 irqctx->tinfo.previous_esp = current_stack_pointer; 101 irqctx->tinfo.previous_esp = current_stack_pointer;
102 102
103 /* Copy the preempt_count so that the [soft]irq checks work. */
104 irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
105
106 if (unlikely(overflow)) 103 if (unlikely(overflow))
107 call_on_stack(print_stack_overflow, isp); 104 call_on_stack(print_stack_overflow, isp);
108 105
@@ -131,7 +128,6 @@ void irq_ctx_init(int cpu)
131 THREAD_SIZE_ORDER)); 128 THREAD_SIZE_ORDER));
132 memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); 129 memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
133 irqctx->tinfo.cpu = cpu; 130 irqctx->tinfo.cpu = cpu;
134 irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
135 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); 131 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
136 132
137 per_cpu(hardirq_ctx, cpu) = irqctx; 133 per_cpu(hardirq_ctx, cpu) = irqctx;
@@ -149,35 +145,21 @@ void irq_ctx_init(int cpu)
149 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); 145 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
150} 146}
151 147
152asmlinkage void do_softirq(void) 148void do_softirq_own_stack(void)
153{ 149{
154 unsigned long flags;
155 struct thread_info *curctx; 150 struct thread_info *curctx;
156 union irq_ctx *irqctx; 151 union irq_ctx *irqctx;
157 u32 *isp; 152 u32 *isp;
158 153
159 if (in_interrupt()) 154 curctx = current_thread_info();
160 return; 155 irqctx = __this_cpu_read(softirq_ctx);
161 156 irqctx->tinfo.task = curctx->task;
162 local_irq_save(flags); 157 irqctx->tinfo.previous_esp = current_stack_pointer;
163
164 if (local_softirq_pending()) {
165 curctx = current_thread_info();
166 irqctx = __this_cpu_read(softirq_ctx);
167 irqctx->tinfo.task = curctx->task;
168 irqctx->tinfo.previous_esp = current_stack_pointer;
169
170 /* build the stack frame on the softirq stack */
171 isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
172 158
173 call_on_stack(__do_softirq, isp); 159 /* build the stack frame on the softirq stack */
174 /* 160 isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
175 * Shouldn't happen, we returned above if in_interrupt():
176 */
177 WARN_ON_ONCE(softirq_count());
178 }
179 161
180 local_irq_restore(flags); 162 call_on_stack(__do_softirq, isp);
181} 163}
182 164
183bool handle_irq(unsigned irq, struct pt_regs *regs) 165bool handle_irq(unsigned irq, struct pt_regs *regs)
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index d04d3ecded62..4d1c746892eb 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -87,24 +87,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
87 generic_handle_irq_desc(irq, desc); 87 generic_handle_irq_desc(irq, desc);
88 return true; 88 return true;
89} 89}
90
91
92extern void call_softirq(void);
93
94asmlinkage void do_softirq(void)
95{
96 __u32 pending;
97 unsigned long flags;
98
99 if (in_interrupt())
100 return;
101
102 local_irq_save(flags);
103 pending = local_softirq_pending();
104 /* Switch to interrupt stack */
105 if (pending) {
106 call_softirq();
107 WARN_ON_ONCE(softirq_count());
108 }
109 local_irq_restore(flags);
110}
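
Both arch copies of do_softirq() are gone; the architecture now only supplies do_softirq_own_stack(), and the in_interrupt()/irq-save/pending bookkeeping is expected to live in the generic softirq code that calls this hook. A rough model of that split (everything except the do_softirq_own_stack name is an illustrative stand-in):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the generic softirq bookkeeping. */
static bool demo_in_interrupt;
static unsigned int demo_pending;

static void demo_softirq_body(void)
{
	printf("processing pending mask %#x\n", demo_pending);
	demo_pending = 0;
}

/* Arch hook: in the kernel this switches to the per-CPU softirq stack first. */
static void demo_do_softirq_own_stack(void)
{
	demo_softirq_body();
}

/* Generic entry point: the guards and the pending check stay in common code. */
static void demo_do_softirq(void)
{
	if (demo_in_interrupt)
		return;
	/* interrupts would be disabled here ... */
	if (demo_pending)
		demo_do_softirq_own_stack();
	/* ... and restored here */
}

int main(void)
{
	demo_pending = 0x202;
	demo_do_softirq();
	return 0;
}
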
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index ee11b7dfbfbb..26d5a55a2736 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -42,15 +42,27 @@ static void __jump_label_transform(struct jump_entry *entry,
42 int init) 42 int init)
43{ 43{
44 union jump_code_union code; 44 union jump_code_union code;
45 const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
45 const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5]; 46 const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
46 47
47 if (type == JUMP_LABEL_ENABLE) { 48 if (type == JUMP_LABEL_ENABLE) {
48 /* 49 if (init) {
49 * We are enabling this jump label. If it is not a nop 50 /*
50 * then something must have gone wrong. 51 * Jump label is enabled for the first time.
51 */ 52 * So we expect a default_nop...
52 if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0)) 53 */
53 bug_at((void *)entry->code, __LINE__); 54 if (unlikely(memcmp((void *)entry->code, default_nop, 5)
55 != 0))
56 bug_at((void *)entry->code, __LINE__);
57 } else {
58 /*
59 * ...otherwise expect an ideal_nop. Otherwise
60 * something went horribly wrong.
61 */
62 if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
63 != 0))
64 bug_at((void *)entry->code, __LINE__);
65 }
54 66
55 code.jump = 0xe9; 67 code.jump = 0xe9;
56 code.offset = entry->target - 68 code.offset = entry->target -
@@ -63,7 +75,6 @@ static void __jump_label_transform(struct jump_entry *entry,
63 * are converting the default nop to the ideal nop. 75 * are converting the default nop to the ideal nop.
64 */ 76 */
65 if (init) { 77 if (init) {
66 const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
67 if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0)) 78 if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
68 bug_at((void *)entry->code, __LINE__); 79 bug_at((void *)entry->code, __LINE__);
69 } else { 80 } else {
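
The reworked check distinguishes the first enable of a key (the site still holds the compiler-emitted default nop from STATIC_KEY_INIT_NOP) from later enables (the site should hold the ideal nop chosen at boot); in either case the five bytes are then rewritten as an e9 rel32 jump. A sketch of that byte-level verify-then-patch step; the nop encoding and addresses are examples, not the kernel's nop tables:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_JUMP_LEN 5

/* Example encoding only: one common 5-byte nop (nopl 0x0(%rax,%rax,1)). */
static const uint8_t demo_nop[DEMO_JUMP_LEN] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

static int demo_patch_jump(uint8_t *site, uintptr_t site_addr, uintptr_t target)
{
	int32_t rel = (int32_t)(target - (site_addr + DEMO_JUMP_LEN));

	if (memcmp(site, demo_nop, DEMO_JUMP_LEN) != 0)
		return -1;	/* unexpected bytes: refuse to patch */

	site[0] = 0xe9;				/* jmp rel32 */
	memcpy(site + 1, &rel, sizeof(rel));
	return 0;
}

int main(void)
{
	uint8_t code[DEMO_JUMP_LEN];
	int32_t rel_out;

	memcpy(code, demo_nop, sizeof(code));
	if (demo_patch_jump(code, 0x1000, 0x1100) == 0) {
		memcpy(&rel_out, code + 1, sizeof(rel_out));
		printf("patched: %02x rel=%d\n", code[0], rel_out);
	}
	return 0;
}
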
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 697b93af02dd..6dd802c6d780 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -464,7 +464,7 @@ static struct notifier_block kvm_cpu_notifier = {
464 464
465static void __init kvm_apf_trap_init(void) 465static void __init kvm_apf_trap_init(void)
466{ 466{
467 set_intr_gate(14, &async_page_fault); 467 set_intr_gate(14, async_page_fault);
468} 468}
469 469
470void __init kvm_guest_init(void) 470void __init kvm_guest_init(void)
@@ -609,7 +609,7 @@ static struct dentry *d_kvm_debug;
609 609
610struct dentry *kvm_init_debugfs(void) 610struct dentry *kvm_init_debugfs(void)
611{ 611{
612 d_kvm_debug = debugfs_create_dir("kvm", NULL); 612 d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
613 if (!d_kvm_debug) 613 if (!d_kvm_debug)
614 printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n"); 614 printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
615 615
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
775 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) 775 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
776 return; 776 return;
777 777
778 printk(KERN_INFO "KVM setup paravirtual spinlock\n"); 778 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
779 pv_lock_ops.unlock_kick = kvm_unlock_kick;
780}
781
782static __init int kvm_spinlock_init_jump(void)
783{
784 if (!kvm_para_available())
785 return 0;
786 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
787 return 0;
779 788
780 static_key_slow_inc(&paravirt_ticketlocks_enabled); 789 static_key_slow_inc(&paravirt_ticketlocks_enabled);
790 printk(KERN_INFO "KVM setup paravirtual spinlock\n");
781 791
782 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning); 792 return 0;
783 pv_lock_ops.unlock_kick = kvm_unlock_kick;
784} 793}
794early_initcall(kvm_spinlock_init_jump);
795
785#endif /* CONFIG_PARAVIRT_SPINLOCKS */ 796#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1570e0741344..e6041094ff26 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -139,6 +139,7 @@ bool kvm_check_and_clear_guest_paused(void)
139 src = &hv_clock[cpu].pvti; 139 src = &hv_clock[cpu].pvti;
140 if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { 140 if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
141 src->flags &= ~PVCLOCK_GUEST_STOPPED; 141 src->flags &= ~PVCLOCK_GUEST_STOPPED;
142 pvclock_touch_watchdogs();
142 ret = true; 143 ret = true;
143 } 144 }
144 145
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7123b5df479d..c3d4cc972eca 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
216 /* need to apply patch? */ 216 /* need to apply patch? */
217 if (rev >= mc_amd->hdr.patch_id) { 217 if (rev >= mc_amd->hdr.patch_id) {
218 c->microcode = rev; 218 c->microcode = rev;
219 uci->cpu_sig.rev = rev;
219 return 0; 220 return 0;
220 } 221 }
221 222
@@ -430,7 +431,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
430 snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); 431 snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
431 432
432 if (request_firmware(&fw, (const char *)fw_name, device)) { 433 if (request_firmware(&fw, (const char *)fw_name, device)) {
433 pr_err("failed to load file %s\n", fw_name); 434 pr_debug("failed to load file %s\n", fw_name);
434 goto out; 435 goto out;
435 } 436 }
436 437
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 216a4d754b0c..18be189368bb 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -49,7 +49,7 @@ void *module_alloc(unsigned long size)
49 return NULL; 49 return NULL;
50 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, 50 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
51 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 51 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
52 -1, __builtin_return_address(0)); 52 NUMA_NO_NODE, __builtin_return_address(0));
53} 53}
54 54
55#ifdef CONFIG_X86_32 55#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 88458faea2f8..05266b5aae22 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -46,7 +46,7 @@ static struct class *msr_class;
46static loff_t msr_seek(struct file *file, loff_t offset, int orig) 46static loff_t msr_seek(struct file *file, loff_t offset, int orig)
47{ 47{
48 loff_t ret; 48 loff_t ret;
49 struct inode *inode = file->f_mapping->host; 49 struct inode *inode = file_inode(file);
50 50
51 mutex_lock(&inode->i_mutex); 51 mutex_lock(&inode->i_mutex);
52 switch (orig) { 52 switch (orig) {
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index ba77ebc2c353..6fcb49ce50a1 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -113,10 +113,10 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
113 u64 before, delta, whole_msecs; 113 u64 before, delta, whole_msecs;
114 int remainder_ns, decimal_msecs, thishandled; 114 int remainder_ns, decimal_msecs, thishandled;
115 115
116 before = local_clock(); 116 before = sched_clock();
117 thishandled = a->handler(type, regs); 117 thishandled = a->handler(type, regs);
118 handled += thishandled; 118 handled += thishandled;
119 delta = local_clock() - before; 119 delta = sched_clock() - before;
120 trace_nmi_handler(a->handler, (int)delta, thishandled); 120 trace_nmi_handler(a->handler, (int)delta, thishandled);
121 121
122 if (delta < nmi_longest_ns) 122 if (delta < nmi_longest_ns)
diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
new file mode 100644
index 000000000000..ca7f0d58a87d
--- /dev/null
+++ b/arch/x86/kernel/preempt.S
@@ -0,0 +1,25 @@
1
2#include <linux/linkage.h>
3#include <asm/dwarf2.h>
4#include <asm/asm.h>
5#include <asm/calling.h>
6
7ENTRY(___preempt_schedule)
8 CFI_STARTPROC
9 SAVE_ALL
10 call preempt_schedule
11 RESTORE_ALL
12 ret
13 CFI_ENDPROC
14
15#ifdef CONFIG_CONTEXT_TRACKING
16
17ENTRY(___preempt_schedule_context)
18 CFI_STARTPROC
19 SAVE_ALL
20 call preempt_schedule_context
21 RESTORE_ALL
22 ret
23 CFI_ENDPROC
24
25#endif
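
The new thunks save and restore the full register set around preempt_schedule()/preempt_schedule_context(), presumably so the call emitted when preempt_enable() drops the per-CPU count to zero does not have to assume any registers are free at the call site. A toy C model of that enable-path trigger (the per-CPU variable and bit layout are stand-ins, not the kernel's definitions):

#include <stdio.h>

/* Per-CPU in the kernel; a plain global is enough for the sketch. */
static unsigned int demo_preempt_count = 0x80000000u; /* count 0, inverted need-resched bit set */

static void demo_preempt_schedule(void)
{
	printf("would call preempt_schedule() via the register-saving thunk\n");
}

static void demo_preempt_disable(void)
{
	demo_preempt_count++;
}

/* preempt_enable(): drop the count and reschedule if the whole word hits zero. */
static void demo_preempt_enable(void)
{
	if (--demo_preempt_count == 0)
		demo_preempt_schedule();
}

int main(void)
{
	demo_preempt_disable();
	demo_preempt_count &= ~0x80000000u;	/* pretend need_resched was set meanwhile */
	demo_preempt_enable();
	return 0;
}
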
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c83516be1052..3fb8d95ab8b5 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -391,9 +391,9 @@ static void amd_e400_idle(void)
391 * The switch back from broadcast mode needs to be 391 * The switch back from broadcast mode needs to be
392 * called with interrupts disabled. 392 * called with interrupts disabled.
393 */ 393 */
394 local_irq_disable(); 394 local_irq_disable();
395 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); 395 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
396 local_irq_enable(); 396 local_irq_enable();
397 } else 397 } else
398 default_idle(); 398 default_idle();
399} 399}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 884f98f69354..6f1236c29c4b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -153,7 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
153 childregs->orig_ax = -1; 153 childregs->orig_ax = -1;
154 childregs->cs = __KERNEL_CS | get_kernel_rpl(); 154 childregs->cs = __KERNEL_CS | get_kernel_rpl();
155 childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; 155 childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
156 p->fpu_counter = 0; 156 p->thread.fpu_counter = 0;
157 p->thread.io_bitmap_ptr = NULL; 157 p->thread.io_bitmap_ptr = NULL;
158 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); 158 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
159 return 0; 159 return 0;
@@ -166,7 +166,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
166 p->thread.ip = (unsigned long) ret_from_fork; 166 p->thread.ip = (unsigned long) ret_from_fork;
167 task_user_gs(p) = get_user_gs(current_pt_regs()); 167 task_user_gs(p) = get_user_gs(current_pt_regs());
168 168
169 p->fpu_counter = 0; 169 p->thread.fpu_counter = 0;
170 p->thread.io_bitmap_ptr = NULL; 170 p->thread.io_bitmap_ptr = NULL;
171 tsk = current; 171 tsk = current;
172 err = -ENOMEM; 172 err = -ENOMEM;
@@ -292,6 +292,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
292 set_iopl_mask(next->iopl); 292 set_iopl_mask(next->iopl);
293 293
294 /* 294 /*
295 * If it were not for PREEMPT_ACTIVE we could guarantee that the
296 * preempt_count of all tasks was equal here and this would not be
297 * needed.
298 */
299 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
300 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
301
302 /*
295 * Now maybe handle debug registers and/or IO bitmaps 303 * Now maybe handle debug registers and/or IO bitmaps
296 */ 304 */
297 if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || 305 if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index bb1dc51bab05..9c0280f93d05 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -63,7 +63,7 @@ void __show_regs(struct pt_regs *regs, int all)
63 unsigned int ds, cs, es; 63 unsigned int ds, cs, es;
64 64
65 printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); 65 printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
66 printk_address(regs->ip, 1); 66 printk_address(regs->ip);
67 printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, 67 printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
68 regs->sp, regs->flags); 68 regs->sp, regs->flags);
69 printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n", 69 printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
163 p->thread.sp = (unsigned long) childregs; 163 p->thread.sp = (unsigned long) childregs;
164 p->thread.usersp = me->thread.usersp; 164 p->thread.usersp = me->thread.usersp;
165 set_tsk_thread_flag(p, TIF_FORK); 165 set_tsk_thread_flag(p, TIF_FORK);
166 p->fpu_counter = 0; 166 p->thread.fpu_counter = 0;
167 p->thread.io_bitmap_ptr = NULL; 167 p->thread.io_bitmap_ptr = NULL;
168 168
169 savesegment(gs, p->thread.gsindex); 169 savesegment(gs, p->thread.gsindex);
@@ -363,6 +363,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
363 this_cpu_write(old_rsp, next->usersp); 363 this_cpu_write(old_rsp, next->usersp);
364 this_cpu_write(current_task, next_p); 364 this_cpu_write(current_task, next_p);
365 365
366 /*
367 * If it were not for PREEMPT_ACTIVE we could guarantee that the
368 * preempt_count of all tasks was equal here and this would not be
369 * needed.
370 */
371 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
372 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
373
366 this_cpu_write(kernel_stack, 374 this_cpu_write(kernel_stack,
367 (unsigned long)task_stack_page(next_p) + 375 (unsigned long)task_stack_page(next_p) +
368 THREAD_SIZE - KERNEL_STACK_OFFSET); 376 THREAD_SIZE - KERNEL_STACK_OFFSET);
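
Since preempt_count now lives in a per-CPU variable instead of thread_info, __switch_to() has to park the outgoing task's value in saved_preempt_count and install the incoming task's value; as the comment notes, only PREEMPT_ACTIVE keeps the two from always being equal at this point. A toy model of that hand-off (types and field names are illustrative):

#include <stdio.h>

struct demo_task {
	const char *name;
	unsigned int saved_preempt_count;
};

/* Stands in for the per-CPU __preempt_count. */
static unsigned int demo_cpu_preempt_count;

static void demo_switch_to(struct demo_task *prev, struct demo_task *next)
{
	/* park the outgoing task's count, install the incoming one's */
	prev->saved_preempt_count = demo_cpu_preempt_count;
	demo_cpu_preempt_count = next->saved_preempt_count;
	printf("switch %s -> %s, cpu count now %u\n",
	       prev->name, next->name, demo_cpu_preempt_count);
}

int main(void)
{
	struct demo_task a = { "a", 0 }, b = { "b", 2 };

	demo_cpu_preempt_count = a.saved_preempt_count;
	demo_switch_to(&a, &b);
	demo_switch_to(&b, &a);
	return 0;
}
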
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index a16bae3f83b3..2f355d229a58 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -43,6 +43,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
43 return pv_tsc_khz; 43 return pv_tsc_khz;
44} 44}
45 45
46void pvclock_touch_watchdogs(void)
47{
48 touch_softlockup_watchdog_sync();
49 clocksource_touch_watchdog();
50 rcu_cpu_stall_reset();
51 reset_hung_task_detector();
52}
53
46static atomic64_t last_value = ATOMIC64_INIT(0); 54static atomic64_t last_value = ATOMIC64_INIT(0);
47 55
48void pvclock_resume(void) 56void pvclock_resume(void)
@@ -74,6 +82,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
74 version = __pvclock_read_cycles(src, &ret, &flags); 82 version = __pvclock_read_cycles(src, &ret, &flags);
75 } while ((src->version & 1) || version != src->version); 83 } while ((src->version & 1) || version != src->version);
76 84
85 if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
86 src->flags &= ~PVCLOCK_GUEST_STOPPED;
87 pvclock_touch_watchdogs();
88 }
89
77 if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) && 90 if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
78 (flags & PVCLOCK_TSC_STABLE_BIT)) 91 (flags & PVCLOCK_TSC_STABLE_BIT))
79 return ret; 92 return ret;
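
pvclock_clocksource_read() keeps re-reading until it sees an even version number that did not change across the read, and with this change it also clears a stuck PVCLOCK_GUEST_STOPPED flag and touches the watchdogs so a guest paused by the host does not report soft lockups after resume. A userspace model of the even/unchanged retry loop (fields and barrier handling are simplified):

#include <stdint.h>
#include <stdio.h>

struct demo_time_info {
	volatile uint32_t version;	/* odd while the host is updating */
	volatile uint64_t system_time;
};

static uint64_t demo_read(struct demo_time_info *src)
{
	uint32_t version;
	uint64_t ret;

	do {
		version = src->version;
		/* a real implementation orders these loads with barriers */
		ret = src->system_time;
	} while ((src->version & 1) || version != src->version);

	return ret;
}

int main(void)
{
	struct demo_time_info ti = { .version = 2, .system_time = 123456789 };

	printf("time=%llu\n", (unsigned long long)demo_read(&ti));
	return 0;
}
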
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 563ed91e6faa..da3c599584a3 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -61,7 +61,7 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
61 if (reboot_type != BOOT_BIOS) { 61 if (reboot_type != BOOT_BIOS) {
62 reboot_type = BOOT_BIOS; 62 reboot_type = BOOT_BIOS;
63 pr_info("%s series board detected. Selecting %s-method for reboots.\n", 63 pr_info("%s series board detected. Selecting %s-method for reboots.\n",
64 "BIOS", d->ident); 64 d->ident, "BIOS");
65 } 65 }
66 return 0; 66 return 0;
67} 67}
@@ -117,7 +117,7 @@ static int __init set_pci_reboot(const struct dmi_system_id *d)
117 if (reboot_type != BOOT_CF9) { 117 if (reboot_type != BOOT_CF9) {
118 reboot_type = BOOT_CF9; 118 reboot_type = BOOT_CF9;
119 pr_info("%s series board detected. Selecting %s-method for reboots.\n", 119 pr_info("%s series board detected. Selecting %s-method for reboots.\n",
120 "PCI", d->ident); 120 d->ident, "PCI");
121 } 121 }
122 return 0; 122 return 0;
123} 123}
@@ -127,7 +127,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
127 if (reboot_type != BOOT_KBD) { 127 if (reboot_type != BOOT_KBD) {
128 reboot_type = BOOT_KBD; 128 reboot_type = BOOT_KBD;
129 pr_info("%s series board detected. Selecting %s-method for reboot.\n", 129 pr_info("%s series board detected. Selecting %s-method for reboot.\n",
130 "KBD", d->ident); 130 d->ident, "KBD");
131 } 131 }
132 return 0; 132 return 0;
133} 133}
@@ -136,228 +136,256 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
136 * This is a single dmi_table handling all reboot quirks. 136 * This is a single dmi_table handling all reboot quirks.
137 */ 137 */
138static struct dmi_system_id __initdata reboot_dmi_table[] = { 138static struct dmi_system_id __initdata reboot_dmi_table[] = {
139 { /* Handle problems with rebooting on Dell E520's */ 139
140 .callback = set_bios_reboot, 140 /* Acer */
141 .ident = "Dell E520", 141 { /* Handle reboot issue on Acer Aspire one */
142 .callback = set_kbd_reboot,
143 .ident = "Acer Aspire One A110",
142 .matches = { 144 .matches = {
143 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 145 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
144 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"), 146 DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
145 }, 147 },
146 }, 148 },
147 { /* Handle problems with rebooting on Dell 1300's */ 149
148 .callback = set_bios_reboot, 150 /* Apple */
149 .ident = "Dell PowerEdge 1300", 151 { /* Handle problems with rebooting on Apple MacBook5 */
152 .callback = set_pci_reboot,
153 .ident = "Apple MacBook5",
150 .matches = { 154 .matches = {
151 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), 155 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
152 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"), 156 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
153 }, 157 },
154 }, 158 },
155 { /* Handle problems with rebooting on Dell 300's */ 159 { /* Handle problems with rebooting on Apple MacBookPro5 */
156 .callback = set_bios_reboot, 160 .callback = set_pci_reboot,
157 .ident = "Dell PowerEdge 300", 161 .ident = "Apple MacBookPro5",
158 .matches = { 162 .matches = {
159 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), 163 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
160 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"), 164 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
161 }, 165 },
162 }, 166 },
163 { /* Handle problems with rebooting on Dell Optiplex 745's SFF */ 167 { /* Handle problems with rebooting on Apple Macmini3,1 */
164 .callback = set_bios_reboot, 168 .callback = set_pci_reboot,
165 .ident = "Dell OptiPlex 745", 169 .ident = "Apple Macmini3,1",
166 .matches = { 170 .matches = {
167 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
168 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), 172 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
169 }, 173 },
170 }, 174 },
171 { /* Handle problems with rebooting on Dell Optiplex 745's DFF */ 175 { /* Handle problems with rebooting on the iMac9,1. */
176 .callback = set_pci_reboot,
177 .ident = "Apple iMac9,1",
178 .matches = {
179 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
180 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
181 },
182 },
183
184 /* ASUS */
185 { /* Handle problems with rebooting on ASUS P4S800 */
172 .callback = set_bios_reboot, 186 .callback = set_bios_reboot,
173 .ident = "Dell OptiPlex 745", 187 .ident = "ASUS P4S800",
174 .matches = { 188 .matches = {
175 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 189 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
176 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), 190 DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
177 DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
178 }, 191 },
179 }, 192 },
180 { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */ 193
194 /* Dell */
195 { /* Handle problems with rebooting on Dell DXP061 */
181 .callback = set_bios_reboot, 196 .callback = set_bios_reboot,
182 .ident = "Dell OptiPlex 745", 197 .ident = "Dell DXP061",
183 .matches = { 198 .matches = {
184 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 199 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
185 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), 200 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
186 DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
187 }, 201 },
188 }, 202 },
189 { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */ 203 { /* Handle problems with rebooting on Dell E520's */
190 .callback = set_bios_reboot, 204 .callback = set_bios_reboot,
191 .ident = "Dell OptiPlex 330", 205 .ident = "Dell E520",
192 .matches = { 206 .matches = {
193 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 207 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
194 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"), 208 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
195 DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
196 }, 209 },
197 }, 210 },
198 { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */ 211 { /* Handle problems with rebooting on the Latitude E5410. */
199 .callback = set_bios_reboot, 212 .callback = set_pci_reboot,
200 .ident = "Dell OptiPlex 360", 213 .ident = "Dell Latitude E5410",
201 .matches = { 214 .matches = {
202 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 215 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
203 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"), 216 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
204 DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
205 }, 217 },
206 }, 218 },
207 { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */ 219 { /* Handle problems with rebooting on the Latitude E5420. */
208 .callback = set_bios_reboot, 220 .callback = set_pci_reboot,
209 .ident = "Dell OptiPlex 760", 221 .ident = "Dell Latitude E5420",
210 .matches = { 222 .matches = {
211 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 223 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
212 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"), 224 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
213 DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
214 }, 225 },
215 }, 226 },
216 { /* Handle problems with rebooting on Dell 2400's */ 227 { /* Handle problems with rebooting on the Latitude E6320. */
217 .callback = set_bios_reboot, 228 .callback = set_pci_reboot,
218 .ident = "Dell PowerEdge 2400", 229 .ident = "Dell Latitude E6320",
219 .matches = { 230 .matches = {
220 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), 231 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
221 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), 232 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
222 }, 233 },
223 }, 234 },
224 { /* Handle problems with rebooting on Dell T5400's */ 235 { /* Handle problems with rebooting on the Latitude E6420. */
225 .callback = set_bios_reboot, 236 .callback = set_pci_reboot,
226 .ident = "Dell Precision T5400", 237 .ident = "Dell Latitude E6420",
227 .matches = { 238 .matches = {
228 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 239 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
229 DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"), 240 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
230 }, 241 },
231 }, 242 },
232 { /* Handle problems with rebooting on Dell T7400's */ 243 { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
233 .callback = set_bios_reboot, 244 .callback = set_bios_reboot,
234 .ident = "Dell Precision T7400", 245 .ident = "Dell OptiPlex 330",
235 .matches = { 246 .matches = {
236 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 247 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
237 DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"), 248 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
249 DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
238 }, 250 },
239 }, 251 },
240 { /* Handle problems with rebooting on HP laptops */ 252 { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
241 .callback = set_bios_reboot, 253 .callback = set_bios_reboot,
242 .ident = "HP Compaq Laptop", 254 .ident = "Dell OptiPlex 360",
243 .matches = { 255 .matches = {
244 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 256 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
245 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), 257 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
258 DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
246 }, 259 },
247 }, 260 },
248 { /* Handle problems with rebooting on Dell XPS710 */ 261 { /* Handle problems with rebooting on Dell Optiplex 745's SFF */
249 .callback = set_bios_reboot, 262 .callback = set_bios_reboot,
250 .ident = "Dell XPS710", 263 .ident = "Dell OptiPlex 745",
251 .matches = { 264 .matches = {
252 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 265 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
253 DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), 266 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
254 }, 267 },
255 }, 268 },
256 { /* Handle problems with rebooting on Dell DXP061 */ 269 { /* Handle problems with rebooting on Dell Optiplex 745's DFF */
257 .callback = set_bios_reboot, 270 .callback = set_bios_reboot,
258 .ident = "Dell DXP061", 271 .ident = "Dell OptiPlex 745",
259 .matches = { 272 .matches = {
260 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 273 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
261 DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"), 274 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
275 DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
262 }, 276 },
263 }, 277 },
264 { /* Handle problems with rebooting on Sony VGN-Z540N */ 278 { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
265 .callback = set_bios_reboot, 279 .callback = set_bios_reboot,
266 .ident = "Sony VGN-Z540N", 280 .ident = "Dell OptiPlex 745",
267 .matches = { 281 .matches = {
268 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 282 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
269 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), 283 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
284 DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
270 }, 285 },
271 }, 286 },
272 { /* Handle problems with rebooting on ASUS P4S800 */ 287 { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
273 .callback = set_bios_reboot, 288 .callback = set_bios_reboot,
274 .ident = "ASUS P4S800", 289 .ident = "Dell OptiPlex 760",
275 .matches = { 290 .matches = {
276 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 291 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
277 DMI_MATCH(DMI_BOARD_NAME, "P4S800"), 292 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
293 DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
278 }, 294 },
279 }, 295 },
280 296 { /* Handle problems with rebooting on the OptiPlex 990. */
281 { /* Handle reboot issue on Acer Aspire one */ 297 .callback = set_pci_reboot,
282 .callback = set_kbd_reboot, 298 .ident = "Dell OptiPlex 990",
283 .ident = "Acer Aspire One A110",
284 .matches = { 299 .matches = {
285 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 300 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
286 DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), 301 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
287 }, 302 },
288 }, 303 },
289 { /* Handle problems with rebooting on Apple MacBook5 */ 304 { /* Handle problems with rebooting on Dell 300's */
290 .callback = set_pci_reboot, 305 .callback = set_bios_reboot,
291 .ident = "Apple MacBook5", 306 .ident = "Dell PowerEdge 300",
292 .matches = { 307 .matches = {
293 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 308 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
294 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), 309 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
295 }, 310 },
296 }, 311 },
297 { /* Handle problems with rebooting on Apple MacBookPro5 */ 312 { /* Handle problems with rebooting on Dell 1300's */
298 .callback = set_pci_reboot, 313 .callback = set_bios_reboot,
299 .ident = "Apple MacBookPro5", 314 .ident = "Dell PowerEdge 1300",
300 .matches = { 315 .matches = {
301 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 316 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
302 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"), 317 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
303 }, 318 },
304 }, 319 },
305 { /* Handle problems with rebooting on Apple Macmini3,1 */ 320 { /* Handle problems with rebooting on Dell 2400's */
306 .callback = set_pci_reboot, 321 .callback = set_bios_reboot,
307 .ident = "Apple Macmini3,1", 322 .ident = "Dell PowerEdge 2400",
308 .matches = { 323 .matches = {
309 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 324 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
310 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"), 325 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
311 }, 326 },
312 }, 327 },
313 { /* Handle problems with rebooting on the iMac9,1. */ 328 { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
314 .callback = set_pci_reboot, 329 .callback = set_pci_reboot,
315 .ident = "Apple iMac9,1", 330 .ident = "Dell PowerEdge C6100",
316 .matches = { 331 .matches = {
317 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 332 DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
318 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"), 333 DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
319 }, 334 },
320 }, 335 },
321 { /* Handle problems with rebooting on the Latitude E6320. */ 336 { /* Handle problems with rebooting on the Precision M6600. */
322 .callback = set_pci_reboot, 337 .callback = set_pci_reboot,
323 .ident = "Dell Latitude E6320", 338 .ident = "Dell Precision M6600",
324 .matches = { 339 .matches = {
325 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 340 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
326 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), 341 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
327 }, 342 },
328 }, 343 },
329 { /* Handle problems with rebooting on the Latitude E5420. */ 344 { /* Handle problems with rebooting on Dell T5400's */
330 .callback = set_pci_reboot, 345 .callback = set_bios_reboot,
331 .ident = "Dell Latitude E5420", 346 .ident = "Dell Precision T5400",
332 .matches = { 347 .matches = {
333 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 348 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
334 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"), 349 DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
335 }, 350 },
336 }, 351 },
337 { /* Handle problems with rebooting on the Latitude E6420. */ 352 { /* Handle problems with rebooting on Dell T7400's */
338 .callback = set_pci_reboot, 353 .callback = set_bios_reboot,
339 .ident = "Dell Latitude E6420", 354 .ident = "Dell Precision T7400",
340 .matches = { 355 .matches = {
341 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 356 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
342 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), 357 DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"),
343 }, 358 },
344 }, 359 },
345 { /* Handle problems with rebooting on the OptiPlex 990. */ 360 { /* Handle problems with rebooting on Dell XPS710 */
346 .callback = set_pci_reboot, 361 .callback = set_bios_reboot,
347 .ident = "Dell OptiPlex 990", 362 .ident = "Dell XPS710",
348 .matches = { 363 .matches = {
349 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 364 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
350 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), 365 DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
351 }, 366 },
352 }, 367 },
353 { /* Handle problems with rebooting on the Precision M6600. */ 368
354 .callback = set_pci_reboot, 369 /* Hewlett-Packard */
355 .ident = "Dell OptiPlex 990", 370 { /* Handle problems with rebooting on HP laptops */
371 .callback = set_bios_reboot,
372 .ident = "HP Compaq Laptop",
356 .matches = { 373 .matches = {
357 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 374 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
358 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), 375 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
376 },
377 },
378
379 /* Sony */
380 { /* Handle problems with rebooting on Sony VGN-Z540N */
381 .callback = set_bios_reboot,
382 .ident = "Sony VGN-Z540N",
383 .matches = {
384 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
385 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
359 }, 386 },
360 }, 387 },
388
361 { } 389 { }
362}; 390};
363 391
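The hunk above only reorders and extends the quirk table, grouping entries by vendor; the table is still consumed in one pass by the DMI layer, and each matching entry selects a reboot method through its callback. A rough user-space analog of that table-plus-callback pattern, with a toy substring matcher standing in for the kernel's DMI matching (names below are illustrative, not the kernel's):

/*
 * Rough analog of the quirk-table pattern above: an array of
 * { vendor, product, method } entries is scanned against the machine's
 * identity strings and every matching entry selects a reboot method.
 */
#include <stdio.h>
#include <string.h>

enum boot_method { BOOT_ACPI, BOOT_BIOS, BOOT_KBD, BOOT_CF9 };
static enum boot_method chosen = BOOT_ACPI;

struct quirk {
        const char *vendor;
        const char *product;
        enum boot_method method;
};

static const struct quirk quirks[] = {
        { "Apple Inc.", "MacBook5",     BOOT_CF9  },
        { "Dell Inc.",  "OptiPlex 745", BOOT_BIOS },
        { NULL,         NULL,           BOOT_ACPI },
};

static void apply_quirks(const char *vendor, const char *product)
{
        const struct quirk *q;

        for (q = quirks; q->vendor; q++)
                if (strstr(vendor, q->vendor) && strstr(product, q->product))
                        chosen = q->method;
}

int main(void)
{
        apply_quirks("Dell Inc.", "OptiPlex 745");
        printf("chosen reboot method: %d\n", chosen);   /* 1 == BOOT_BIOS here */
        return 0;
}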
@@ -511,10 +539,13 @@ static void native_machine_emergency_restart(void)
511 539
512 case BOOT_CF9_COND: 540 case BOOT_CF9_COND:
513 if (port_cf9_safe) { 541 if (port_cf9_safe) {
514 u8 cf9 = inb(0xcf9) & ~6; 542 u8 reboot_code = reboot_mode == REBOOT_WARM ?
543 0x06 : 0x0E;
544 u8 cf9 = inb(0xcf9) & ~reboot_code;
515 outb(cf9|2, 0xcf9); /* Request hard reset */ 545 outb(cf9|2, 0xcf9); /* Request hard reset */
516 udelay(50); 546 udelay(50);
517 outb(cf9|6, 0xcf9); /* Actually do the reset */ 547 /* Actually do the reset */
548 outb(cf9|reboot_code, 0xcf9);
518 udelay(50); 549 udelay(50);
519 } 550 }
520 reboot_type = BOOT_KBD; 551 reboot_type = BOOT_KBD;
@@ -526,6 +557,10 @@ static void native_machine_emergency_restart(void)
526void native_machine_shutdown(void) 557void native_machine_shutdown(void)
527{ 558{
528 /* Stop the cpus and apics */ 559 /* Stop the cpus and apics */
560#ifdef CONFIG_X86_IO_APIC
561 disable_IO_APIC();
562#endif
563
529#ifdef CONFIG_SMP 564#ifdef CONFIG_SMP
530 /* 565 /*
531 * Stop all of the others. Also disable the local irq to 566 * Stop all of the others. Also disable the local irq to
@@ -538,10 +573,6 @@ void native_machine_shutdown(void)
538 573
539 lapic_shutdown(); 574 lapic_shutdown();
540 575
541#ifdef CONFIG_X86_IO_APIC
542 disable_IO_APIC();
543#endif
544
545#ifdef CONFIG_HPET_TIMER 576#ifdef CONFIG_HPET_TIMER
546 hpet_disable(); 577 hpet_disable();
547#endif 578#endif
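The BOOT_CF9_COND hunk above now derives the reset value from reboot_mode instead of hard-coding a cold reset. A minimal user-space sketch of the port-0xCF9 ("PCI") reset sequence, assuming an x86 machine, root privileges and iopl() access; the bit values mirror the hunk (0x06 warm, 0x0E cold), and the reset call is left commented out because running it reboots the machine.

/*
 * Hypothetical sketch of the 0xCF9 reset sequence selected above.  Bit 1
 * arms the reset, bit 2 triggers it, bit 3 asks for a full (cold) reset,
 * hence 0x06 for warm and 0x0E for cold.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/io.h>

static void cf9_reset(int warm)
{
        unsigned char code = warm ? 0x06 : 0x0E;  /* mirrors reboot_code above */
        unsigned char cf9  = inb(0xcf9) & ~code;  /* clear the reset bits first */

        outb(cf9 | 0x02, 0xcf9);                  /* request hard reset */
        usleep(50);
        outb(cf9 | code, 0xcf9);                  /* actually do the reset */
        usleep(50);
}

int main(void)
{
        if (iopl(3)) {                            /* ports above 0x3ff need iopl */
                perror("iopl");
                return 1;
        }
        printf("would issue a cold reset via port 0xcf9\n");
        /* cf9_reset(0);  -- only on a scratch machine */
        return 0;
}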
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 0aa29394ed6f..ca9622a25e95 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -12,7 +12,7 @@
12#include <asm/vsyscall.h> 12#include <asm/vsyscall.h>
13#include <asm/x86_init.h> 13#include <asm/x86_init.h>
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/mrst.h> 15#include <asm/intel-mid.h>
16#include <asm/rtc.h> 16#include <asm/rtc.h>
17 17
18#ifdef CONFIG_X86_32 18#ifdef CONFIG_X86_32
@@ -189,9 +189,17 @@ static __init int add_rtc_cmos(void)
189 return 0; 189 return 0;
190 190
191 /* Intel MID platforms don't have ioport rtc */ 191 /* Intel MID platforms don't have ioport rtc */
192 if (mrst_identify_cpu()) 192 if (intel_mid_identify_cpu())
193 return -ENODEV; 193 return -ENODEV;
194 194
195#ifdef CONFIG_ACPI
196 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
197 /* This warning can likely go away again in a year or two. */
198 pr_info("ACPI: not registering RTC platform device\n");
199 return -ENODEV;
200 }
201#endif
202
195 platform_device_register(&rtc_device); 203 platform_device_register(&rtc_device);
196 dev_info(&rtc_device.dev, 204 dev_info(&rtc_device.dev,
197 "registered platform RTC device (no PNP device found)\n"); 205 "registered platform RTC device (no PNP device found)\n");
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f0de6294b955..cb233bc9dee3 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -993,6 +993,7 @@ void __init setup_arch(char **cmdline_p)
993 efi_init(); 993 efi_init();
994 994
995 dmi_scan_machine(); 995 dmi_scan_machine();
996 dmi_memdev_walk();
996 dmi_set_dump_stack_arch_desc(); 997 dmi_set_dump_stack_arch_desc();
997 998
998 /* 999 /*
@@ -1120,8 +1121,6 @@ void __init setup_arch(char **cmdline_p)
1120 acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start); 1121 acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
1121#endif 1122#endif
1122 1123
1123 reserve_crashkernel();
1124
1125 vsmp_init(); 1124 vsmp_init();
1126 1125
1127 io_delay_init(); 1126 io_delay_init();
@@ -1134,6 +1133,13 @@ void __init setup_arch(char **cmdline_p)
1134 early_acpi_boot_init(); 1133 early_acpi_boot_init();
1135 1134
1136 initmem_init(); 1135 initmem_init();
1136
1137 /*
1138 * Reserve memory for crash kernel after SRAT is parsed so that it
1139 * won't consume hotpluggable memory.
1140 */
1141 reserve_crashkernel();
1142
1137 memblock_find_dma_reserve(); 1143 memblock_find_dma_reserve();
1138 1144
1139#ifdef CONFIG_KVM_GUEST 1145#ifdef CONFIG_KVM_GUEST
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index aecc98a93d1b..85dc05a3aa02 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,36 +73,14 @@
73#include <asm/setup.h> 73#include <asm/setup.h>
74#include <asm/uv/uv.h> 74#include <asm/uv/uv.h>
75#include <linux/mc146818rtc.h> 75#include <linux/mc146818rtc.h>
76
77#include <asm/smpboot_hooks.h> 76#include <asm/smpboot_hooks.h>
78#include <asm/i8259.h> 77#include <asm/i8259.h>
79
80#include <asm/realmode.h> 78#include <asm/realmode.h>
79#include <asm/misc.h>
81 80
82/* State of each CPU */ 81/* State of each CPU */
83DEFINE_PER_CPU(int, cpu_state) = { 0 }; 82DEFINE_PER_CPU(int, cpu_state) = { 0 };
84 83
85#ifdef CONFIG_HOTPLUG_CPU
86/*
87 * We need this for trampoline_base protection from concurrent accesses when
88 * off- and onlining cores wildly.
89 */
90static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
91
92void cpu_hotplug_driver_lock(void)
93{
94 mutex_lock(&x86_cpu_hotplug_driver_mutex);
95}
96
97void cpu_hotplug_driver_unlock(void)
98{
99 mutex_unlock(&x86_cpu_hotplug_driver_mutex);
100}
101
102ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
103ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
104#endif
105
106/* Number of siblings per CPU package */ 84/* Number of siblings per CPU package */
107int smp_num_siblings = 1; 85int smp_num_siblings = 1;
108EXPORT_SYMBOL(smp_num_siblings); 86EXPORT_SYMBOL(smp_num_siblings);
@@ -648,21 +626,46 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
648 return (send_status | accept_status); 626 return (send_status | accept_status);
649} 627}
650 628
629void smp_announce(void)
630{
631 int num_nodes = num_online_nodes();
632
633 printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
634 num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
635}
636
651/* reduce the number of lines printed when booting a large cpu count system */ 637/* reduce the number of lines printed when booting a large cpu count system */
652static void announce_cpu(int cpu, int apicid) 638static void announce_cpu(int cpu, int apicid)
653{ 639{
654 static int current_node = -1; 640 static int current_node = -1;
655 int node = early_cpu_to_node(cpu); 641 int node = early_cpu_to_node(cpu);
642 static int width, node_width;
643
644 if (!width)
645 width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
646
647 if (!node_width)
648 node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
649
650 if (cpu == 1)
651 printk(KERN_INFO "x86: Booting SMP configuration:\n");
656 652
657 if (system_state == SYSTEM_BOOTING) { 653 if (system_state == SYSTEM_BOOTING) {
658 if (node != current_node) { 654 if (node != current_node) {
659 if (current_node > (-1)) 655 if (current_node > (-1))
660 pr_cont(" OK\n"); 656 pr_cont("\n");
661 current_node = node; 657 current_node = node;
662 pr_info("Booting Node %3d, Processors ", node); 658
659 printk(KERN_INFO ".... node %*s#%d, CPUs: ",
660 node_width - num_digits(node), " ", node);
663 } 661 }
664 pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " OK\n" : ""); 662
665 return; 663 /* Add padding for the BSP */
664 if (cpu == 1)
665 pr_cont("%*s", width + 1, " ");
666
667 pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
668
666 } else 669 } else
667 pr_info("Booting Node %d Processor %d APIC 0x%x\n", 670 pr_info("Booting Node %d Processor %d APIC 0x%x\n",
668 node, cpu, apicid); 671 node, cpu, apicid);
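The announce_cpu() rework above packs all CPUs of a node onto one padded line using printf field widths. A stand-alone sketch of the same padding arithmetic, assuming a trivial num_digits() helper (the kernel's lives behind asm/misc.h); it only demonstrates the "%*s" alignment trick, not the real boot path or the node-column padding.

#include <stdio.h>

/* toy replacement for the kernel's num_digits() helper */
static int num_digits(int n)
{
        int d = 1;

        while (n >= 10) {
                n /= 10;
                d++;
        }
        return d;
}

int main(void)
{
        int nr_cpus = 16, node = 0, cpu;
        int width = num_digits(nr_cpus) + 1;      /* + '#' sign, as above */

        printf(".... node #%d, CPUs: ", node);
        printf("%*s", width + 1, " ");            /* padding for the BSP */
        for (cpu = 1; cpu < nr_cpus; cpu++)
                printf("%*s#%d", width - num_digits(cpu), " ", cpu);
        printf("\n");
        return 0;
}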
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 22513e96b012..86179d409893 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -72,14 +72,14 @@ __init int create_simplefb(const struct screen_info *si,
72 * the part that is occupied by the framebuffer */ 72 * the part that is occupied by the framebuffer */
73 len = mode->height * mode->stride; 73 len = mode->height * mode->stride;
74 len = PAGE_ALIGN(len); 74 len = PAGE_ALIGN(len);
75 if (len > si->lfb_size << 16) { 75 if (len > (u64)si->lfb_size << 16) {
76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); 76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
77 return -EINVAL; 77 return -EINVAL;
78 } 78 }
79 79
80 /* setup IORESOURCE_MEM as framebuffer memory */ 80 /* setup IORESOURCE_MEM as framebuffer memory */
81 memset(&res, 0, sizeof(res)); 81 memset(&res, 0, sizeof(res));
82 res.flags = IORESOURCE_MEM; 82 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
83 res.name = simplefb_resname; 83 res.name = simplefb_resname;
84 res.start = si->lfb_base; 84 res.start = si->lfb_base;
85 res.end = si->lfb_base + len - 1; 85 res.end = si->lfb_base + len - 1;
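The (u64) cast above matters because lfb_size is a 32-bit field counted in 64 KiB units (as in struct screen_info): shifting it left by 16 in 32-bit arithmetic wraps for framebuffers of 4 GiB and larger, making the "VRAM smaller than advertised" check misfire. A small demo of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t lfb_size = 0x00010000;                 /* 65536 units of 64 KiB = 4 GiB */
        uint64_t wrong = lfb_size << 16;                /* 32-bit shift wraps to 0 */
        uint64_t right = (uint64_t)lfb_size << 16;      /* widened first, keeps 4 GiB */

        printf("without cast: %llu bytes\n", (unsigned long long)wrong);
        printf("with cast:    %llu bytes\n", (unsigned long long)right);
        return 0;
}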
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 6e60b5fe2244..649b010da00b 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -65,29 +65,32 @@ int __ref _debug_hotplug_cpu(int cpu, int action)
65 if (!cpu_is_hotpluggable(cpu)) 65 if (!cpu_is_hotpluggable(cpu))
66 return -EINVAL; 66 return -EINVAL;
67 67
68 cpu_hotplug_driver_lock(); 68 lock_device_hotplug();
69 69
70 switch (action) { 70 switch (action) {
71 case 0: 71 case 0:
72 ret = cpu_down(cpu); 72 ret = cpu_down(cpu);
73 if (!ret) { 73 if (!ret) {
74 pr_info("CPU %u is now offline\n", cpu); 74 pr_info("CPU %u is now offline\n", cpu);
75 dev->offline = true;
75 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 76 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
76 } else 77 } else
77 pr_debug("Can't offline CPU%d.\n", cpu); 78 pr_debug("Can't offline CPU%d.\n", cpu);
78 break; 79 break;
79 case 1: 80 case 1:
80 ret = cpu_up(cpu); 81 ret = cpu_up(cpu);
81 if (!ret) 82 if (!ret) {
83 dev->offline = false;
82 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 84 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
83 else 85 } else {
84 pr_debug("Can't online CPU%d.\n", cpu); 86 pr_debug("Can't online CPU%d.\n", cpu);
87 }
85 break; 88 break;
86 default: 89 default:
87 ret = -EINVAL; 90 ret = -EINVAL;
88 } 91 }
89 92
90 cpu_hotplug_driver_unlock(); 93 unlock_device_hotplug();
91 94
92 return ret; 95 return ret;
93} 96}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8c8093b146ca..b857ed890b4c 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -88,7 +88,7 @@ static inline void conditional_sti(struct pt_regs *regs)
88 88
89static inline void preempt_conditional_sti(struct pt_regs *regs) 89static inline void preempt_conditional_sti(struct pt_regs *regs)
90{ 90{
91 inc_preempt_count(); 91 preempt_count_inc();
92 if (regs->flags & X86_EFLAGS_IF) 92 if (regs->flags & X86_EFLAGS_IF)
93 local_irq_enable(); 93 local_irq_enable();
94} 94}
@@ -103,7 +103,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
103{ 103{
104 if (regs->flags & X86_EFLAGS_IF) 104 if (regs->flags & X86_EFLAGS_IF)
105 local_irq_disable(); 105 local_irq_disable();
106 dec_preempt_count(); 106 preempt_count_dec();
107} 107}
108 108
109static int __kprobes 109static int __kprobes
@@ -653,7 +653,7 @@ void math_state_restore(void)
653 return; 653 return;
654 } 654 }
655 655
656 tsk->fpu_counter++; 656 tsk->thread.fpu_counter++;
657} 657}
658EXPORT_SYMBOL_GPL(math_state_restore); 658EXPORT_SYMBOL_GPL(math_state_restore);
659 659
@@ -713,7 +713,7 @@ void __init early_trap_init(void)
713 /* int3 can be called from all */ 713 /* int3 can be called from all */
714 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); 714 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
715#ifdef CONFIG_X86_32 715#ifdef CONFIG_X86_32
716 set_intr_gate(X86_TRAP_PF, &page_fault); 716 set_intr_gate(X86_TRAP_PF, page_fault);
717#endif 717#endif
718 load_idt(&idt_descr); 718 load_idt(&idt_descr);
719} 719}
@@ -721,7 +721,7 @@ void __init early_trap_init(void)
721void __init early_trap_pf_init(void) 721void __init early_trap_pf_init(void)
722{ 722{
723#ifdef CONFIG_X86_64 723#ifdef CONFIG_X86_64
724 set_intr_gate(X86_TRAP_PF, &page_fault); 724 set_intr_gate(X86_TRAP_PF, page_fault);
725#endif 725#endif
726} 726}
727 727
@@ -737,30 +737,30 @@ void __init trap_init(void)
737 early_iounmap(p, 4); 737 early_iounmap(p, 4);
738#endif 738#endif
739 739
740 set_intr_gate(X86_TRAP_DE, &divide_error); 740 set_intr_gate(X86_TRAP_DE, divide_error);
741 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); 741 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
742 /* int4 can be called from all */ 742 /* int4 can be called from all */
743 set_system_intr_gate(X86_TRAP_OF, &overflow); 743 set_system_intr_gate(X86_TRAP_OF, &overflow);
744 set_intr_gate(X86_TRAP_BR, &bounds); 744 set_intr_gate(X86_TRAP_BR, bounds);
745 set_intr_gate(X86_TRAP_UD, &invalid_op); 745 set_intr_gate(X86_TRAP_UD, invalid_op);
746 set_intr_gate(X86_TRAP_NM, &device_not_available); 746 set_intr_gate(X86_TRAP_NM, device_not_available);
747#ifdef CONFIG_X86_32 747#ifdef CONFIG_X86_32
748 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); 748 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
749#else 749#else
750 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); 750 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
751#endif 751#endif
752 set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun); 752 set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
753 set_intr_gate(X86_TRAP_TS, &invalid_TSS); 753 set_intr_gate(X86_TRAP_TS, invalid_TSS);
754 set_intr_gate(X86_TRAP_NP, &segment_not_present); 754 set_intr_gate(X86_TRAP_NP, segment_not_present);
755 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); 755 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
756 set_intr_gate(X86_TRAP_GP, &general_protection); 756 set_intr_gate(X86_TRAP_GP, general_protection);
757 set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug); 757 set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
758 set_intr_gate(X86_TRAP_MF, &coprocessor_error); 758 set_intr_gate(X86_TRAP_MF, coprocessor_error);
759 set_intr_gate(X86_TRAP_AC, &alignment_check); 759 set_intr_gate(X86_TRAP_AC, alignment_check);
760#ifdef CONFIG_X86_MCE 760#ifdef CONFIG_X86_MCE
761 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); 761 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
762#endif 762#endif
763 set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error); 763 set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);
764 764
765 /* Reserve all the builtin and the syscall vector: */ 765 /* Reserve all the builtin and the syscall vector: */
766 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 766 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
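The set_intr_gate() changes above only drop the explicit '&' in front of most handler symbols. Whatever the motivation in the surrounding series, the two spellings denote the same pointer value in C, since a function name used as an argument decays to a pointer to that function. A tiny stand-alone demonstration:

#include <stdio.h>

static void handler(void)
{
        puts("handler called");
}

int main(void)
{
        void (*a)(void) = handler;      /* function name decays to a pointer */
        void (*b)(void) = &handler;     /* explicit & yields the same pointer */

        printf("same pointer: %d\n", a == b);
        a();
        return 0;
}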
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 10c4f3006afd..da6b35a98260 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -199,6 +199,15 @@ SECTIONS
199 __x86_cpu_dev_end = .; 199 __x86_cpu_dev_end = .;
200 } 200 }
201 201
202#ifdef CONFIG_X86_INTEL_MID
203 .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
204 LOAD_OFFSET) {
205 __x86_intel_mid_dev_start = .;
206 *(.x86_intel_mid_dev.init)
207 __x86_intel_mid_dev_end = .;
208 }
209#endif
210
202 /* 211 /*
203 * start address and size of operations which during runtime 212 * start address and size of operations which during runtime
204 * can be patched with virtualization friendly instructions or 213 * can be patched with virtualization friendly instructions or
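The new .x86_intel_mid_dev.init section above follows the usual kernel pattern of collecting structs into a named section and walking them between linker-provided start/end symbols. A hedged user-space sketch of that pattern, assuming GNU ld and a made-up section name ("devinit", for which ld synthesizes __start_devinit/__stop_devinit) and naturally aligned, equally sized entries so plain pointer arithmetic steps through them:

#include <stdio.h>

struct devinit {
        const char *name;
        int (*init)(void);
};

static int foo_init(void) { puts("foo up"); return 0; }
static int bar_init(void) { puts("bar up"); return 0; }

/* drop each descriptor into the "devinit" section at its definition site */
#define DEVINIT(n, fn) \
        static const struct devinit n __attribute__((used, section("devinit"))) = \
                { #n, fn }

DEVINIT(foo, foo_init);
DEVINIT(bar, bar_init);

/* GNU ld provides these bounds for C-identifier-named orphan sections */
extern const struct devinit __start_devinit[], __stop_devinit[];

int main(void)
{
        const struct devinit *d;

        for (d = __start_devinit; d < __stop_devinit; d++) {
                printf("initialising %s\n", d->name);
                d->init();
        }
        return 0;
}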
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index b014d9414d08..040681928e9d 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -66,3 +66,10 @@ EXPORT_SYMBOL(empty_zero_page);
66#ifndef CONFIG_PARAVIRT 66#ifndef CONFIG_PARAVIRT
67EXPORT_SYMBOL(native_load_gs_index); 67EXPORT_SYMBOL(native_load_gs_index);
68#endif 68#endif
69
70#ifdef CONFIG_PREEMPT
71EXPORT_SYMBOL(___preempt_schedule);
72#ifdef CONFIG_CONTEXT_TRACKING
73EXPORT_SYMBOL(___preempt_schedule_context);
74#endif
75#endif
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 8ce0072cd700..021783b1f46a 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -116,6 +116,8 @@ struct x86_msi_ops x86_msi = {
116 .teardown_msi_irqs = default_teardown_msi_irqs, 116 .teardown_msi_irqs = default_teardown_msi_irqs,
117 .restore_msi_irqs = default_restore_msi_irqs, 117 .restore_msi_irqs = default_restore_msi_irqs,
118 .setup_hpet_msi = default_setup_hpet_msi, 118 .setup_hpet_msi = default_setup_hpet_msi,
119 .msi_mask_irq = default_msi_mask_irq,
120 .msix_mask_irq = default_msix_mask_irq,
119}; 121};
120 122
121/* MSI arch specific hooks */ 123/* MSI arch specific hooks */
@@ -138,6 +140,14 @@ void arch_restore_msi_irqs(struct pci_dev *dev, int irq)
138{ 140{
139 x86_msi.restore_msi_irqs(dev, irq); 141 x86_msi.restore_msi_irqs(dev, irq);
140} 142}
143u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
144{
145 return x86_msi.msi_mask_irq(desc, mask, flag);
146}
147u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
148{
149 return x86_msi.msix_mask_irq(desc, flag);
150}
141#endif 151#endif
142 152
143struct x86_io_apic_ops x86_io_apic_ops = { 153struct x86_io_apic_ops x86_io_apic_ops = {
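The msi_mask_irq/msix_mask_irq hooks above follow the x86_msi ops-struct convention: default implementations sit in a writable struct of function pointers, thin arch_*() wrappers call through it, and a platform may replace individual entries before the callers run. A hedged user-space sketch of that indirection; the names below are illustrative, not the kernel's:

#include <stdio.h>

struct msi_ops {
        unsigned int (*mask_irq)(unsigned int mask, unsigned int flag);
};

static unsigned int default_mask_irq(unsigned int mask, unsigned int flag)
{
        return flag ? mask : 0;                 /* pretend-default behaviour */
}

static unsigned int quiet_mask_irq(unsigned int mask, unsigned int flag)
{
        (void)mask; (void)flag;
        return 0;                               /* a platform override that masks nothing */
}

static struct msi_ops msi_ops = {
        .mask_irq = default_mask_irq,           /* like default_msi_mask_irq */
};

static unsigned int arch_mask_irq(unsigned int mask, unsigned int flag)
{
        return msi_ops.mask_irq(mask, flag);    /* like arch_msi_mask_irq */
}

int main(void)
{
        printf("default:  %u\n", arch_mask_irq(0xff, 1));
        msi_ops.mask_irq = quiet_mask_irq;      /* platform swaps the hook in */
        printf("override: %u\n", arch_mask_irq(0xff, 1));
        return 0;
}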