Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/boot/compressed/eboot.c                   |  2
-rw-r--r--  arch/x86/include/asm/hypervisor.h                  |  2
-rw-r--r--  arch/x86/include/asm/spinlock.h                    |  2
-rw-r--r--  arch/x86/include/asm/xen/page.h                    |  5
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c                   |  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c             | 73
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c        |  1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c  | 12
-rw-r--r--  arch/x86/kernel/process.c                          | 14
-rw-r--r--  arch/x86/mm/ioremap.c                              | 14
-rw-r--r--  arch/x86/net/bpf_jit_comp.c                        | 28
-rw-r--r--  arch/x86/pci/acpi.c                                | 24
-rw-r--r--  arch/x86/vdso/Makefile                             |  2
-rw-r--r--  arch/x86/xen/enlighten.c                           | 27
-rw-r--r--  arch/x86/xen/suspend.c                             | 10
15 files changed, 160 insertions, 60 deletions
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index ef17683484e9..48304b89b601 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1109,6 +1109,8 @@ struct boot_params *make_boot_params(struct efi_config *c)
 	if (!cmdline_ptr)
 		goto fail;
 	hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
+	/* Fill in upper bits of command line address, NOP on 32 bit */
+	boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
 
 	hdr->ramdisk_image = 0;
 	hdr->ramdisk_size = 0;
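
The two added lines handle command lines allocated above 4 GiB: hdr->cmd_line_ptr is a legacy 32-bit field, so the upper half of the address goes into boot_params->ext_cmd_line_ptr. A standalone sketch of that split, using illustrative stand-in names rather than the real boot_params layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the two boot_params fields the patch fills. */
struct fake_boot_params {
        uint32_t cmd_line_ptr;     /* legacy field: low 32 bits  */
        uint32_t ext_cmd_line_ptr; /* extension:    high 32 bits */
};

int main(void)
{
        struct fake_boot_params bp;
        uint64_t addr = 0x1ffff0000ULL;       /* example: command line above 4 GiB */

        bp.cmd_line_ptr     = (uint32_t)addr; /* truncates to the low half          */
        bp.ext_cmd_line_ptr = addr >> 32;     /* 0 on 32-bit kernels, hence the NOP */

        printf("low=%#x high=%#x\n", bp.cmd_line_ptr, bp.ext_cmd_line_ptr);
        return 0;
}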
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index e42f758a0fbd..055ea9941dd5 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper;
 /* Recognized hypervisors */
 extern const struct hypervisor_x86 x86_hyper_vmware;
 extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_xen;
 extern const struct hypervisor_x86 x86_hyper_kvm;
 
 extern void init_hypervisor(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index cf87de3fc390..64b611782ef0 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
 	tmp.head &= ~TICKET_SLOWPATH_FLAG;
-	return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
+	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 
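The spinlock.h change is a C integer-promotion fix: __ticket_t is a narrow unsigned type, so tmp.tail - tmp.head is computed as a signed int and can go negative once the tail counter wraps, making a contended lock look uncontended. A minimal sketch of the failure mode, using a stand-in ticket_t rather than the kernel type:

#include <stdio.h>

typedef unsigned char ticket_t; /* stand-in for __ticket_t on small-NR_CPUS builds */

int main(void)
{
        ticket_t head = 0xFE, tail = 0x01; /* tail wrapped past 0xFF: 3 tickets apart */

        /* Without the cast: operands promote to int, 0x01 - 0xFE = -253. */
        printf("promoted: %d\n", tail - head);                  /* -253: "uncontended" */

        /* With the cast: modular arithmetic recovers the real distance. */
        printf("modular:  %d\n", (ticket_t)(tail - head));      /* 3 */
        return 0;
}

Casting the difference back to __ticket_t restores modulo-2^N arithmetic, which is what ticket distances actually are.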
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 358dcd338915..c44a5d53e464 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
 	return false;
 }
 
+static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+	return __get_free_pages(__GFP_NOWARN, order);
+}
+
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 36ce402a3fa5..d820d8eae96b 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -27,8 +27,8 @@
 
 static const __initconst struct hypervisor_x86 * const hypervisors[] =
 {
-#ifdef CONFIG_XEN_PVHVM
-	&x86_hyper_xen_hvm,
+#ifdef CONFIG_XEN
+	&x86_hyper_xen,
 #endif
 	&x86_hyper_vmware,
 	&x86_hyper_ms_hyperv,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 219d3fb423a1..3998131d1a68 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1134,7 +1134,7 @@ static __initconst const u64 slm_hw_cache_extra_regs
  [ C(LL  ) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
-		[ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
+		[ C(RESULT_MISS)   ] = 0,
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
@@ -1184,8 +1184,7 @@ static __initconst const u64 slm_hw_cache_event_ids
 	[ C(OP_READ) ] = {
 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 		[ C(RESULT_ACCESS) ] = 0x01b7,
-		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
-		[ C(RESULT_MISS)   ] = 0x01b7,
+		[ C(RESULT_MISS)   ] = 0,
 	},
 	[ C(OP_WRITE) ] = {
 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
@@ -1217,7 +1216,7 @@ static __initconst const u64 slm_hw_cache_event_ids
 [ C(ITLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
-		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
+		[ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = -1,
@@ -2533,34 +2532,6 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
 	return x86_event_sysfs_show(page, config, event);
 }
 
-static __initconst const struct x86_pmu core_pmu = {
-	.name			= "core",
-	.handle_irq		= x86_pmu_handle_irq,
-	.disable_all		= x86_pmu_disable_all,
-	.enable_all		= core_pmu_enable_all,
-	.enable			= core_pmu_enable_event,
-	.disable		= x86_pmu_disable_event,
-	.hw_config		= x86_pmu_hw_config,
-	.schedule_events	= x86_schedule_events,
-	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
-	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
-	.event_map		= intel_pmu_event_map,
-	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
-	.apic			= 1,
-	/*
-	 * Intel PMCs cannot be accessed sanely above 32 bit width,
-	 * so we install an artificial 1<<31 period regardless of
-	 * the generic event period:
-	 */
-	.max_period		= (1ULL << 31) - 1,
-	.get_event_constraints	= intel_get_event_constraints,
-	.put_event_constraints	= intel_put_event_constraints,
-	.event_constraints	= intel_core_event_constraints,
-	.guest_get_msrs		= core_guest_get_msrs,
-	.format_attrs		= intel_arch_formats_attr,
-	.events_sysfs_show	= intel_event_sysfs_show,
-};
-
 struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
@@ -2743,6 +2714,44 @@ static struct attribute *intel_arch3_formats_attr[] = {
 	NULL,
 };
 
+static __initconst const struct x86_pmu core_pmu = {
+	.name			= "core",
+	.handle_irq		= x86_pmu_handle_irq,
+	.disable_all		= x86_pmu_disable_all,
+	.enable_all		= core_pmu_enable_all,
+	.enable			= core_pmu_enable_event,
+	.disable		= x86_pmu_disable_event,
+	.hw_config		= x86_pmu_hw_config,
+	.schedule_events	= x86_schedule_events,
+	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
+	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
+	.event_map		= intel_pmu_event_map,
+	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	.apic			= 1,
+	/*
+	 * Intel PMCs cannot be accessed sanely above 32-bit width,
+	 * so we install an artificial 1<<31 period regardless of
+	 * the generic event period:
+	 */
+	.max_period		= (1ULL<<31) - 1,
+	.get_event_constraints	= intel_get_event_constraints,
+	.put_event_constraints	= intel_put_event_constraints,
+	.event_constraints	= intel_core_event_constraints,
+	.guest_get_msrs		= core_guest_get_msrs,
+	.format_attrs		= intel_arch_formats_attr,
+	.events_sysfs_show	= intel_event_sysfs_show,
+
+	/*
+	 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
+	 * together with PMU version 1 and thus be using core_pmu with
+	 * shared_regs. We need following callbacks here to allocate
+	 * it properly.
+	 */
+	.cpu_prepare		= intel_pmu_cpu_prepare,
+	.cpu_starting		= intel_pmu_cpu_starting,
+	.cpu_dying		= intel_pmu_cpu_dying,
+};
+
 static __initconst const struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
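
The raw event codes in these tables follow the IA32_PERFEVTSELx encoding: bits 0-7 are the event select, bits 8-15 the unit mask, bit 18 edge detect, bits 24-31 the counter mask (field layout per the Intel SDM; the event-name interpretations come from the patch comments). A small decoding sketch:

#include <stdio.h>

/* Decode an IA32_PERFEVTSELx-style raw perf config value. */
static void decode(unsigned long long cfg)
{
        printf("event=0x%02llx umask=0x%02llx edge=%llu cmask=0x%02llx\n",
               cfg & 0xff,          /* bits  0-7:  event select */
               (cfg >> 8) & 0xff,   /* bits  8-15: unit mask    */
               (cfg >> 18) & 1,     /* bit  18:    edge detect  */
               (cfg >> 24) & 0xff); /* bits 24-31: counter mask */
}

int main(void)
{
        decode(0x40205); /* PAGE_WALKS.I_SIDE_WALKS: event 0x05, umask 0x02, edge=1 */
        decode(0x01b7);  /* OFFCORE_RESPONSE: event 0xb7, umask 0x01 (plus extra MSR) */
        return 0;
}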
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 999289b94025..358c54ad20d4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -722,6 +722,7 @@ static int __init rapl_pmu_init(void)
 		break;
 	case 60: /* Haswell */
 	case 69: /* Haswell-Celeron */
+	case 61: /* Broadwell */
 		rapl_cntr_mask = RAPL_IDX_HSW;
 		rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
 		break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
index 3001015b755c..4562e9e22c60 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
@@ -1,6 +1,13 @@
 /* Nehalem/SandBridge/Haswell uncore support */
 #include "perf_event_intel_uncore.h"
 
+/* Uncore IMC PCI IDs */
+#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
+#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
+#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
+#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
+#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
+
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
 #define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
@@ -472,6 +479,10 @@ static const struct pci_device_id hsw_uncore_pci_ids[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
 	{ /* end: all zeroes */ },
 };
 
@@ -502,6 +513,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
 	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
 	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
 	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
+	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
 	{  /* end marker */ }
 };
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 8213da62b1b7..6e338e3b1dc0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,7 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
 };
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
@@ -156,11 +156,13 @@ void flush_thread(void)
 		/* FPU state will be reallocated lazily at the first use. */
 		drop_fpu(tsk);
 		free_thread_xstate(tsk);
-	} else if (!used_math()) {
-		/* kthread execs. TODO: cleanup this horror. */
-		if (WARN_ON(init_fpu(tsk)))
-			force_sig(SIGKILL, tsk);
-		user_fpu_begin();
+	} else {
+		if (!tsk_used_math(tsk)) {
+			/* kthread execs. TODO: cleanup this horror. */
+			if (WARN_ON(init_fpu(tsk)))
+				force_sig(SIGKILL, tsk);
+			user_fpu_begin();
+		}
 		restore_init_xstate();
 	}
 }
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 5ead4d6cf3a7..70e7444c6835 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -351,18 +351,20 @@ int arch_ioremap_pmd_supported(void)
  */
 void *xlate_dev_mem_ptr(phys_addr_t phys)
 {
-	void *addr;
-	unsigned long start = phys & PAGE_MASK;
+	unsigned long start  = phys &  PAGE_MASK;
+	unsigned long offset = phys & ~PAGE_MASK;
+	unsigned long vaddr;
 
 	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
 	if (page_is_ram(start >> PAGE_SHIFT))
 		return __va(phys);
 
-	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
-	if (addr)
-		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+	/* Only add the offset on success and return NULL if the ioremap() failed: */
+	if (vaddr)
+		vaddr += offset;
 
-	return addr;
+	return (void *)vaddr;
 }
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
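
The xlate_dev_mem_ptr() rewrite is plain base-plus-offset arithmetic: ioremap_cache() maps the page-aligned base and the sub-page offset is added back on success (the old code OR-ed it in, which is equivalent only because the base is page-aligned). A userspace-flavoured sketch of the split, assuming a 4 KiB page size:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t phys   = 0xfed00043ULL;      /* arbitrary example address     */
        uint64_t start  = phys &  PAGE_MASK;  /* page-aligned base: 0xfed00000 */
        uint64_t offset = phys & ~PAGE_MASK;  /* sub-page offset:   0x43       */

        /* Because start is page-aligned, start | offset == start + offset. */
        printf("start=%#llx offset=%#llx sum=%#llx\n",
               (unsigned long long)start, (unsigned long long)offset,
               (unsigned long long)(start + offset));
        return 0;
}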
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 987514396c1e..99f76103c6b7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			if (is_ereg(dst_reg))
 				EMIT1(0x41);
 			EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
+
+			/* emit 'movzwl eax, ax' */
+			if (is_ereg(dst_reg))
+				EMIT3(0x45, 0x0F, 0xB7);
+			else
+				EMIT2(0x0F, 0xB7);
+			EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
 			break;
 		case 32:
 			/* emit 'bswap eax' to swap lower 4 bytes */
@@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			break;
 
 		case BPF_ALU | BPF_END | BPF_FROM_LE:
+			switch (imm32) {
+			case 16:
+				/* emit 'movzwl eax, ax' to zero extend 16-bit
+				 * into 64 bit
+				 */
+				if (is_ereg(dst_reg))
+					EMIT3(0x45, 0x0F, 0xB7);
+				else
+					EMIT2(0x0F, 0xB7);
+				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
+				break;
+			case 32:
+				/* emit 'mov eax, eax' to clear upper 32-bits */
+				if (is_ereg(dst_reg))
+					EMIT1(0x45);
+				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
+				break;
+			case 64:
+				/* nop */
+				break;
+			}
 			break;
 
 		/* ST: *(u8*)(dst_reg + off) = imm */
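
In C terms, the JIT cases above implement the BPF endianness ops: FROM_BE on little-endian x86 is a byte swap ('ror ax,8' plus 'movzwl', or 'bswap'), while FROM_LE is just truncation with zero-extension of the upper register bits. A semantic sketch of what the emitted code computes (this models the behaviour; it is not the JIT, which emits raw opcode bytes):

#include <stdio.h>
#include <stdint.h>

/* FROM_BE: what 'ror ax,8' + 'movzwl' and 'bswap' compute. */
static uint64_t bpf_from_be(uint64_t v, int bits)
{
        switch (bits) {
        case 16: return (uint16_t)__builtin_bswap16((uint16_t)v);
        case 32: return (uint32_t)__builtin_bswap32((uint32_t)v);
        case 64: return __builtin_bswap64(v);
        }
        return v;
}

/* FROM_LE on x86: truncation + zero-extension; 64-bit is a nop. */
static uint64_t bpf_from_le(uint64_t v, int bits)
{
        switch (bits) {
        case 16: return (uint16_t)v; /* movzwl       */
        case 32: return (uint32_t)v; /* mov eax, eax */
        case 64: return v;           /* nop          */
        }
        return v;
}

int main(void)
{
        printf("from_be16(0x1234)         = 0x%llx\n",
               (unsigned long long)bpf_from_be(0x1234, 16));             /* 0x3412    */
        printf("from_le32(0xdeadbeefcafe) = 0x%llx\n",
               (unsigned long long)bpf_from_le(0xdeadbeefcafeULL, 32));  /* 0xbeefcafe */
        return 0;
}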
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index e4695985f9de..d93963340c3c 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
 	kfree(info);
 }
 
+/*
+ * An IO port or MMIO resource assigned to a PCI host bridge may be
+ * consumed by the host bridge itself or available to its child
+ * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
+ * to tell whether the resource is consumed by the host bridge itself,
+ * but firmware hasn't used that bit consistently, so we can't rely on it.
+ *
+ * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
+ * to be available to child bus/devices except one special case:
+ *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
+ *     to access PCI configuration space.
+ *
+ * So explicitly filter out PCI CFG IO ports [0xCF8-0xCFF].
+ */
+static bool resource_is_pcicfg_ioport(struct resource *res)
+{
+	return (res->flags & IORESOURCE_IO) &&
+		res->start == 0xCF8 && res->end == 0xCFF;
+}
+
 static void probe_pci_root_info(struct pci_root_info *info,
 				struct acpi_device *device,
 				int busnum, int domain,
@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
 			 "no IO and memory resources present in _CRS\n");
 	else
 		resource_list_for_each_entry_safe(entry, tmp, list) {
-			if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
-			    (entry->res->flags & IORESOURCE_DISABLED))
+			if ((entry->res->flags & IORESOURCE_DISABLED) ||
+			    resource_is_pcicfg_ioport(entry->res))
 				resource_list_destroy_entry(entry);
 			else
 				entry->res->name = info->name;
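
The filtered range is the legacy PCI configuration mechanism: a bus/device/function/register address written to port 0xCF8 selects a config dword, which is then read or written at 0xCFC. A hedged userspace sketch of such an access (requires root plus iopl(3); pci_cfg_read32 is a hypothetical helper, not a kernel API):

#include <stdint.h>
#include <sys/io.h>   /* outl/inl; Linux, x86, privileged */

#define PCI_CFG_ADDR 0xCF8
#define PCI_CFG_DATA 0xCFC

/* Read a 32-bit PCI config register via the legacy CF8/CFC mechanism. */
uint32_t pci_cfg_read32(uint8_t bus, uint8_t dev, uint8_t fn, uint8_t reg)
{
        uint32_t addr = (1u << 31)              /* enable bit             */
                      | ((uint32_t)bus << 16)
                      | ((uint32_t)dev << 11)
                      | ((uint32_t)fn  <<  8)
                      | (reg & 0xFC);           /* dword-aligned register */

        outl(addr, PCI_CFG_ADDR);
        return inl(PCI_CFG_DATA);
}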
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 275a3a8b78af..e97032069f88 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
 $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
 	$(call if_changed,vdso)
 
-HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
 hostprogs-y			+= vdso2c
 
 quiet_cmd_vdso2c = VDSO2C   $@
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 94578efd3067..46957ead3060 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1760,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = {
 
 static void __init xen_hvm_guest_init(void)
 {
+	if (xen_pv_domain())
+		return;
+
 	init_hvm_pv_info();
 
 	xen_hvm_init_shared_info();
@@ -1775,6 +1778,7 @@ static void __init xen_hvm_guest_init(void)
 	xen_hvm_init_time_ops();
 	xen_hvm_init_mmu_ops();
 }
+#endif
 
 static bool xen_nopv = false;
 static __init int xen_parse_nopv(char *arg)
@@ -1784,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg)
 }
 early_param("xen_nopv", xen_parse_nopv);
 
-static uint32_t __init xen_hvm_platform(void)
+static uint32_t __init xen_platform(void)
 {
 	if (xen_nopv)
 		return 0;
 
-	if (xen_pv_domain())
-		return 0;
-
 	return xen_cpuid_base();
 }
 
@@ -1809,11 +1810,19 @@ bool xen_hvm_need_lapic(void)
 }
 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
 
-const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
-	.name			= "Xen HVM",
-	.detect			= xen_hvm_platform,
+static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+{
+	if (xen_pv_domain())
+		clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+}
+
+const struct hypervisor_x86 x86_hyper_xen = {
+	.name			= "Xen",
+	.detect			= xen_platform,
+#ifdef CONFIG_XEN_PVHVM
 	.init_platform		= xen_hvm_guest_init,
+#endif
 	.x2apic_available	= xen_x2apic_para_available,
+	.set_cpu_features	= xen_set_cpu_features,
 };
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
-#endif
+EXPORT_SYMBOL(x86_hyper_xen);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index d9497698645a..53b4c0811f4f 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data)
 	tick_resume_local();
 }
 
+static void xen_vcpu_notify_suspend(void *data)
+{
+	tick_suspend_local();
+}
+
 void xen_arch_resume(void)
 {
 	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
 }
+
+void xen_arch_suspend(void)
+{
+	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
+}