Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Makefile                              |   2
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c             |   9
-rw-r--r--  arch/x86/include/asm/efi.h                     |   6
-rw-r--r--  arch/x86/include/asm/xen/hypervisor.h          |   1
-rw-r--r--  arch/x86/include/asm/xen/interface.h           |   4
-rw-r--r--  arch/x86/kernel/apic/io_apic.c                 |   3
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c          |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c               |   6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c           |  10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c               |  16
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c  |  48
-rw-r--r--  arch/x86/kernel/cpu/perf_event_knc.c           |  93
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c            | 127
-rw-r--r--  arch/x86/kernel/e820.c                         |   3
-rw-r--r--  arch/x86/kernel/entry_32.S                     |   8
-rw-r--r--  arch/x86/kernel/entry_64.S                     |   2
-rw-r--r--  arch/x86/kernel/kvm.c                          |   3
-rw-r--r--  arch/x86/kernel/reboot.c                       |   8
-rw-r--r--  arch/x86/kernel/setup.c                        |  30
-rw-r--r--  arch/x86/kernel/signal.c                       |   4
-rw-r--r--  arch/x86/kernel/uprobes.c                      |  16
-rw-r--r--  arch/x86/kvm/lapic.c                           |   2
-rw-r--r--  arch/x86/kvm/mmu.c                             |   3
-rw-r--r--  arch/x86/kvm/x86.c                             |  60
-rw-r--r--  arch/x86/mm/init.c                             |  58
-rw-r--r--  arch/x86/mm/init_64.c                          |   7
-rw-r--r--  arch/x86/oprofile/nmi_int.c                    |   2
-rw-r--r--  arch/x86/platform/efi/efi.c                    |  47
-rw-r--r--  arch/x86/platform/efi/efi_64.c                 |   7
-rw-r--r--  arch/x86/realmode/rm/wakeup_asm.S              |  15
-rw-r--r--  arch/x86/xen/enlighten.c                       |   2
-rw-r--r--  arch/x86/xen/mmu.c                             |  21
32 files changed, 433 insertions, 192 deletions
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 58790bd85c1d..05afcca66de6 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -142,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
142KBUILD_CFLAGS += $(mflags-y) 142KBUILD_CFLAGS += $(mflags-y)
143KBUILD_AFLAGS += $(mflags-y) 143KBUILD_AFLAGS += $(mflags-y)
144 144
145archscripts: 145archscripts: scripts_basic
146 $(Q)$(MAKE) $(build)=arch/x86/tools relocs 146 $(Q)$(MAKE) $(build)=arch/x86/tools relocs
147 147
148### 148###
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 7c04d0da709b..1b9c22bea8a7 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -515,6 +515,11 @@ static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
515} 515}
516 516
517 517
518static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
519{
520 aesni_enc(ctx, out, in);
521}
522
518static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 523static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
519 struct scatterlist *src, unsigned int nbytes) 524 struct scatterlist *src, unsigned int nbytes)
520{ 525{
@@ -525,7 +530,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
525 .tbuflen = sizeof(buf), 530 .tbuflen = sizeof(buf),
526 531
527 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), 532 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
528 .tweak_fn = XTS_TWEAK_CAST(aesni_enc), 533 .tweak_fn = aesni_xts_tweak,
529 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), 534 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
530 .crypt_fn = lrw_xts_encrypt_callback, 535 .crypt_fn = lrw_xts_encrypt_callback,
531 }; 536 };
@@ -550,7 +555,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
550 .tbuflen = sizeof(buf), 555 .tbuflen = sizeof(buf),
551 556
552 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), 557 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
553 .tweak_fn = XTS_TWEAK_CAST(aesni_enc), 558 .tweak_fn = aesni_xts_tweak,
554 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), 559 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
555 .crypt_fn = lrw_xts_decrypt_callback, 560 .crypt_fn = lrw_xts_decrypt_callback,
556 }; 561 };
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index c9dcc181d4d1..6e8fdf5ad113 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -35,7 +35,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
35#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ 35#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
36 efi_call_virt(f, a1, a2, a3, a4, a5, a6) 36 efi_call_virt(f, a1, a2, a3, a4, a5, a6)
37 37
38#define efi_ioremap(addr, size, type) ioremap_cache(addr, size) 38#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
39 39
40#else /* !CONFIG_X86_32 */ 40#else /* !CONFIG_X86_32 */
41 41
@@ -89,7 +89,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
89 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) 89 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
90 90
91extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, 91extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
92 u32 type); 92 u32 type, u64 attribute);
93 93
94#endif /* CONFIG_X86_32 */ 94#endif /* CONFIG_X86_32 */
95 95
@@ -98,6 +98,8 @@ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
98extern int efi_memblock_x86_reserve_range(void); 98extern int efi_memblock_x86_reserve_range(void);
99extern void efi_call_phys_prelog(void); 99extern void efi_call_phys_prelog(void);
100extern void efi_call_phys_epilog(void); 100extern void efi_call_phys_epilog(void);
101extern void efi_unmap_memmap(void);
102extern void efi_memory_uc(u64 addr, unsigned long size);
101 103
102#ifndef CONFIG_EFI 104#ifndef CONFIG_EFI
103/* 105/*
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 66d0fff1ee84..125f344f06a9 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -33,7 +33,6 @@
33#ifndef _ASM_X86_XEN_HYPERVISOR_H 33#ifndef _ASM_X86_XEN_HYPERVISOR_H
34#define _ASM_X86_XEN_HYPERVISOR_H 34#define _ASM_X86_XEN_HYPERVISOR_H
35 35
36/* arch/i386/kernel/setup.c */
37extern struct shared_info *HYPERVISOR_shared_info; 36extern struct shared_info *HYPERVISOR_shared_info;
38extern struct start_info *xen_start_info; 37extern struct start_info *xen_start_info;
39 38
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 6d2f75a82a14..54d52ff1304a 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -51,14 +51,14 @@
51 * with Xen so that on ARM we can have one ABI that works for 32 and 64 51 * with Xen so that on ARM we can have one ABI that works for 32 and 64
52 * bit guests. */ 52 * bit guests. */
53typedef unsigned long xen_pfn_t; 53typedef unsigned long xen_pfn_t;
54#define PRI_xen_pfn "lx"
54typedef unsigned long xen_ulong_t; 55typedef unsigned long xen_ulong_t;
56#define PRI_xen_ulong "lx"
55/* Guest handles for primitive C types. */ 57/* Guest handles for primitive C types. */
56__DEFINE_GUEST_HANDLE(uchar, unsigned char); 58__DEFINE_GUEST_HANDLE(uchar, unsigned char);
57__DEFINE_GUEST_HANDLE(uint, unsigned int); 59__DEFINE_GUEST_HANDLE(uint, unsigned int);
58__DEFINE_GUEST_HANDLE(ulong, unsigned long);
59DEFINE_GUEST_HANDLE(char); 60DEFINE_GUEST_HANDLE(char);
60DEFINE_GUEST_HANDLE(int); 61DEFINE_GUEST_HANDLE(int);
61DEFINE_GUEST_HANDLE(long);
62DEFINE_GUEST_HANDLE(void); 62DEFINE_GUEST_HANDLE(void);
63DEFINE_GUEST_HANDLE(uint64_t); 63DEFINE_GUEST_HANDLE(uint64_t);
64DEFINE_GUEST_HANDLE(uint32_t); 64DEFINE_GUEST_HANDLE(uint32_t);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c265593ec2cd..1817fa911024 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2257,6 +2257,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2257 continue; 2257 continue;
2258 2258
2259 cfg = irq_cfg(irq); 2259 cfg = irq_cfg(irq);
2260 if (!cfg)
2261 continue;
2262
2260 raw_spin_lock(&desc->lock); 2263 raw_spin_lock(&desc->lock);
2261 2264
2262 /* 2265 /*
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9a7c90d80bc4..93c5451bdd52 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -991,7 +991,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
991 if (attrs) 991 if (attrs)
992 return attrs; 992 return attrs;
993 993
994 n = sizeof (default_attrs) / sizeof (struct attribute *); 994 n = ARRAY_SIZE(default_attrs);
995 995
996 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) 996 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
997 n += 2; 997 n += 2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 29e87d3b2843..46cbf8689692 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2209,11 +2209,6 @@ static struct dev_ext_attribute dev_attr_cmci_disabled = {
2209 &mce_cmci_disabled 2209 &mce_cmci_disabled
2210}; 2210};
2211 2211
2212static struct dev_ext_attribute dev_attr_bios_cmci_threshold = {
2213 __ATTR(bios_cmci_threshold, 0444, device_show_int, NULL),
2214 &mce_bios_cmci_threshold
2215};
2216
2217static struct device_attribute *mce_device_attrs[] = { 2212static struct device_attribute *mce_device_attrs[] = {
2218 &dev_attr_tolerant.attr, 2213 &dev_attr_tolerant.attr,
2219 &dev_attr_check_interval.attr, 2214 &dev_attr_check_interval.attr,
@@ -2222,7 +2217,6 @@ static struct device_attribute *mce_device_attrs[] = {
2222 &dev_attr_dont_log_ce.attr, 2217 &dev_attr_dont_log_ce.attr,
2223 &dev_attr_ignore_ce.attr, 2218 &dev_attr_ignore_ce.attr,
2224 &dev_attr_cmci_disabled.attr, 2219 &dev_attr_cmci_disabled.attr,
2225 &dev_attr_bios_cmci_threshold.attr,
2226 NULL 2220 NULL
2227}; 2221};
2228 2222
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index c4e916d77378..698b6ec12e0f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -576,12 +576,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
576 int err = 0; 576 int err = 0;
577 577
578 if (shared_bank[bank]) { 578 if (shared_bank[bank]) {
579
580 nb = node_to_amd_nb(amd_get_nb_id(cpu)); 579 nb = node_to_amd_nb(amd_get_nb_id(cpu));
581 WARN_ON(!nb);
582 580
583 /* threshold descriptor already initialized on this node? */ 581 /* threshold descriptor already initialized on this node? */
584 if (nb->bank4) { 582 if (nb && nb->bank4) {
585 /* yes, use it */ 583 /* yes, use it */
586 b = nb->bank4; 584 b = nb->bank4;
587 err = kobject_add(b->kobj, &dev->kobj, name); 585 err = kobject_add(b->kobj, &dev->kobj, name);
@@ -615,8 +613,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
615 atomic_set(&b->cpus, 1); 613 atomic_set(&b->cpus, 1);
616 614
617 /* nb is already initialized, see above */ 615 /* nb is already initialized, see above */
618 WARN_ON(nb->bank4); 616 if (nb) {
619 nb->bank4 = b; 617 WARN_ON(nb->bank4);
618 nb->bank4 = b;
619 }
620 } 620 }
621 621
622 err = allocate_threshold_blocks(cpu, bank, 0, 622 err = allocate_threshold_blocks(cpu, bank, 0,
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 915b876edd1e..4a3374e61a93 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -208,12 +208,14 @@ static bool check_hw_exists(void)
208 } 208 }
209 209
210 /* 210 /*
211 * Now write a value and read it back to see if it matches, 211 * Read the current value, change it and read it back to see if it
212 * this is needed to detect certain hardware emulators (qemu/kvm) 212 * matches, this is needed to detect certain hardware emulators
213 * that don't trap on the MSR access and always return 0s. 213 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
214 */ 214 */
215 val = 0xabcdUL;
216 reg = x86_pmu_event_addr(0); 215 reg = x86_pmu_event_addr(0);
216 if (rdmsrl_safe(reg, &val))
217 goto msr_fail;
218 val ^= 0xffffUL;
217 ret = wrmsrl_safe(reg, val); 219 ret = wrmsrl_safe(reg, val);
218 ret |= rdmsrl_safe(reg, &val_new); 220 ret |= rdmsrl_safe(reg, &val_new);
219 if (ret || val != val_new) 221 if (ret || val != val_new)
@@ -338,6 +340,9 @@ int x86_setup_perfctr(struct perf_event *event)
338 /* BTS is currently only allowed for user-mode. */ 340 /* BTS is currently only allowed for user-mode. */
339 if (!attr->exclude_kernel) 341 if (!attr->exclude_kernel)
340 return -EOPNOTSUPP; 342 return -EOPNOTSUPP;
343
344 if (!attr->exclude_guest)
345 return -EOPNOTSUPP;
341 } 346 }
342 347
343 hwc->config |= config; 348 hwc->config |= config;
@@ -380,6 +385,9 @@ int x86_pmu_hw_config(struct perf_event *event)
380 if (event->attr.precise_ip) { 385 if (event->attr.precise_ip) {
381 int precise = 0; 386 int precise = 0;
382 387
388 if (!event->attr.exclude_guest)
389 return -EOPNOTSUPP;
390
383 /* Support for constant skid */ 391 /* Support for constant skid */
384 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { 392 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
385 precise++; 393 precise++;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 99d96a4978b5..3cf3d97cce3a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -118,22 +118,24 @@ static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
118{ 118{
119 struct pci_dev *pdev = box->pci_dev; 119 struct pci_dev *pdev = box->pci_dev;
120 int box_ctl = uncore_pci_box_ctl(box); 120 int box_ctl = uncore_pci_box_ctl(box);
121 u32 config; 121 u32 config = 0;
122 122
123 pci_read_config_dword(pdev, box_ctl, &config); 123 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
124 config |= SNBEP_PMON_BOX_CTL_FRZ; 124 config |= SNBEP_PMON_BOX_CTL_FRZ;
125 pci_write_config_dword(pdev, box_ctl, config); 125 pci_write_config_dword(pdev, box_ctl, config);
126 }
126} 127}
127 128
128static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) 129static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
129{ 130{
130 struct pci_dev *pdev = box->pci_dev; 131 struct pci_dev *pdev = box->pci_dev;
131 int box_ctl = uncore_pci_box_ctl(box); 132 int box_ctl = uncore_pci_box_ctl(box);
132 u32 config; 133 u32 config = 0;
133 134
134 pci_read_config_dword(pdev, box_ctl, &config); 135 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
135 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 136 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
136 pci_write_config_dword(pdev, box_ctl, config); 137 pci_write_config_dword(pdev, box_ctl, config);
138 }
137} 139}
138 140
139static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) 141static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
@@ -156,7 +158,7 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct pe
156{ 158{
157 struct pci_dev *pdev = box->pci_dev; 159 struct pci_dev *pdev = box->pci_dev;
158 struct hw_perf_event *hwc = &event->hw; 160 struct hw_perf_event *hwc = &event->hw;
159 u64 count; 161 u64 count = 0;
160 162
161 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); 163 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
162 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); 164 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
@@ -603,11 +605,12 @@ static struct pci_driver snbep_uncore_pci_driver = {
603/* 605/*
604 * build pci bus to socket mapping 606 * build pci bus to socket mapping
605 */ 607 */
606static void snbep_pci2phy_map_init(void) 608static int snbep_pci2phy_map_init(void)
607{ 609{
608 struct pci_dev *ubox_dev = NULL; 610 struct pci_dev *ubox_dev = NULL;
609 int i, bus, nodeid; 611 int i, bus, nodeid;
610 u32 config; 612 int err = 0;
613 u32 config = 0;
611 614
612 while (1) { 615 while (1) {
613 /* find the UBOX device */ 616 /* find the UBOX device */
@@ -618,10 +621,14 @@ static void snbep_pci2phy_map_init(void)
618 break; 621 break;
619 bus = ubox_dev->bus->number; 622 bus = ubox_dev->bus->number;
620 /* get the Node ID of the local register */ 623 /* get the Node ID of the local register */
621 pci_read_config_dword(ubox_dev, 0x40, &config); 624 err = pci_read_config_dword(ubox_dev, 0x40, &config);
625 if (err)
626 break;
622 nodeid = config; 627 nodeid = config;
623 /* get the Node ID mapping */ 628 /* get the Node ID mapping */
624 pci_read_config_dword(ubox_dev, 0x54, &config); 629 err = pci_read_config_dword(ubox_dev, 0x54, &config);
630 if (err)
631 break;
625 /* 632 /*
626 * every three bits in the Node ID mapping register maps 633 * every three bits in the Node ID mapping register maps
627 * to a particular node. 634 * to a particular node.
@@ -633,7 +640,11 @@ static void snbep_pci2phy_map_init(void)
633 } 640 }
634 } 641 }
635 }; 642 };
636 return; 643
644 if (ubox_dev)
645 pci_dev_put(ubox_dev);
646
647 return err ? pcibios_err_to_errno(err) : 0;
637} 648}
638/* end of Sandy Bridge-EP uncore support */ 649/* end of Sandy Bridge-EP uncore support */
639 650
@@ -1547,7 +1558,6 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1547{ 1558{
1548 struct hw_perf_event *hwc = &event->hw; 1559 struct hw_perf_event *hwc = &event->hw;
1549 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 1560 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1550 int port;
1551 1561
1552 /* adjust the main event selector and extra register index */ 1562 /* adjust the main event selector and extra register index */
1553 if (reg1->idx % 2) { 1563 if (reg1->idx % 2) {
@@ -1559,7 +1569,6 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1559 } 1569 }
1560 1570
1561 /* adjust extra register config */ 1571 /* adjust extra register config */
1562 port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
1563 switch (reg1->idx % 6) { 1572 switch (reg1->idx % 6) {
1564 case 2: 1573 case 2:
1565 /* shift the 8~15 bits to the 0~7 bits */ 1574 /* shift the 8~15 bits to the 0~7 bits */
@@ -2578,9 +2587,11 @@ static int __init uncore_pci_init(void)
2578 2587
2579 switch (boot_cpu_data.x86_model) { 2588 switch (boot_cpu_data.x86_model) {
2580 case 45: /* Sandy Bridge-EP */ 2589 case 45: /* Sandy Bridge-EP */
2590 ret = snbep_pci2phy_map_init();
2591 if (ret)
2592 return ret;
2581 pci_uncores = snbep_pci_uncores; 2593 pci_uncores = snbep_pci_uncores;
2582 uncore_pci_driver = &snbep_uncore_pci_driver; 2594 uncore_pci_driver = &snbep_uncore_pci_driver;
2583 snbep_pci2phy_map_init();
2584 break; 2595 break;
2585 default: 2596 default:
2586 return 0; 2597 return 0;
@@ -2926,6 +2937,9 @@ static int __init intel_uncore_init(void)
2926 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 2937 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2927 return -ENODEV; 2938 return -ENODEV;
2928 2939
2940 if (cpu_has_hypervisor)
2941 return -ENODEV;
2942
2929 ret = uncore_pci_init(); 2943 ret = uncore_pci_init();
2930 if (ret) 2944 if (ret)
2931 goto fail; 2945 goto fail;
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 7c46bfdbc373..4b7731bf23a8 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -3,6 +3,8 @@
3#include <linux/perf_event.h> 3#include <linux/perf_event.h>
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#include <asm/hardirq.h>
7
6#include "perf_event.h" 8#include "perf_event.h"
7 9
8static const u64 knc_perfmon_event_map[] = 10static const u64 knc_perfmon_event_map[] =
@@ -173,30 +175,100 @@ static void knc_pmu_enable_all(int added)
173static inline void 175static inline void
174knc_pmu_disable_event(struct perf_event *event) 176knc_pmu_disable_event(struct perf_event *event)
175{ 177{
176 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
177 struct hw_perf_event *hwc = &event->hw; 178 struct hw_perf_event *hwc = &event->hw;
178 u64 val; 179 u64 val;
179 180
180 val = hwc->config; 181 val = hwc->config;
181 if (cpuc->enabled) 182 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
182 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
183 183
184 (void)wrmsrl_safe(hwc->config_base + hwc->idx, val); 184 (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
185} 185}
186 186
187static void knc_pmu_enable_event(struct perf_event *event) 187static void knc_pmu_enable_event(struct perf_event *event)
188{ 188{
189 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
190 struct hw_perf_event *hwc = &event->hw; 189 struct hw_perf_event *hwc = &event->hw;
191 u64 val; 190 u64 val;
192 191
193 val = hwc->config; 192 val = hwc->config;
194 if (cpuc->enabled) 193 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
195 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
196 194
197 (void)wrmsrl_safe(hwc->config_base + hwc->idx, val); 195 (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
198} 196}
199 197
198static inline u64 knc_pmu_get_status(void)
199{
200 u64 status;
201
202 rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
203
204 return status;
205}
206
207static inline void knc_pmu_ack_status(u64 ack)
208{
209 wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);
210}
211
212static int knc_pmu_handle_irq(struct pt_regs *regs)
213{
214 struct perf_sample_data data;
215 struct cpu_hw_events *cpuc;
216 int handled = 0;
217 int bit, loops;
218 u64 status;
219
220 cpuc = &__get_cpu_var(cpu_hw_events);
221
222 knc_pmu_disable_all();
223
224 status = knc_pmu_get_status();
225 if (!status) {
226 knc_pmu_enable_all(0);
227 return handled;
228 }
229
230 loops = 0;
231again:
232 knc_pmu_ack_status(status);
233 if (++loops > 100) {
234 WARN_ONCE(1, "perf: irq loop stuck!\n");
235 perf_event_print_debug();
236 goto done;
237 }
238
239 inc_irq_stat(apic_perf_irqs);
240
241 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
242 struct perf_event *event = cpuc->events[bit];
243
244 handled++;
245
246 if (!test_bit(bit, cpuc->active_mask))
247 continue;
248
249 if (!intel_pmu_save_and_restart(event))
250 continue;
251
252 perf_sample_data_init(&data, 0, event->hw.last_period);
253
254 if (perf_event_overflow(event, &data, regs))
255 x86_pmu_stop(event, 0);
256 }
257
258 /*
259 * Repeat if there is more work to be done:
260 */
261 status = knc_pmu_get_status();
262 if (status)
263 goto again;
264
265done:
266 knc_pmu_enable_all(0);
267
268 return handled;
269}
270
271
200PMU_FORMAT_ATTR(event, "config:0-7" ); 272PMU_FORMAT_ATTR(event, "config:0-7" );
201PMU_FORMAT_ATTR(umask, "config:8-15" ); 273PMU_FORMAT_ATTR(umask, "config:8-15" );
202PMU_FORMAT_ATTR(edge, "config:18" ); 274PMU_FORMAT_ATTR(edge, "config:18" );
@@ -214,7 +286,7 @@ static struct attribute *intel_knc_formats_attr[] = {
214 286
215static __initconst struct x86_pmu knc_pmu = { 287static __initconst struct x86_pmu knc_pmu = {
216 .name = "knc", 288 .name = "knc",
217 .handle_irq = x86_pmu_handle_irq, 289 .handle_irq = knc_pmu_handle_irq,
218 .disable_all = knc_pmu_disable_all, 290 .disable_all = knc_pmu_disable_all,
219 .enable_all = knc_pmu_enable_all, 291 .enable_all = knc_pmu_enable_all,
220 .enable = knc_pmu_enable_event, 292 .enable = knc_pmu_enable_event,
@@ -226,12 +298,11 @@ static __initconst struct x86_pmu knc_pmu = {
226 .event_map = knc_pmu_event_map, 298 .event_map = knc_pmu_event_map,
227 .max_events = ARRAY_SIZE(knc_perfmon_event_map), 299 .max_events = ARRAY_SIZE(knc_perfmon_event_map),
228 .apic = 1, 300 .apic = 1,
229 .max_period = (1ULL << 31) - 1, 301 .max_period = (1ULL << 39) - 1,
230 .version = 0, 302 .version = 0,
231 .num_counters = 2, 303 .num_counters = 2,
232 /* in theory 40 bits, early silicon is buggy though */ 304 .cntval_bits = 40,
233 .cntval_bits = 32, 305 .cntval_mask = (1ULL << 40) - 1,
234 .cntval_mask = (1ULL << 32) - 1,
235 .get_event_constraints = x86_get_event_constraints, 306 .get_event_constraints = x86_get_event_constraints,
236 .event_constraints = knc_event_constraints, 307 .event_constraints = knc_event_constraints,
237 .format_attrs = intel_knc_formats_attr, 308 .format_attrs = intel_knc_formats_attr,
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index e4dd0f7a0453..7d0270bd793e 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -8,13 +8,106 @@
8 */ 8 */
9static const u64 p6_perfmon_event_map[] = 9static const u64 p6_perfmon_event_map[] =
10{ 10{
11 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, 11 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, /* CPU_CLK_UNHALTED */
12 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 12 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, /* INST_RETIRED */
13 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, 13 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, /* L2_RQSTS:M:E:S:I */
14 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, 14 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, /* L2_RQSTS:I */
15 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, 15 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, /* BR_INST_RETIRED */
16 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 16 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, /* BR_MISS_PRED_RETIRED */
17 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, 17 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, /* BUS_DRDY_CLOCKS */
18 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a2, /* RESOURCE_STALLS */
19
20};
21
22static __initconst u64 p6_hw_cache_event_ids
23 [PERF_COUNT_HW_CACHE_MAX]
24 [PERF_COUNT_HW_CACHE_OP_MAX]
25 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
26{
27 [ C(L1D) ] = {
28 [ C(OP_READ) ] = {
29 [ C(RESULT_ACCESS) ] = 0x0043, /* DATA_MEM_REFS */
30 [ C(RESULT_MISS) ] = 0x0045, /* DCU_LINES_IN */
31 },
32 [ C(OP_WRITE) ] = {
33 [ C(RESULT_ACCESS) ] = 0,
34 [ C(RESULT_MISS) ] = 0x0f29, /* L2_LD:M:E:S:I */
35 },
36 [ C(OP_PREFETCH) ] = {
37 [ C(RESULT_ACCESS) ] = 0,
38 [ C(RESULT_MISS) ] = 0,
39 },
40 },
41 [ C(L1I ) ] = {
42 [ C(OP_READ) ] = {
43 [ C(RESULT_ACCESS) ] = 0x0080, /* IFU_IFETCH */
44 [ C(RESULT_MISS) ] = 0x0f28, /* L2_IFETCH:M:E:S:I */
45 },
46 [ C(OP_WRITE) ] = {
47 [ C(RESULT_ACCESS) ] = -1,
48 [ C(RESULT_MISS) ] = -1,
49 },
50 [ C(OP_PREFETCH) ] = {
51 [ C(RESULT_ACCESS) ] = 0,
52 [ C(RESULT_MISS) ] = 0,
53 },
54 },
55 [ C(LL ) ] = {
56 [ C(OP_READ) ] = {
57 [ C(RESULT_ACCESS) ] = 0,
58 [ C(RESULT_MISS) ] = 0,
59 },
60 [ C(OP_WRITE) ] = {
61 [ C(RESULT_ACCESS) ] = 0,
62 [ C(RESULT_MISS) ] = 0x0025, /* L2_M_LINES_INM */
63 },
64 [ C(OP_PREFETCH) ] = {
65 [ C(RESULT_ACCESS) ] = 0,
66 [ C(RESULT_MISS) ] = 0,
67 },
68 },
69 [ C(DTLB) ] = {
70 [ C(OP_READ) ] = {
71 [ C(RESULT_ACCESS) ] = 0x0043, /* DATA_MEM_REFS */
72 [ C(RESULT_MISS) ] = 0,
73 },
74 [ C(OP_WRITE) ] = {
75 [ C(RESULT_ACCESS) ] = 0,
76 [ C(RESULT_MISS) ] = 0,
77 },
78 [ C(OP_PREFETCH) ] = {
79 [ C(RESULT_ACCESS) ] = 0,
80 [ C(RESULT_MISS) ] = 0,
81 },
82 },
83 [ C(ITLB) ] = {
84 [ C(OP_READ) ] = {
85 [ C(RESULT_ACCESS) ] = 0x0080, /* IFU_IFETCH */
86 [ C(RESULT_MISS) ] = 0x0085, /* ITLB_MISS */
87 },
88 [ C(OP_WRITE) ] = {
89 [ C(RESULT_ACCESS) ] = -1,
90 [ C(RESULT_MISS) ] = -1,
91 },
92 [ C(OP_PREFETCH) ] = {
93 [ C(RESULT_ACCESS) ] = -1,
94 [ C(RESULT_MISS) ] = -1,
95 },
96 },
97 [ C(BPU ) ] = {
98 [ C(OP_READ) ] = {
99 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED */
100 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISS_PRED_RETIRED */
101 },
102 [ C(OP_WRITE) ] = {
103 [ C(RESULT_ACCESS) ] = -1,
104 [ C(RESULT_MISS) ] = -1,
105 },
106 [ C(OP_PREFETCH) ] = {
107 [ C(RESULT_ACCESS) ] = -1,
108 [ C(RESULT_MISS) ] = -1,
109 },
110 },
18}; 111};
19 112
20static u64 p6_pmu_event_map(int hw_event) 113static u64 p6_pmu_event_map(int hw_event)
@@ -34,7 +127,7 @@ static struct event_constraint p6_event_constraints[] =
34{ 127{
35 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ 128 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
36 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ 129 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
37 INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ 130 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
38 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ 131 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
39 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ 132 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
40 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ 133 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
@@ -64,25 +157,25 @@ static void p6_pmu_enable_all(int added)
64static inline void 157static inline void
65p6_pmu_disable_event(struct perf_event *event) 158p6_pmu_disable_event(struct perf_event *event)
66{ 159{
67 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
68 struct hw_perf_event *hwc = &event->hw; 160 struct hw_perf_event *hwc = &event->hw;
69 u64 val = P6_NOP_EVENT; 161 u64 val = P6_NOP_EVENT;
70 162
71 if (cpuc->enabled)
72 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
73
74 (void)wrmsrl_safe(hwc->config_base, val); 163 (void)wrmsrl_safe(hwc->config_base, val);
75} 164}
76 165
77static void p6_pmu_enable_event(struct perf_event *event) 166static void p6_pmu_enable_event(struct perf_event *event)
78{ 167{
79 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
80 struct hw_perf_event *hwc = &event->hw; 168 struct hw_perf_event *hwc = &event->hw;
81 u64 val; 169 u64 val;
82 170
83 val = hwc->config; 171 val = hwc->config;
84 if (cpuc->enabled) 172
85 val |= ARCH_PERFMON_EVENTSEL_ENABLE; 173 /*
174 * p6 only has a global event enable, set on PerfEvtSel0
175 * We "disable" events by programming P6_NOP_EVENT
176 * and we rely on p6_pmu_enable_all() being called
177 * to actually enable the events.
178 */
86 179
87 (void)wrmsrl_safe(hwc->config_base, val); 180 (void)wrmsrl_safe(hwc->config_base, val);
88} 181}
@@ -158,5 +251,9 @@ __init int p6_pmu_init(void)
158 251
159 x86_pmu = p6_pmu; 252 x86_pmu = p6_pmu;
160 253
254 memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
255 sizeof(hw_cache_event_ids));
256
257
161 return 0; 258 return 0;
162} 259}
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ed858e9e9a74..df06ade26bef 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1077,6 +1077,9 @@ void __init memblock_x86_fill(void)
1077 memblock_add(ei->addr, ei->size); 1077 memblock_add(ei->addr, ei->size);
1078 } 1078 }
1079 1079
1080 /* throw away partial pages */
1081 memblock_trim_memory(PAGE_SIZE);
1082
1080 memblock_dump_all(); 1083 memblock_dump_all();
1081} 1084}
1082 1085
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index a1193aef6d7d..88b725aa1d52 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1035,7 +1035,7 @@ ENTRY(xen_sysenter_target)
1035 1035
1036ENTRY(xen_hypervisor_callback) 1036ENTRY(xen_hypervisor_callback)
1037 CFI_STARTPROC 1037 CFI_STARTPROC
1038 pushl_cfi $0 1038 pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1039 SAVE_ALL 1039 SAVE_ALL
1040 TRACE_IRQS_OFF 1040 TRACE_IRQS_OFF
1041 1041
@@ -1077,14 +1077,16 @@ ENTRY(xen_failsafe_callback)
10772: mov 8(%esp),%es 10772: mov 8(%esp),%es
10783: mov 12(%esp),%fs 10783: mov 12(%esp),%fs
10794: mov 16(%esp),%gs 10794: mov 16(%esp),%gs
1080 /* EAX == 0 => Category 1 (Bad segment)
1081 EAX != 0 => Category 2 (Bad IRET) */
1080 testl %eax,%eax 1082 testl %eax,%eax
1081 popl_cfi %eax 1083 popl_cfi %eax
1082 lea 16(%esp),%esp 1084 lea 16(%esp),%esp
1083 CFI_ADJUST_CFA_OFFSET -16 1085 CFI_ADJUST_CFA_OFFSET -16
1084 jz 5f 1086 jz 5f
1085 addl $16,%esp 1087 addl $16,%esp
1086 jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) 1088 jmp iret_exc
10875: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment) 10895: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1088 SAVE_ALL 1090 SAVE_ALL
1089 jmp ret_from_exception 1091 jmp ret_from_exception
1090 CFI_ENDPROC 1092 CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 0c58952d64e8..b51b2c7ee51f 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1435,7 +1435,7 @@ ENTRY(xen_failsafe_callback)
1435 CFI_RESTORE r11 1435 CFI_RESTORE r11
1436 addq $0x30,%rsp 1436 addq $0x30,%rsp
1437 CFI_ADJUST_CFA_OFFSET -0x30 1437 CFI_ADJUST_CFA_OFFSET -0x30
1438 pushq_cfi $0 1438 pushq_cfi $-1 /* orig_ax = -1 => not a system call */
1439 SAVE_ALL 1439 SAVE_ALL
1440 jmp error_exit 1440 jmp error_exit
1441 CFI_ENDPROC 1441 CFI_ENDPROC
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b3e5e51bc907..4180a874c764 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -247,7 +247,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
247 break; 247 break;
248 case KVM_PV_REASON_PAGE_NOT_PRESENT: 248 case KVM_PV_REASON_PAGE_NOT_PRESENT:
249 /* page is swapped out by the host. */ 249 /* page is swapped out by the host. */
250 rcu_irq_enter();
251 exit_idle();
250 kvm_async_pf_task_wait((u32)read_cr2()); 252 kvm_async_pf_task_wait((u32)read_cr2());
253 rcu_irq_exit();
251 break; 254 break;
252 case KVM_PV_REASON_PAGE_READY: 255 case KVM_PV_REASON_PAGE_READY:
253 rcu_irq_enter(); 256 rcu_irq_enter();
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 52190a938b4a..4e8ba39eaf0f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -358,14 +358,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
358 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), 358 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
359 }, 359 },
360 }, 360 },
361 { /* Handle problems with rebooting on CompuLab SBC-FITPC2 */
362 .callback = set_bios_reboot,
363 .ident = "CompuLab SBC-FITPC2",
364 .matches = {
365 DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"),
366 DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
367 },
368 },
369 { /* Handle problems with rebooting on ASUS P4S800 */ 361 { /* Handle problems with rebooting on ASUS P4S800 */
370 .callback = set_bios_reboot, 362 .callback = set_bios_reboot,
371 .ident = "ASUS P4S800", 363 .ident = "ASUS P4S800",
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a2bb18e02839..ca45696f30fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -920,8 +920,22 @@ void __init setup_arch(char **cmdline_p)
920 920
921#ifdef CONFIG_X86_64 921#ifdef CONFIG_X86_64
922 if (max_pfn > max_low_pfn) { 922 if (max_pfn > max_low_pfn) {
923 max_pfn_mapped = init_memory_mapping(1UL<<32, 923 int i;
924 max_pfn<<PAGE_SHIFT); 924 unsigned long start, end;
925 unsigned long start_pfn, end_pfn;
926
927 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
928 NULL) {
929
930 end = PFN_PHYS(end_pfn);
931 if (end <= (1UL<<32))
932 continue;
933
934 start = PFN_PHYS(start_pfn);
935 max_pfn_mapped = init_memory_mapping(
936 max((1UL<<32), start), end);
937 }
938
925 /* can we preseve max_low_pfn ?*/ 939 /* can we preseve max_low_pfn ?*/
926 max_low_pfn = max_pfn; 940 max_low_pfn = max_pfn;
927 } 941 }
@@ -1035,6 +1049,18 @@ void __init setup_arch(char **cmdline_p)
1035 arch_init_ideal_nops(); 1049 arch_init_ideal_nops();
1036 1050
1037 register_refined_jiffies(CLOCK_TICK_RATE); 1051 register_refined_jiffies(CLOCK_TICK_RATE);
1052
1053#ifdef CONFIG_EFI
1054 /* Once setup is done above, disable efi_enabled on mismatched
1055 * firmware/kernel archtectures since there is no support for
1056 * runtime services.
1057 */
1058 if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
1059 pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
1060 efi_unmap_memmap();
1061 efi_enabled = 0;
1062 }
1063#endif
1038} 1064}
1039 1065
1040#ifdef CONFIG_X86_32 1066#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 29ad351804e9..70b27ee6118e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -824,10 +824,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
824 mce_notify_process(); 824 mce_notify_process();
825#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ 825#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
826 826
827 if (thread_info_flags & _TIF_UPROBE) { 827 if (thread_info_flags & _TIF_UPROBE)
828 clear_thread_flag(TIF_UPROBE);
829 uprobe_notify_resume(regs); 828 uprobe_notify_resume(regs);
830 }
831 829
832 /* deal with pending signal delivery */ 830 /* deal with pending signal delivery */
833 if (thread_info_flags & _TIF_SIGPENDING) 831 if (thread_info_flags & _TIF_SIGPENDING)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 9538f00827a9..aafa5557b396 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -651,31 +651,19 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
651 651
652/* 652/*
653 * Skip these instructions as per the currently known x86 ISA. 653 * Skip these instructions as per the currently known x86 ISA.
654 * 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 } 654 * rep=0x66*; nop=0x90
655 */ 655 */
656static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) 656static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
657{ 657{
658 int i; 658 int i;
659 659
660 for (i = 0; i < MAX_UINSN_BYTES; i++) { 660 for (i = 0; i < MAX_UINSN_BYTES; i++) {
661 if ((auprobe->insn[i] == 0x66)) 661 if (auprobe->insn[i] == 0x66)
662 continue; 662 continue;
663 663
664 if (auprobe->insn[i] == 0x90) 664 if (auprobe->insn[i] == 0x90)
665 return true; 665 return true;
666 666
667 if (i == (MAX_UINSN_BYTES - 1))
668 break;
669
670 if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x1f))
671 return true;
672
673 if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x19))
674 return true;
675
676 if ((auprobe->insn[i] == 0x87) && (auprobe->insn[i+1] == 0xc0))
677 return true;
678
679 break; 667 break;
680 } 668 }
681 return false; 669 return false;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c6e6b721b6ee..43e9fadca5d0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1311,7 +1311,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1311 vcpu->arch.apic_base = value; 1311 vcpu->arch.apic_base = value;
1312 if (apic_x2apic_mode(apic)) { 1312 if (apic_x2apic_mode(apic)) {
1313 u32 id = kvm_apic_id(apic); 1313 u32 id = kvm_apic_id(apic);
1314 u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf)); 1314 u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
1315 kvm_apic_set_ldr(apic, ldr); 1315 kvm_apic_set_ldr(apic, ldr);
1316 } 1316 }
1317 apic->base_address = apic->vcpu->arch.apic_base & 1317 apic->base_address = apic->vcpu->arch.apic_base &
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d289fee1ffb8..6f85fe0bf958 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2497,8 +2497,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2497 } 2497 }
2498 } 2498 }
2499 2499
2500 if (!is_error_pfn(pfn)) 2500 kvm_release_pfn_clean(pfn);
2501 kvm_release_pfn_clean(pfn);
2502} 2501}
2503 2502
2504static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) 2503static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1eefebe5d727..224a7e78cb6c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3779,7 +3779,7 @@ static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3779{ 3779{
3780 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 3780 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
3781 3781
3782 memcpy(vcpu->run->mmio.data, frag->data, frag->len); 3782 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
3783 return X86EMUL_CONTINUE; 3783 return X86EMUL_CONTINUE;
3784} 3784}
3785 3785
@@ -3832,18 +3832,11 @@ mmio:
3832 bytes -= handled; 3832 bytes -= handled;
3833 val += handled; 3833 val += handled;
3834 3834
3835 while (bytes) { 3835 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
3836 unsigned now = min(bytes, 8U); 3836 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
3837 3837 frag->gpa = gpa;
3838 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 3838 frag->data = val;
3839 frag->gpa = gpa; 3839 frag->len = bytes;
3840 frag->data = val;
3841 frag->len = now;
3842
3843 gpa += now;
3844 val += now;
3845 bytes -= now;
3846 }
3847 return X86EMUL_CONTINUE; 3840 return X86EMUL_CONTINUE;
3848} 3841}
3849 3842
@@ -3890,7 +3883,7 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
3890 vcpu->mmio_needed = 1; 3883 vcpu->mmio_needed = 1;
3891 vcpu->mmio_cur_fragment = 0; 3884 vcpu->mmio_cur_fragment = 0;
3892 3885
3893 vcpu->run->mmio.len = vcpu->mmio_fragments[0].len; 3886 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
3894 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 3887 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
3895 vcpu->run->exit_reason = KVM_EXIT_MMIO; 3888 vcpu->run->exit_reason = KVM_EXIT_MMIO;
3896 vcpu->run->mmio.phys_addr = gpa; 3889 vcpu->run->mmio.phys_addr = gpa;
@@ -5522,28 +5515,44 @@ static int complete_emulated_pio(struct kvm_vcpu *vcpu)
5522 * 5515 *
5523 * read: 5516 * read:
5524 * for each fragment 5517 * for each fragment
5525 * write gpa, len 5518 * for each mmio piece in the fragment
5526 * exit 5519 * write gpa, len
5527 * copy data 5520 * exit
5521 * copy data
5528 * execute insn 5522 * execute insn
5529 * 5523 *
5530 * write: 5524 * write:
5531 * for each fragment 5525 * for each fragment
5532 * write gpa, len 5526 * for each mmio piece in the fragment
5533 * copy data 5527 * write gpa, len
5534 * exit 5528 * copy data
5529 * exit
5535 */ 5530 */
5536static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 5531static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
5537{ 5532{
5538 struct kvm_run *run = vcpu->run; 5533 struct kvm_run *run = vcpu->run;
5539 struct kvm_mmio_fragment *frag; 5534 struct kvm_mmio_fragment *frag;
5535 unsigned len;
5540 5536
5541 BUG_ON(!vcpu->mmio_needed); 5537 BUG_ON(!vcpu->mmio_needed);
5542 5538
5543 /* Complete previous fragment */ 5539 /* Complete previous fragment */
5544 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++]; 5540 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
5541 len = min(8u, frag->len);
5545 if (!vcpu->mmio_is_write) 5542 if (!vcpu->mmio_is_write)
5546 memcpy(frag->data, run->mmio.data, frag->len); 5543 memcpy(frag->data, run->mmio.data, len);
5544
5545 if (frag->len <= 8) {
5546 /* Switch to the next fragment. */
5547 frag++;
5548 vcpu->mmio_cur_fragment++;
5549 } else {
5550 /* Go forward to the next mmio piece. */
5551 frag->data += len;
5552 frag->gpa += len;
5553 frag->len -= len;
5554 }
5555
5547 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { 5556 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
5548 vcpu->mmio_needed = 0; 5557 vcpu->mmio_needed = 0;
5549 if (vcpu->mmio_is_write) 5558 if (vcpu->mmio_is_write)
@@ -5551,13 +5560,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
5551 vcpu->mmio_read_completed = 1; 5560 vcpu->mmio_read_completed = 1;
5552 return complete_emulated_io(vcpu); 5561 return complete_emulated_io(vcpu);
5553 } 5562 }
5554 /* Initiate next fragment */ 5563
5555 ++frag;
5556 run->exit_reason = KVM_EXIT_MMIO; 5564 run->exit_reason = KVM_EXIT_MMIO;
5557 run->mmio.phys_addr = frag->gpa; 5565 run->mmio.phys_addr = frag->gpa;
5558 if (vcpu->mmio_is_write) 5566 if (vcpu->mmio_is_write)
5559 memcpy(run->mmio.data, frag->data, frag->len); 5567 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
5560 run->mmio.len = frag->len; 5568 run->mmio.len = min(8u, frag->len);
5561 run->mmio.is_write = vcpu->mmio_is_write; 5569 run->mmio.is_write = vcpu->mmio_is_write;
5562 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 5570 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
5563 return 0; 5571 return 0;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ab1f6a93b527..d7aea41563b3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -35,40 +35,44 @@ struct map_range {
35 unsigned page_size_mask; 35 unsigned page_size_mask;
36}; 36};
37 37
38static void __init find_early_table_space(struct map_range *mr, unsigned long end, 38/*
39 int use_pse, int use_gbpages) 39 * First calculate space needed for kernel direct mapping page tables to cover
40 * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
41 * pages. Then find enough contiguous space for those page tables.
42 */
43static void __init find_early_table_space(struct map_range *mr, int nr_range)
40{ 44{
41 unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; 45 int i;
46 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
47 unsigned long start = 0, good_end;
42 phys_addr_t base; 48 phys_addr_t base;
43 49
44 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 50 for (i = 0; i < nr_range; i++) {
45 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); 51 unsigned long range, extra;
46
47 if (use_gbpages) {
48 unsigned long extra;
49
50 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
51 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
52 } else
53 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
54 52
55 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); 53 range = mr[i].end - mr[i].start;
54 puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
56 55
57 if (use_pse) { 56 if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
58 unsigned long extra; 57 extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
58 pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
59 } else {
60 pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
61 }
59 62
60 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); 63 if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
64 extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
61#ifdef CONFIG_X86_32 65#ifdef CONFIG_X86_32
62 extra += PMD_SIZE; 66 extra += PMD_SIZE;
63#endif 67#endif
64 /* The first 2/4M doesn't use large pages. */ 68 ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
65 if (mr->start < PMD_SIZE) 69 } else {
66 extra += mr->end - mr->start; 70 ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
67 71 }
68 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 72 }
69 } else
70 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
71 73
74 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
75 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
72 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); 76 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
73 77
74#ifdef CONFIG_X86_32 78#ifdef CONFIG_X86_32
@@ -86,7 +90,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
86 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); 90 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
87 91
88 printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n", 92 printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
89 end - 1, pgt_buf_start << PAGE_SHIFT, 93 mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
90 (pgt_buf_top << PAGE_SHIFT) - 1); 94 (pgt_buf_top << PAGE_SHIFT) - 1);
91} 95}
92 96
@@ -267,7 +271,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
267 * nodes are discovered. 271 * nodes are discovered.
268 */ 272 */
269 if (!after_bootmem) 273 if (!after_bootmem)
270 find_early_table_space(&mr[0], end, use_pse, use_gbpages); 274 find_early_table_space(mr, nr_range);
271 275
272 for (i = 0; i < nr_range; i++) 276 for (i = 0; i < nr_range; i++)
273 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, 277 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2b6b4a3c8beb..3baff255adac 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -386,7 +386,8 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
386 * these mappings are more intelligent. 386 * these mappings are more intelligent.
387 */ 387 */
388 if (pte_val(*pte)) { 388 if (pte_val(*pte)) {
389 pages++; 389 if (!after_bootmem)
390 pages++;
390 continue; 391 continue;
391 } 392 }
392 393
@@ -451,6 +452,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
451 * attributes. 452 * attributes.
452 */ 453 */
453 if (page_size_mask & (1 << PG_LEVEL_2M)) { 454 if (page_size_mask & (1 << PG_LEVEL_2M)) {
455 if (!after_bootmem)
456 pages++;
454 last_map_addr = next; 457 last_map_addr = next;
455 continue; 458 continue;
456 } 459 }
@@ -526,6 +529,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
526 * attributes. 529 * attributes.
527 */ 530 */
528 if (page_size_mask & (1 << PG_LEVEL_1G)) { 531 if (page_size_mask & (1 << PG_LEVEL_1G)) {
532 if (!after_bootmem)
533 pages++;
529 last_map_addr = next; 534 last_map_addr = next;
530 continue; 535 continue;
531 } 536 }
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 26b8a8514ee5..48768df2471a 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
55 val |= counter_config->extra; 55 val |= counter_config->extra;
56 event &= model->event_mask ? model->event_mask : 0xFF; 56 event &= model->event_mask ? model->event_mask : 0xFF;
57 val |= event & 0xFF; 57 val |= event & 0xFF;
58 val |= (event & 0x0F00) << 24; 58 val |= (u64)(event & 0x0F00) << 24;
59 59
60 return val; 60 return val;
61} 61}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index aded2a91162a..ad4439145f85 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -70,11 +70,15 @@ EXPORT_SYMBOL(efi);
70struct efi_memory_map memmap; 70struct efi_memory_map memmap;
71 71
72bool efi_64bit; 72bool efi_64bit;
73static bool efi_native;
74 73
75static struct efi efi_phys __initdata; 74static struct efi efi_phys __initdata;
76static efi_system_table_t efi_systab __initdata; 75static efi_system_table_t efi_systab __initdata;
77 76
77static inline bool efi_is_native(void)
78{
79 return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
80}
81
78static int __init setup_noefi(char *arg) 82static int __init setup_noefi(char *arg)
79{ 83{
80 efi_enabled = 0; 84 efi_enabled = 0;
@@ -420,7 +424,7 @@ void __init efi_reserve_boot_services(void)
420 } 424 }
421} 425}
422 426
423static void __init efi_unmap_memmap(void) 427void __init efi_unmap_memmap(void)
424{ 428{
425 if (memmap.map) { 429 if (memmap.map) {
426 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); 430 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
@@ -432,7 +436,7 @@ void __init efi_free_boot_services(void)
432{ 436{
433 void *p; 437 void *p;
434 438
435 if (!efi_native) 439 if (!efi_is_native())
436 return; 440 return;
437 441
438 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 442 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -684,12 +688,10 @@ void __init efi_init(void)
684 return; 688 return;
685 } 689 }
686 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; 690 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
687 efi_native = !efi_64bit;
688#else 691#else
689 efi_phys.systab = (efi_system_table_t *) 692 efi_phys.systab = (efi_system_table_t *)
690 (boot_params.efi_info.efi_systab | 693 (boot_params.efi_info.efi_systab |
691 ((__u64)boot_params.efi_info.efi_systab_hi<<32)); 694 ((__u64)boot_params.efi_info.efi_systab_hi<<32));
692 efi_native = efi_64bit;
693#endif 695#endif
694 696
695 if (efi_systab_init(efi_phys.systab)) { 697 if (efi_systab_init(efi_phys.systab)) {
@@ -723,7 +725,7 @@ void __init efi_init(void)
723 * that doesn't match the kernel 32/64-bit mode. 725 * that doesn't match the kernel 32/64-bit mode.
724 */ 726 */
725 727
726 if (!efi_native) 728 if (!efi_is_native())
727 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); 729 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
728 else if (efi_runtime_init()) { 730 else if (efi_runtime_init()) {
729 efi_enabled = 0; 731 efi_enabled = 0;
@@ -735,7 +737,7 @@ void __init efi_init(void)
735 return; 737 return;
736 } 738 }
737#ifdef CONFIG_X86_32 739#ifdef CONFIG_X86_32
738 if (efi_native) { 740 if (efi_is_native()) {
739 x86_platform.get_wallclock = efi_get_time; 741 x86_platform.get_wallclock = efi_get_time;
740 x86_platform.set_wallclock = efi_set_rtc_mmss; 742 x86_platform.set_wallclock = efi_set_rtc_mmss;
741 } 743 }
@@ -810,6 +812,16 @@ void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
810 return NULL; 812 return NULL;
811} 813}
812 814
815void efi_memory_uc(u64 addr, unsigned long size)
816{
817 unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
818 u64 npages;
819
820 npages = round_up(size, page_shift) / page_shift;
821 memrange_efi_to_native(&addr, &npages);
822 set_memory_uc(addr, npages);
823}
824
813/* 825/*
814 * This function will switch the EFI runtime services to virtual mode. 826 * This function will switch the EFI runtime services to virtual mode.
815 * Essentially, look through the EFI memmap and map every region that 827 * Essentially, look through the EFI memmap and map every region that
@@ -823,7 +835,7 @@ void __init efi_enter_virtual_mode(void)
823 efi_memory_desc_t *md, *prev_md = NULL; 835 efi_memory_desc_t *md, *prev_md = NULL;
824 efi_status_t status; 836 efi_status_t status;
825 unsigned long size; 837 unsigned long size;
826 u64 end, systab, addr, npages, end_pfn; 838 u64 end, systab, end_pfn;
827 void *p, *va, *new_memmap = NULL; 839 void *p, *va, *new_memmap = NULL;
828 int count = 0; 840 int count = 0;
829 841
@@ -834,7 +846,7 @@ void __init efi_enter_virtual_mode(void)
834 * non-native EFI 846 * non-native EFI
835 */ 847 */
836 848
837 if (!efi_native) { 849 if (!efi_is_native()) {
838 efi_unmap_memmap(); 850 efi_unmap_memmap();
839 return; 851 return;
840 } 852 }
@@ -879,10 +891,14 @@ void __init efi_enter_virtual_mode(void)
879 end_pfn = PFN_UP(end); 891 end_pfn = PFN_UP(end);
880 if (end_pfn <= max_low_pfn_mapped 892 if (end_pfn <= max_low_pfn_mapped
881 || (end_pfn > (1UL << (32 - PAGE_SHIFT)) 893 || (end_pfn > (1UL << (32 - PAGE_SHIFT))
882 && end_pfn <= max_pfn_mapped)) 894 && end_pfn <= max_pfn_mapped)) {
883 va = __va(md->phys_addr); 895 va = __va(md->phys_addr);
884 else 896
885 va = efi_ioremap(md->phys_addr, size, md->type); 897 if (!(md->attribute & EFI_MEMORY_WB))
898 efi_memory_uc((u64)(unsigned long)va, size);
899 } else
900 va = efi_ioremap(md->phys_addr, size,
901 md->type, md->attribute);
886 902
887 md->virt_addr = (u64) (unsigned long) va; 903 md->virt_addr = (u64) (unsigned long) va;
888 904
@@ -892,13 +908,6 @@ void __init efi_enter_virtual_mode(void)
892 continue; 908 continue;
893 } 909 }
894 910
895 if (!(md->attribute & EFI_MEMORY_WB)) {
896 addr = md->virt_addr;
897 npages = md->num_pages;
898 memrange_efi_to_native(&addr, &npages);
899 set_memory_uc(addr, npages);
900 }
901
902 systab = (u64) (unsigned long) efi_phys.systab; 911 systab = (u64) (unsigned long) efi_phys.systab;
903 if (md->phys_addr <= systab && systab < end) { 912 if (md->phys_addr <= systab && systab < end) {
904 systab += md->virt_addr - md->phys_addr; 913 systab += md->virt_addr - md->phys_addr;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac3aa54e2654..95fd505dfeb6 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -82,7 +82,7 @@ void __init efi_call_phys_epilog(void)
82} 82}
83 83
84void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, 84void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
85 u32 type) 85 u32 type, u64 attribute)
86{ 86{
87 unsigned long last_map_pfn; 87 unsigned long last_map_pfn;
88 88
@@ -92,8 +92,11 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
92 last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); 92 last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
93 if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { 93 if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
94 unsigned long top = last_map_pfn << PAGE_SHIFT; 94 unsigned long top = last_map_pfn << PAGE_SHIFT;
95 efi_ioremap(top, size - (top - phys_addr), type); 95 efi_ioremap(top, size - (top - phys_addr), type, attribute);
96 } 96 }
97 97
98 if (!(attribute & EFI_MEMORY_WB))
99 efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
100
98 return (void __iomem *)__va(phys_addr); 101 return (void __iomem *)__va(phys_addr);
99} 102}
diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
index e56479e58053..9e7e14797a72 100644
--- a/arch/x86/realmode/rm/wakeup_asm.S
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -74,18 +74,9 @@ ENTRY(wakeup_start)
74 74
75 lidtl wakeup_idt 75 lidtl wakeup_idt
76 76
77 /* Clear the EFLAGS but remember if we have EFLAGS.ID */ 77 /* Clear the EFLAGS */
78 movl $X86_EFLAGS_ID, %ecx
79 pushl %ecx
80 popfl
81 pushfl
82 popl %edi
83 pushl $0 78 pushl $0
84 popfl 79 popfl
85 pushfl
86 popl %edx
87 xorl %edx, %edi
88 andl %ecx, %edi /* %edi is zero iff CPUID & %cr4 are missing */
89 80
90 /* Check header signature... */ 81 /* Check header signature... */
91 movl signature, %eax 82 movl signature, %eax
@@ -120,12 +111,12 @@ ENTRY(wakeup_start)
120 movl %eax, %cr3 111 movl %eax, %cr3
121 112
122 btl $WAKEUP_BEHAVIOR_RESTORE_CR4, %edi 113 btl $WAKEUP_BEHAVIOR_RESTORE_CR4, %edi
123 jz 1f 114 jnc 1f
124 movl pmode_cr4, %eax 115 movl pmode_cr4, %eax
125 movl %eax, %cr4 116 movl %eax, %cr4
1261: 1171:
127 btl $WAKEUP_BEHAVIOR_RESTORE_EFER, %edi 118 btl $WAKEUP_BEHAVIOR_RESTORE_EFER, %edi
128 jz 1f 119 jnc 1f
129 movl pmode_efer, %eax 120 movl pmode_efer, %eax
130 movl pmode_efer + 4, %edx 121 movl pmode_efer + 4, %edx
131 movl $MSR_EFER, %ecx 122 movl $MSR_EFER, %ecx
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e3497f240eab..586d83812b67 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -81,8 +81,6 @@
81#include "smp.h" 81#include "smp.h"
82#include "multicalls.h" 82#include "multicalls.h"
83 83
84#include <xen/events.h>
85
86EXPORT_SYMBOL_GPL(hypercall_page); 84EXPORT_SYMBOL_GPL(hypercall_page);
87 85
88DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 86DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6226c99729b9..dcf5f2dd91ec 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1288,6 +1288,25 @@ unsigned long xen_read_cr2_direct(void)
1288 return this_cpu_read(xen_vcpu_info.arch.cr2); 1288 return this_cpu_read(xen_vcpu_info.arch.cr2);
1289} 1289}
1290 1290
1291void xen_flush_tlb_all(void)
1292{
1293 struct mmuext_op *op;
1294 struct multicall_space mcs;
1295
1296 trace_xen_mmu_flush_tlb_all(0);
1297
1298 preempt_disable();
1299
1300 mcs = xen_mc_entry(sizeof(*op));
1301
1302 op = mcs.args;
1303 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1304 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1305
1306 xen_mc_issue(PARAVIRT_LAZY_MMU);
1307
1308 preempt_enable();
1309}
1291static void xen_flush_tlb(void) 1310static void xen_flush_tlb(void)
1292{ 1311{
1293 struct mmuext_op *op; 1312 struct mmuext_op *op;
@@ -2518,7 +2537,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2518 err = 0; 2537 err = 0;
2519out: 2538out:
2520 2539
2521 flush_tlb_all(); 2540 xen_flush_tlb_all();
2522 2541
2523 return err; 2542 return err;
2524} 2543}