author    H. Peter Anvin <hpa@linux.intel.com>  2011-08-04 19:13:20 -0400
committer H. Peter Anvin <hpa@linux.intel.com>  2011-08-04 19:13:20 -0400
commit    17b0436077d99211d8b26886235a36c5ec54ac57 (patch)
tree      8bcd9b4a0f8285f749814e95ae0365c611ba2392 /arch/x86
parent    aafade242ff24fac3aabf61c7861dfa44a3c2445 (diff)
parent    02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge commit 'v3.0' into x86/vdso
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                        |   2
-rw-r--r--  arch/x86/include/asm/apb_timer.h        |   2
-rw-r--r--  arch/x86/include/asm/memblock.h         |   2
-rw-r--r--  arch/x86/include/asm/mmzone_32.h        |  13
-rw-r--r--  arch/x86/include/asm/mmzone_64.h        |   3
-rw-r--r--  arch/x86/include/asm/pvclock.h          |   9
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.S  |  14
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.h  |   6
-rw-r--r--  arch/x86/kernel/acpi/sleep.c            |   6
-rw-r--r--  arch/x86/kernel/amd_iommu.c             |  48
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c        |   8
-rw-r--r--  arch/x86/kernel/apic/apic.c             |   3
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c      |   8
-rw-r--r--  arch/x86/kernel/devicetree.c            |  11
-rw-r--r--  arch/x86/kernel/process.c               |   4
-rw-r--r--  arch/x86/kernel/process_32.c            |   1
-rw-r--r--  arch/x86/kernel/process_64.c            |   1
-rw-r--r--  arch/x86/kernel/reboot.c                |  32
-rw-r--r--  arch/x86/kernel/smpboot.c               |  13
-rw-r--r--  arch/x86/kvm/emulate.c                  |  94
-rw-r--r--  arch/x86/kvm/mmu.c                      |   2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h              |   2
-rw-r--r--  arch/x86/kvm/vmx.c                      |   3
-rw-r--r--  arch/x86/mm/init_64.c                   |   3
-rw-r--r--  arch/x86/mm/memblock.c                  |   4
-rw-r--r--  arch/x86/oprofile/nmi_int.c             |  14
-rw-r--r--  arch/x86/oprofile/op_model_amd.c        |  13
-rw-r--r--  arch/x86/pci/acpi.c                     |   2
-rw-r--r--  arch/x86/pci/xen.c                      |  40
-rw-r--r--  arch/x86/platform/efi/efi.c             |  32
-rw-r--r--  arch/x86/xen/enlighten.c                |   9
-rw-r--r--  arch/x86/xen/mmu.c                      |  14
-rw-r--r--  arch/x86/xen/multicalls.c               |  12
-rw-r--r--  arch/x86/xen/setup.c                    |  10
-rw-r--r--  arch/x86/xen/smp.c                      |   7
35 files changed, 328 insertions(+), 119 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c1e41bccdcb8..c05810c0d096 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1174,7 +1174,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 config AMD_NUMA
         def_bool y
         prompt "Old style AMD Opteron NUMA detection"
-        depends on NUMA && PCI
+        depends on X86_64 && NUMA && PCI
         ---help---
           Enable AMD NUMA node topology detection. You should say Y here if
           you have a multi processor AMD system. This uses an old method to
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index 2fefa501d3ba..af60d8a2e288 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -62,7 +62,7 @@ extern int sfi_mtimer_num;
 #else /* CONFIG_APB_TIMER */
 
 static inline unsigned long apbt_quick_calibrate(void) {return 0; }
-static inline void apbt_time_init(void) {return 0; }
+static inline void apbt_time_init(void) { }
 
 #endif
 #endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba6978..0cd3800f33b9 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 5e83a416eca8..ffa037f28d39 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 #endif
 }
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                               \
-({                                                                      \
-        pg_data_t *__pgdat = NODE_DATA(nid);                            \
-        __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
-})
-
 static inline int pfn_valid(int pfn)
 {
         int nid = pfn_to_nid(pfn);
@@ -68,6 +57,8 @@ static inline int pfn_valid(int pfn)
         return 0;
 }
 
+#define early_pfn_valid(pfn)    pfn_valid((pfn))
+
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index b3f88d7867c7..129d9aa3ceb3 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)          (node_data[nid])
 
-#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn + \
-                                 NODE_DATA(nid)->node_spanned_pages)
 #endif
 #endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84acc1512..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
         u64 product;
 #ifdef __i386__
         u32 tmp1, tmp2;
+#else
+        ulong tmp;
 #endif
 
         if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
                 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
         __asm__ (
-                "mul %%rdx ; shrd $32,%%rdx,%%rax"
-                : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+                "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+                : [lo]"=a"(product),
+                  [hi]"=d"(tmp)
+                : "0"(delta),
+                  [mul_frac]"rm"((u64)mul_frac));
 #else
 #error implement me!
 #endif
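The x86-64 asm above is a 64x64->128-bit multiply followed by "shrd $32": mul_frac is treated as a 32-bit binary fraction and bits 32..95 of the product are kept; the named-operand rewrite only makes the register constraints explicit. A standalone sketch of the same math, assuming GCC/Clang's unsigned __int128 extension (not kernel code):

/* Sketch of pvclock_scale_delta(): pre-shift delta, then multiply by
 * mul_frac interpreted as a 32-bit binary fraction. */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
        if (shift < 0)
                delta >>= -shift;
        else
                delta <<= shift;
        return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
        /* mul_frac = 2^31 is a fixed-point factor of 0.5 */
        printf("%llu\n",
               (unsigned long long)scale_delta(1000, 1u << 31, 0)); /* 500 */
        return 0;
}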
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index ead21b663117..b4fd836e4053 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -28,6 +28,8 @@ pmode_cr3: .long 0 /* Saved %cr3 */
 pmode_cr4:      .long   0       /* Saved %cr4 */
 pmode_efer:     .quad   0       /* Saved EFER */
 pmode_gdt:      .quad   0
+pmode_misc_en:  .quad   0       /* Saved MISC_ENABLE MSR */
+pmode_behavior: .long   0       /* Wakeup behavior flags */
 realmode_flags: .long   0
 real_magic:     .long   0
 trampoline_segment:     .word 0
@@ -91,6 +93,18 @@ wakeup_code:
         /* Call the C code */
         calll   main
 
+        /* Restore MISC_ENABLE before entering protected mode, in case
+           BIOS decided to clear XD_DISABLE during S3. */
+        movl    pmode_behavior, %eax
+        btl     $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
+        jnc     1f
+
+        movl    pmode_misc_en, %eax
+        movl    pmode_misc_en + 4, %edx
+        movl    $MSR_IA32_MISC_ENABLE, %ecx
+        wrmsr
+1:
+
         /* Do any other stuff... */
 
 #ifndef CONFIG_64BIT
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
index e1828c07e79c..97a29e1430e3 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/kernel/acpi/realmode/wakeup.h
@@ -21,6 +21,9 @@ struct wakeup_header {
         u32 pmode_efer_low;     /* Protected mode EFER */
         u32 pmode_efer_high;
         u64 pmode_gdt;
+        u32 pmode_misc_en_low;  /* Protected mode MISC_ENABLE */
+        u32 pmode_misc_en_high;
+        u32 pmode_behavior;     /* Wakeup routine behavior flags */
         u32 realmode_flags;
         u32 real_magic;
         u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
@@ -39,4 +42,7 @@ extern struct wakeup_header wakeup_header;
 #define WAKEUP_HEADER_SIGNATURE 0x51ee1111
 #define WAKEUP_END_SIGNATURE    0x65a22c82
 
+/* Wakeup behavior bits */
+#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE     0
+
 #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 18a857ba7a25..103b6ab368d3 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -77,6 +77,12 @@ int acpi_suspend_lowlevel(void)
 
         header->pmode_cr0 = read_cr0();
         header->pmode_cr4 = read_cr4_safe();
+        header->pmode_behavior = 0;
+        if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
+                        &header->pmode_misc_en_low,
+                        &header->pmode_misc_en_high))
+                header->pmode_behavior |=
+                        (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
         header->realmode_flags = acpi_realmode_flags;
         header->real_magic = 0x12345678;
 
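The three hunks above (wakeup.S, wakeup.h, sleep.c) are one change: MSR_IA32_MISC_ENABLE is saved at suspend, and the realmode wakeup stub restores it only when the behavior flag is set, which in turn happens only if rdmsr_safe() succeeded. A compressed standalone sketch of the suspend-side half (simplified types; the rdmsr stub is hypothetical and always succeeds; the wakeup stub's half is "if (pmode_behavior & (1 << bit)) wrmsr(low, high)"):

#include <stdint.h>
#include <stdio.h>

#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0

struct wakeup_header {
        uint32_t pmode_misc_en_low;
        uint32_t pmode_misc_en_high;
        uint32_t pmode_behavior;
};

static int rdmsr_stub(uint32_t *lo, uint32_t *hi)       /* 0 on success */
{
        *lo = 0x00850089;       /* made-up MISC_ENABLE value */
        *hi = 0;
        return 0;
}

int main(void)
{
        struct wakeup_header h = { 0 };

        h.pmode_behavior = 0;
        if (!rdmsr_stub(&h.pmode_misc_en_low, &h.pmode_misc_en_high))
                h.pmode_behavior |= 1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE;

        printf("behavior=%#x misc_en=%#x\n",
               h.pmode_behavior, h.pmode_misc_en_low);
        return 0;
}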
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb5fa34..7c3a95e54ec5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/dma.h>
 #include <asm/amd_iommu_proto.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
                 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
                 if (pdev)
                         dev_data->alias = &pdev->dev;
+                else {
+                        kfree(dev_data);
+                        return -ENOTSUPP;
+                }
 
         atomic_set(&dev_data->bind, 0);
 
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
         return 0;
 }
 
+static void iommu_ignore_device(struct device *dev)
+{
+        u16 devid, alias;
+
+        devid = get_device_id(dev);
+        alias = amd_iommu_alias_table[devid];
+
+        memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+        memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+
+        amd_iommu_rlookup_table[devid] = NULL;
+        amd_iommu_rlookup_table[alias] = NULL;
+}
+
 static void iommu_uninit_device(struct device *dev)
 {
         kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
                         continue;
 
                 ret = iommu_init_device(&pdev->dev);
-                if (ret)
+                if (ret == -ENOTSUPP)
+                        iommu_ignore_device(&pdev->dev);
+                else if (ret)
                         goto out_free;
         }
 
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
         .dma_supported = amd_iommu_dma_supported,
 };
 
+static unsigned device_dma_ops_init(void)
+{
+        struct pci_dev *pdev = NULL;
+        unsigned unhandled = 0;
+
+        for_each_pci_dev(pdev) {
+                if (!check_device(&pdev->dev)) {
+                        unhandled += 1;
+                        continue;
+                }
+
+                pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+        }
+
+        return unhandled;
+}
+
 /*
  * The function which clues the AMD IOMMU driver into dma_ops.
  */
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
         struct amd_iommu *iommu;
-        int ret;
+        int ret, unhandled;
 
         /*
          * first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
         swiotlb = 0;
 
         /* Make the driver finally visible to the drivers */
-        dma_ops = &amd_iommu_dma_ops;
+        unhandled = device_dma_ops_init();
+        if (unhandled && max_pfn > MAX_DMA32_PFN) {
+                /* There are unhandled devices - initialize swiotlb for them */
+                swiotlb = 1;
+        }
 
         amd_iommu_stats_init();
 
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21120a8..bfc8453bd98d 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 {
         u8 *p = (u8 *)h;
         u8 *end = p, flags = 0;
-        u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
-        u32 ext_flags = 0;
+        u16 devid = 0, devid_start = 0, devid_to = 0;
+        u32 dev_i, ext_flags = 0;
         bool alias = false;
         struct ivhd_entry *e;
 
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 /* Initializes the device->iommu mapping for the driver */
 static int __init init_iommu_devices(struct amd_iommu *iommu)
 {
-        u16 i;
+        u32 i;
 
         for (i = iommu->first_device; i <= iommu->last_device; ++i)
                 set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
  */
 static void init_device_table(void)
 {
-        u16 devid;
+        u32 devid;
 
         for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
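The u16 -> u32 changes here are endless-loop fixes rather than cleanups: amd_iommu_last_bdf can legitimately be 0xffff, and a 16-bit induction variable in "for (i = 0; i <= last; ++i)" wraps from 0xffff back to 0, so the exit condition never becomes false. A standalone demonstration of the wraparound the widened types avoid:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t last_bdf = 0xffff;     /* worst case for amd_iommu_last_bdf */
        uint32_t n = 0;

        for (uint32_t i = 0; i <= last_bdf; ++i)        /* u32: terminates */
                n++;
        printf("visited %u device ids\n", n);           /* 65536 */

        /* With "uint16_t i" the loop would never exit: after i == 0xffff
         * the increment wraps i to 0, and i <= 0xffff stays true. */
        return 0;
}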
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b961af86bfea..b9338b8cf420 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
 
 /*
  * If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * enables the vector. See also the BKDGs. Must be called with
+ * preemption disabled.
  */
 
 int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b511a011b7d0..adc66c3a1fef 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat);
 
 /* Direct Legacy VGA I/O traffic to designated IOH */
 int uv_set_vga_state(struct pci_dev *pdev, bool decode,
-                     unsigned int command_bits, bool change_bridge)
+                     unsigned int command_bits, u32 flags)
 {
         int domain, bus, rc;
 
-        PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n",
-                        pdev->devfn, decode, command_bits, change_bridge);
+        PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
+                        pdev->devfn, decode, command_bits, flags);
 
-        if (!change_bridge)
+        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
                 return 0;
 
         if ((command_bits & PCI_COMMAND_IO) == 0)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc8461835..9aeb78a23de4 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
+#include <linux/initrd.h>
 
 #include <asm/hpet.h>
 #include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
         return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+                                            unsigned long end)
+{
+        initrd_start = (unsigned long)__va(start);
+        initrd_end = (unsigned long)__va(end);
+        initrd_below_start_ok = 1;
+}
+#endif
+
 void __init add_dtb(u64 data)
 {
         initial_dtb = data + offsetof(struct setup_data, data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2e4928d45a2d..e1ba8cb24e4e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(pm_idle);
 #endif
 
@@ -399,7 +399,7 @@ void default_idle(void)
                 cpu_relax();
         }
 }
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(default_idle);
 #endif
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d128783af47..a3d0dc59067b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
         set_user_gs(regs, 0);
         regs->fs = 0;
-        set_fs(USER_DS);
         regs->ds = __USER_DS;
         regs->es = __USER_DS;
         regs->ss = __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd922ac0d..ca6f7ab8df33 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
         regs->cs = _cs;
         regs->ss = _ss;
         regs->flags = X86_EFLAGS_IF;
-        set_fs(USER_DS);
         /*
          * Free the old FP and other extended state
          */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 0c016f727695..9242436e9937 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -294,6 +294,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                         DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
                 },
         },
+        {       /* Handle reboot issue on Acer Aspire one */
+                .callback = set_bios_reboot,
+                .ident = "Acer Aspire One A110",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+                },
+        },
         { }
 };
 
@@ -411,6 +419,30 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                         DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
                 },
         },
+        {       /* Handle problems with rebooting on the Latitude E6320. */
+                .callback = set_pci_reboot,
+                .ident = "Dell Latitude E6320",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+                },
+        },
+        {       /* Handle problems with rebooting on the Latitude E5420. */
+                .callback = set_pci_reboot,
+                .ident = "Dell Latitude E5420",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+                },
+        },
+        {       /* Handle problems with rebooting on the Latitude E6420. */
+                .callback = set_pci_reboot,
+                .ident = "Dell Latitude E6420",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+                },
+        },
         { }
 };
 
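For context, entries like the ones added above are matched against the firmware's DMI strings early in boot; dmi_check_system() runs .callback on the first entry whose .matches fields all hit, each DMI_MATCH being a substring test. A simplified standalone sketch of that lookup (table contents copied from the patch; the matcher itself is a stand-in, not the kernel's):

#include <stdio.h>
#include <string.h>

struct quirk {
        const char *ident;
        const char *sys_vendor;         /* DMI_SYS_VENDOR */
        const char *product_name;       /* DMI_PRODUCT_NAME */
};

static const struct quirk pci_reboot_quirks[] = {
        { "Dell Latitude E6320", "Dell Inc.", "Latitude E6320" },
        { "Dell Latitude E5420", "Dell Inc.", "Latitude E5420" },
        { "Dell Latitude E6420", "Dell Inc.", "Latitude E6420" },
        { NULL, NULL, NULL }
};

int main(void)
{
        /* pretend these came out of this machine's DMI tables */
        const char *vendor = "Dell Inc.", *product = "Latitude E5420";

        for (const struct quirk *q = pci_reboot_quirks; q->ident; q++) {
                /* substring match, like DMI_MATCH */
                if (strstr(vendor, q->sys_vendor) &&
                    strstr(product, q->product_name)) {
                        printf("%s: forcing reboot=pci\n", q->ident);
                        break;  /* the kernel runs q->callback here */
                }
        }
        return 0;
}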
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11797de..9fd3137230d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
         x86_platform.nmi_init();
 
+        /*
+         * Wait until the cpu which brought this one up marked it
+         * online before enabling interrupts. If we don't do that then
+         * we can end up waking up the softirq thread before this cpu
+         * reached the active state, which makes the scheduler unhappy
+         * and schedule the softirq thread on the wrong cpu. This is
+         * only observable with forced threaded interrupts, but in
+         * theory it could also happen w/o them. It's just way harder
+         * to achieve.
+         */
+        while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+                cpu_relax();
+
         /* enable local interrupts */
         local_irq_enable();
 
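The added loop is a plain publish/spin handshake: the boot CPU sets the secondary's bit in cpu_active_mask, and the secondary busy-waits on it before enabling interrupts. A minimal user-space analog of the handshake, assuming a libc that ships C11 <threads.h> (a sketch, not kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_bool cpu_active;  /* stands in for this cpu's bit in
                                 * cpu_active_mask */

static int secondary_cpu(void *arg)
{
        (void)arg;
        while (!atomic_load(&cpu_active))
                ;       /* cpu_relax() equivalent: just keep spinning */
        puts("secondary: active, safe to enable interrupts");
        return 0;
}

int main(void)
{
        thrd_t t;

        thrd_create(&t, secondary_cpu, NULL);
        atomic_store(&cpu_active, true);        /* boot CPU marks it active */
        thrd_join(t, NULL);
        return 0;
}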
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477feb18..adc98675cda0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
 #define DstDI       (5<<1)      /* Destination is in ES:(E)DI */
 #define DstMem64    (6<<1)      /* 64bit memory operand */
 #define DstImmUByte (7<<1)      /* 8-bit unsigned immediate operand */
-#define DstMask     (7<<1)
+#define DstDX       (8<<1)      /* Destination is in DX register */
+#define DstMask     (0xf<<1)
 /* Source operand type. */
-#define SrcNone     (0<<4)      /* No source operand. */
-#define SrcReg      (1<<4)      /* Register operand. */
-#define SrcMem      (2<<4)      /* Memory operand. */
-#define SrcMem16    (3<<4)      /* Memory operand (16-bit). */
-#define SrcMem32    (4<<4)      /* Memory operand (32-bit). */
-#define SrcImm      (5<<4)      /* Immediate operand. */
-#define SrcImmByte  (6<<4)      /* 8-bit sign-extended immediate operand. */
-#define SrcOne      (7<<4)      /* Implied '1' */
-#define SrcImmUByte (8<<4)      /* 8-bit unsigned immediate operand. */
-#define SrcImmU     (9<<4)      /* Immediate operand, unsigned */
-#define SrcSI       (0xa<<4)    /* Source is in the DS:RSI */
-#define SrcImmFAddr (0xb<<4)    /* Source is immediate far address */
-#define SrcMemFAddr (0xc<<4)    /* Source is far address in memory */
-#define SrcAcc      (0xd<<4)    /* Source Accumulator */
-#define SrcImmU16   (0xe<<4)    /* Immediate operand, unsigned, 16 bits */
-#define SrcMask     (0xf<<4)
+#define SrcNone     (0<<5)      /* No source operand. */
+#define SrcReg      (1<<5)      /* Register operand. */
+#define SrcMem      (2<<5)      /* Memory operand. */
+#define SrcMem16    (3<<5)      /* Memory operand (16-bit). */
+#define SrcMem32    (4<<5)      /* Memory operand (32-bit). */
+#define SrcImm      (5<<5)      /* Immediate operand. */
+#define SrcImmByte  (6<<5)      /* 8-bit sign-extended immediate operand. */
+#define SrcOne      (7<<5)      /* Implied '1' */
+#define SrcImmUByte (8<<5)      /* 8-bit unsigned immediate operand. */
+#define SrcImmU     (9<<5)      /* Immediate operand, unsigned */
+#define SrcSI       (0xa<<5)    /* Source is in the DS:RSI */
+#define SrcImmFAddr (0xb<<5)    /* Source is immediate far address */
+#define SrcMemFAddr (0xc<<5)    /* Source is far address in memory */
+#define SrcAcc      (0xd<<5)    /* Source Accumulator */
+#define SrcImmU16   (0xe<<5)    /* Immediate operand, unsigned, 16 bits */
+#define SrcDX       (0xf<<5)    /* Source is in DX register */
+#define SrcMask     (0xf<<5)
 /* Generic ModRM decode. */
-#define ModRM       (1<<8)
+#define ModRM       (1<<9)
 /* Destination is only written; never read. */
-#define Mov         (1<<9)
-#define BitOp       (1<<10)
-#define MemAbs      (1<<11)     /* Memory operand is absolute displacement */
-#define String      (1<<12)     /* String instruction (rep capable) */
-#define Stack       (1<<13)     /* Stack instruction (push/pop) */
-#define GroupMask   (7<<14)     /* Opcode uses one of the group mechanisms */
-#define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual   (2<<14)     /* Alternate decoding of mod == 3 */
-#define Prefix      (3<<14)     /* Instruction varies with 66/f2/f3 prefix */
-#define RMExt       (4<<14)     /* Opcode extension in ModRM r/m if mod == 3 */
-#define Sse         (1<<17)     /* SSE Vector instruction */
+#define Mov         (1<<10)
+#define BitOp       (1<<11)
+#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
+#define String      (1<<13)     /* String instruction (rep capable) */
+#define Stack       (1<<14)     /* Stack instruction (push/pop) */
+#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
+#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
+#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
+#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
+#define Sse         (1<<18)     /* SSE Vector instruction */
 /* Misc flags */
 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 #define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
         I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
         I(SrcImmByte | Mov | Stack, em_push),
         I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
-        D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
-        D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
+        D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
+        D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
         /* 0x70 - 0x7F */
         X16(D(SrcImmByte)),
         /* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
         /* 0xE8 - 0xEF */
         D(SrcImm | Stack), D(SrcImm | ImplicitOps),
         D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
-        D2bvIP(SrcNone | DstAcc, in, check_perm_in),
-        D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
+        D2bvIP(SrcDX | DstAcc, in, check_perm_in),
+        D2bvIP(SrcAcc | DstDX, out, check_perm_out),
         /* 0xF0 - 0xF7 */
         N, DI(ImplicitOps, icebp), N, N,
         DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3370,7 +3372,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
         int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
         bool op_prefix = false;
         struct opcode opcode;
-        struct operand memop = { .type = OP_NONE };
+        struct operand memop = { .type = OP_NONE }, *memopp = NULL;
 
         c->eip = ctxt->eip;
         c->fetch.start = c->eip;
@@ -3545,9 +3547,6 @@ done_prefixes:
         if (memop.type == OP_MEM && c->ad_bytes != 8)
                 memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
-        if (memop.type == OP_MEM && c->rip_relative)
-                memop.addr.mem.ea += c->eip;
-
         /*
          * Decode and fetch the source operand: register, memory
          * or immediate.
@@ -3569,6 +3568,7 @@ done_prefixes:
                         c->op_bytes;
         srcmem_common:
                 c->src = memop;
+                memopp = &c->src;
                 break;
         case SrcImmU16:
                 rc = decode_imm(ctxt, &c->src, 2, false);
@@ -3613,6 +3613,12 @@ done_prefixes:
                 memop.bytes = c->op_bytes + 2;
                 goto srcmem_common;
                 break;
+        case SrcDX:
+                c->src.type = OP_REG;
+                c->src.bytes = 2;
+                c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
+                fetch_register_operand(&c->src);
+                break;
         }
 
         if (rc != X86EMUL_CONTINUE)
@@ -3659,6 +3665,7 @@ done_prefixes:
         case DstMem:
         case DstMem64:
                 c->dst = memop;
+                memopp = &c->dst;
                 if ((c->d & DstMask) == DstMem64)
                         c->dst.bytes = 8;
                 else
@@ -3682,14 +3689,23 @@ done_prefixes:
                 c->dst.addr.mem.seg = VCPU_SREG_ES;
                 c->dst.val = 0;
                 break;
+        case DstDX:
+                c->dst.type = OP_REG;
+                c->dst.bytes = 2;
+                c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
+                fetch_register_operand(&c->dst);
+                break;
         case ImplicitOps:
                 /* Special instructions do their own operand decoding. */
         default:
                 c->dst.type = OP_NONE; /* Disable writeback. */
-                return 0;
+                break;
         }
 
 done:
+        if (memopp && memopp->type == OP_MEM && c->rip_relative)
+                memopp->addr.mem.ea += c->eip;
+
         return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
@@ -4027,7 +4043,6 @@ special_insn:
                 break;
         case 0xec: /* in al,dx */
         case 0xed: /* in (e/r)ax,dx */
-                c->src.val = c->regs[VCPU_REGS_RDX];
         do_io_in:
                 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
                                      &c->dst.val))
@@ -4035,7 +4050,6 @@ special_insn:
                 break;
         case 0xee: /* out dx,al */
         case 0xef: /* out dx,(e/r)ax */
-                c->dst.val = c->regs[VCPU_REGS_RDX];
         do_io_out:
                 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
                                       &c->src.val, 1);
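The flag renumbering at the top of this file is mechanical: DstMask grows from 3 bits (7<<1) to 4 bits (0xf<<1) to make room for DstDX, which pushes the Src field from bits 4-7 to bits 5-8 and every flag above it up by one position; decoding remains mask-and-compare. A standalone illustration (flag values copied from the patch; the combined flag word is made up for the demo):

#include <stdio.h>

#define DstDX   (8 << 1)        /* new: needs 4 Dst bits */
#define DstMask (0xf << 1)      /* was (7<<1) */
#define SrcDX   (0xf << 5)      /* Src field moved from <<4 to <<5 */
#define SrcMask (0xf << 5)
#define String  (1 << 13)       /* everything above shifted up by one */

int main(void)
{
        unsigned int d = DstDX | String;        /* made-up flag word */

        if ((d & DstMask) == DstDX)
                puts("destination operand: DX register");
        if ((d & SrcMask) == SrcDX)
                puts("source operand: DX register");    /* not printed */
        return 0;
}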
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4c8594..aee38623b768 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
-        return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+        return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010c4cb..9d03ad4dd5ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                             gva_t addr, u32 access)
 {
         pt_element_t pte;
-        pt_element_t __user *ptep_user;
+        pt_element_t __user *uninitialized_var(ptep_user);
         gfn_t table_gfn;
         unsigned index, pt_access, uninitialized_var(pte_access);
         gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f67469..d48ec60ea421 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                                         unsigned long cr0,
                                         struct kvm_vcpu *vcpu)
 {
-        vmx_decache_cr3(vcpu);
+        if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+                vmx_decache_cr3(vcpu);
         if (!(cr0 & X86_CR0_PG)) {
                 /* From paging/starting to nonpaging */
                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d865c4aeec55..bbaaa005bf0e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -28,6 +28,7 @@
 #include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/memory.h>
 #include <linux/memory_hotplug.h>
 #include <linux/nmi.h>
 #include <linux/gfp.h>
@@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_X86_UV
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
-
 unsigned long memory_block_size_bytes(void)
 {
         if (is_uv_system()) {
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83..992da5ec5a64 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
         struct memblock_region *r;
         u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
                 if (addr >= ei_last)
                         continue;
                 *sizep = ei_last - addr;
-                while (check_with_memblock_reserved_size(&addr, sizep, align))
+                while (memblock_x86_check_reserved_size(&addr, sizep, align))
                         ;
 
                 if (*sizep)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index cf9750004a08..68894fdc034b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
 static int nmi_start(void)
 {
         get_online_cpus();
-        on_each_cpu(nmi_cpu_start, NULL, 1);
         ctr_running = 1;
+        /* make ctr_running visible to the nmi handler: */
+        smp_mb();
+        on_each_cpu(nmi_cpu_start, NULL, 1);
         put_online_cpus();
         return 0;
 }
@@ -504,15 +506,18 @@ static int nmi_setup(void)
 
         nmi_enabled = 0;
         ctr_running = 0;
-        barrier();
+        /* make variables visible to the nmi handler: */
+        smp_mb();
         err = register_die_notifier(&profile_exceptions_nb);
         if (err)
                 goto fail;
 
         get_online_cpus();
         register_cpu_notifier(&oprofile_cpu_nb);
-        on_each_cpu(nmi_cpu_setup, NULL, 1);
         nmi_enabled = 1;
+        /* make nmi_enabled visible to the nmi handler: */
+        smp_mb();
+        on_each_cpu(nmi_cpu_setup, NULL, 1);
         put_online_cpus();
 
         return 0;
@@ -531,7 +536,8 @@ static void nmi_shutdown(void)
         nmi_enabled = 0;
         ctr_running = 0;
         put_online_cpus();
-        barrier();
+        /* make variables visible to the nmi handler: */
+        smp_mb();
         unregister_die_notifier(&profile_exceptions_nb);
         msrs = &get_cpu_var(cpu_msrs);
         model->shutdown(msrs);
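The oprofile change is an ordering fix: the flag must be globally visible before the cross-CPU call that runs the NMI handlers, so a full smp_mb() replaces the compiler-only barrier() and the flag store moves ahead of on_each_cpu(). A user-space analog of the same publish-then-kick pattern with C11 atomics (the fence stands in for smp_mb(), the second thread for the NMI handler; a sketch, not kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static int ctr_running;         /* plain flag, as in the driver */
static atomic_bool kicked;      /* stands in for the on_each_cpu() IPI */

static int nmi_handler(void *arg)
{
        (void)arg;
        while (!atomic_load_explicit(&kicked, memory_order_acquire))
                ;
        printf("handler sees ctr_running=%d\n", ctr_running);   /* 1 */
        return 0;
}

int main(void)
{
        thrd_t t;

        thrd_create(&t, nmi_handler, NULL);
        ctr_running = 1;
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() analog */
        atomic_store_explicit(&kicked, true, memory_order_release);
        thrd_join(t, NULL);
        return 0;
}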
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9fd8a567fe1e..9cbb710dc94b 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
         return 0;
 }
 
+/*
+ * This runs only on the current cpu. We try to find an LVT offset and
+ * setup the local APIC. For this we must disable preemption. On
+ * success we initialize all nodes with this offset. This updates then
+ * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
+ * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
+ * amd_cpu_shutdown() using the new offset.
+ */
 static int force_ibs_eilvt_setup(void)
 {
         int offset;
         int ret;
 
-        /*
-         * find the next free available EILVT entry, skip offset 0,
-         * pin search to this cpu
-         */
         preempt_disable();
+        /* find the next free available EILVT entry, skip offset 0 */
         for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
                 if (get_eilvt(offset))
                         break;
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 0972315c3860..68c3c1395202 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
         return false;
 }
 
-static void coalesce_windows(struct pci_root_info *info, int type)
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
 {
         int i, j;
         struct resource *res1, *res2;
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 8214724ce54d..f567965c0620 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -327,7 +327,7 @@ int __init pci_xen_hvm_init(void)
 }
 
 #ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int triggering)
+static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
 {
         int rc, pirq, irq = -1;
         struct physdev_map_pirq map_irq;
@@ -344,16 +344,18 @@ static int xen_register_pirq(u32 gsi, int triggering)
                 shareable = 1;
                 name = "ioapic-level";
         }
-
         pirq = xen_allocate_pirq_gsi(gsi);
         if (pirq < 0)
                 goto out;
 
-        irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
+        if (gsi_override >= 0)
+                irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
+        else
+                irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
         if (irq < 0)
                 goto out;
 
-        printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq);
+        printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
 
         map_irq.domid = DOMID_SELF;
         map_irq.type = MAP_PIRQ_TYPE_GSI;
@@ -370,7 +372,7 @@ out:
         return irq;
 }
 
-static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
 {
         int rc, irq;
         struct physdev_setup_gsi setup_gsi;
@@ -381,7 +383,7 @@ static int xen_register_gsi(u32 gsi, int triggering, int polarity)
         printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
                 gsi, triggering, polarity);
 
-        irq = xen_register_pirq(gsi, triggering);
+        irq = xen_register_pirq(gsi, gsi_override, triggering);
 
         setup_gsi.gsi = gsi;
         setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
@@ -403,6 +405,8 @@ static __init void xen_setup_acpi_sci(void)
         int rc;
         int trigger, polarity;
         int gsi = acpi_sci_override_gsi;
+        int irq = -1;
+        int gsi_override = -1;
 
         if (!gsi)
                 return;
@@ -419,7 +423,25 @@ static __init void xen_setup_acpi_sci(void)
         printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
                 "polarity=%d\n", gsi, trigger, polarity);
 
-        gsi = xen_register_gsi(gsi, trigger, polarity);
+        /* Before we bind the GSI to a Linux IRQ, check whether
+         * we need to override it with bus_irq (IRQ) value. Usually for
+         * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
+         *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+         * but there are oddballs where the IRQ != GSI:
+         *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+         * which ends up being: gsi_to_irq[9] == 20
+         * (which is what acpi_gsi_to_irq ends up calling when starting the
+         * the ACPI interpreter and keels over since IRQ 9 has not been
+         * setup as we had setup IRQ 20 for it).
+         */
+        /* Check whether the GSI != IRQ */
+        if (acpi_gsi_to_irq(gsi, &irq) == 0) {
+                if (irq >= 0 && irq != gsi)
+                        /* Bugger, we MUST have that IRQ. */
+                        gsi_override = irq;
+        }
+
+        gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
         printk(KERN_INFO "xen: acpi sci %d\n", gsi);
 
         return;
@@ -428,7 +450,7 @@ static __init void xen_setup_acpi_sci(void)
 static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
                                  int trigger, int polarity)
 {
-        return xen_register_gsi(gsi, trigger, polarity);
+        return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
 }
 
 static int __init pci_xen_initial_domain(void)
@@ -467,7 +489,7 @@ void __init xen_setup_pirqs(void)
                 if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
                         continue;
 
-                xen_register_pirq(irq,
+                xen_register_pirq(irq, -1 /* no GSI override */,
                         trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
         }
 }
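The gsi_override path above covers ACPI interrupt source overrides where the SCI's GSI maps to a different legacy IRQ; in that case the IRQ, not the GSI, must be bound. A standalone sketch of just that decision (the gsi_to_irq stub is hypothetical and models the "bus_irq 9, global_irq 20" oddball from the comment):

#include <stdio.h>

static int gsi_to_irq(int gsi)  /* stand-in for acpi_gsi_to_irq() */
{
        /* models: INT_SRC_OVR (bus_irq 9 global_irq 20) */
        return gsi == 20 ? 9 : gsi;
}

int main(void)
{
        int gsi = 20;           /* acpi_sci_override_gsi */
        int gsi_override = -1;
        int irq = gsi_to_irq(gsi);

        if (irq >= 0 && irq != gsi)
                gsi_override = irq;     /* bind via the IRQ, not the GSI */

        printf("gsi=%d irq=%d gsi_override=%d\n", gsi, irq, gsi_override);
        return 0;
}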
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560..899e393d8e73 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
         for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                 efi_memory_desc_t *md = p;
-                unsigned long long start = md->phys_addr;
-                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+                u64 start = md->phys_addr;
+                u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
                 if (md->type != EFI_BOOT_SERVICES_CODE &&
                     md->type != EFI_BOOT_SERVICES_DATA)
                         continue;
-
-                memblock_x86_reserve_range(start, start + size, "EFI Boot");
+                /* Only reserve where possible:
+                 *  - Not within any already allocated areas
+                 *  - Not over any memory area (really needed, if above?)
+                 *  - Not within any part of the kernel
+                 *  - Not the bios reserved area
+                 */
+                if ((start+size >= virt_to_phys(_text)
+                                && start <= virt_to_phys(_end)) ||
+                        !e820_all_mapped(start, start+size, E820_RAM) ||
+                        memblock_x86_check_reserved_size(&start, &size,
+                                                        1<<EFI_PAGE_SHIFT)) {
+                        /* Could not reserve, skip it */
+                        md->num_pages = 0;
+                        memblock_dbg(PFX "Could not reserve boot range "
+                                        "[0x%010llx-0x%010llx]\n",
+                                                start, start+size-1);
+                } else
+                        memblock_x86_reserve_range(start, start+size,
+                                                        "EFI Boot");
         }
 }
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
                     md->type != EFI_BOOT_SERVICES_DATA)
                         continue;
 
+                /* Could not reserve boot area */
+                if (!size)
+                        continue;
+
                 free_bootmem_late(start, size);
         }
 }
@@ -483,9 +504,6 @@ void __init efi_init(void)
                 x86_platform.set_wallclock = efi_set_rtc_mmss;
 #endif
 
-        /* Setup for EFI runtime service */
-        reboot_type = BOOT_EFI;
-
 #if EFI_DEBUG
         print_efi_memmap();
 #endif
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd7b88f2ec7a..5525163a0398 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
         xen_reboot(SHUTDOWN_poweroff);
 }
 
+static void xen_machine_power_off(void)
+{
+        if (pm_power_off)
+                pm_power_off();
+        xen_reboot(SHUTDOWN_poweroff);
+}
+
 static void xen_crash_shutdown(struct pt_regs *regs)
 {
         xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
 static const struct machine_ops xen_machine_ops __initconst = {
         .restart = xen_restart,
         .halt = xen_machine_halt,
-        .power_off = xen_machine_halt,
+        .power_off = xen_machine_power_off,
         .shutdown = xen_machine_halt,
         .crash_shutdown = xen_crash_shutdown,
         .emergency_restart = xen_emergency_restart,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dcc62f1..0ccccb67a993 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -59,6 +59,7 @@
 #include <asm/page.h>
 #include <asm/init.h>
 #include <asm/pat.h>
+#include <asm/smp.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,11 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
         struct {
                 struct mmuext_op op;
+#ifdef CONFIG_SMP
+                DECLARE_BITMAP(mask, num_processors);
+#else
                 DECLARE_BITMAP(mask, NR_CPUS);
+#endif
         } *args;
         struct multicall_space mcs;
 
@@ -1599,6 +1604,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
         for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
                 pte_t pte;
 
+#ifdef CONFIG_X86_32
+                if (pfn > max_pfn_mapped)
+                        max_pfn_mapped = pfn;
+#endif
+
                 if (!pte_none(pte_page[pteidx]))
                         continue;
 
@@ -1766,7 +1776,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
         initial_kernel_pmd =
                 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+                                  xen_start_info->nr_pt_frames * PAGE_SIZE +
+                                  512*1024);
 
         kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
         memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 8bff7e7c290b..1b2b73ff0a6e 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args)
         unsigned argidx = roundup(b->argidx, sizeof(u64));
 
         BUG_ON(preemptible());
-        BUG_ON(b->argidx > MC_ARGS);
+        BUG_ON(b->argidx >= MC_ARGS);
 
         if (b->mcidx == MC_BATCH ||
-            (argidx + args) > MC_ARGS) {
+            (argidx + args) >= MC_ARGS) {
                 mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
                 xen_mc_flush();
                 argidx = roundup(b->argidx, sizeof(u64));
@@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args)
         ret.args = &b->args[argidx];
         b->argidx = argidx + args;
 
-        BUG_ON(b->argidx > MC_ARGS);
+        BUG_ON(b->argidx >= MC_ARGS);
         return ret;
 }
 
@@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
         struct multicall_space ret = { NULL, NULL };
 
         BUG_ON(preemptible());
-        BUG_ON(b->argidx > MC_ARGS);
+        BUG_ON(b->argidx >= MC_ARGS);
 
         if (b->mcidx == 0)
                 return ret;
@@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
         if (b->entries[b->mcidx - 1].op != op)
                 return ret;
 
-        if ((b->argidx + size) > MC_ARGS)
+        if ((b->argidx + size) >= MC_ARGS)
                 return ret;
 
         ret.mc = &b->entries[b->mcidx - 1];
         ret.args = &b->args[b->argidx];
         b->argidx += size;
 
-        BUG_ON(b->argidx > MC_ARGS);
+        BUG_ON(b->argidx >= MC_ARGS);
         return ret;
 }
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index be1a464f6d66..60aeeb56948f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
 
         memcpy(map_raw, map, sizeof(map));
         e820.nr_map = 0;
-#ifdef CONFIG_X86_32
         xen_extra_mem_start = mem_end;
-#else
-        xen_extra_mem_start = max((1ULL << 32), mem_end);
-#endif
         for (i = 0; i < memmap.nr_entries; i++) {
                 unsigned long long end;
 
@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
                 if (map[i].size > 0)
                         e820_add_region(map[i].addr, map[i].size, map[i].type);
         }
+        /* Align the balloon area so that max_low_pfn does not get set
+         * to be at the _end_ of the PCI gap at the far end (fee01000).
+         * Note that xen_extra_mem_start gets set in the loop above to be
+         * past the last E820 region. */
+        if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
+                xen_extra_mem_start = (1ULL<<32);
 
         /*
          * In domU, the ISA region is normal, usable memory, but we
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 41038c01de40..b4533a86d7e4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
         unsigned cpu;
+        unsigned int i;
 
         xen_init_lock_cpu(0);
 
         smp_store_cpu_info(0);
         cpu_data(0).x86_max_cores = 1;
+
+        for_each_possible_cpu(i) {
+                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+        }
         set_cpu_sibling_map(0);
 
         if (xen_smp_intr_init(0))