Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/apb_timer.h      2
-rw-r--r--  arch/x86/include/asm/memblock.h       2
-rw-r--r--  arch/x86/include/asm/mmzone_32.h     11
-rw-r--r--  arch/x86/include/asm/mmzone_64.h      3
-rw-r--r--  arch/x86/include/asm/pvclock.h        9
-rw-r--r--  arch/x86/kernel/Makefile              2
-rw-r--r--  arch/x86/kernel/amd_iommu.c          48
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c      8
-rw-r--r--  arch/x86/kernel/apic/apic.c           3
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c    8
-rw-r--r--  arch/x86/kernel/devicetree.c         11
-rw-r--r--  arch/x86/kernel/process.c             6
-rw-r--r--  arch/x86/kernel/process_32.c          1
-rw-r--r--  arch/x86/kernel/process_64.c          1
-rw-r--r--  arch/x86/kernel/smpboot.c            15
-rw-r--r--  arch/x86/kvm/emulate.c               94
-rw-r--r--  arch/x86/kvm/mmu.c                    2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h            2
-rw-r--r--  arch/x86/kvm/vmx.c                    3
-rw-r--r--  arch/x86/lguest/boot.c                1
-rw-r--r--  arch/x86/mm/memblock.c                4
-rw-r--r--  arch/x86/oprofile/op_model_amd.c     13
-rw-r--r--  arch/x86/pci/acpi.c                   2
-rw-r--r--  arch/x86/platform/efi/efi.c          29
24 files changed, 190 insertions, 90 deletions
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index 2fefa501d3ba..af60d8a2e288 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -62,7 +62,7 @@ extern int sfi_mtimer_num;
 #else /* CONFIG_APB_TIMER */
 
 static inline unsigned long apbt_quick_calibrate(void) {return 0; }
-static inline void apbt_time_init(void) {return 0; }
+static inline void apbt_time_init(void) { }
 
 #endif
 #endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba6978..0cd3800f33b9 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 5e83a416eca8..224e8c5eb307 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 #endif
 }
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
-})
-
 static inline int pfn_valid(int pfn)
 {
 	int nid = pfn_to_nid(pfn);
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index b3f88d7867c7..129d9aa3ceb3 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)		(node_data[nid])
 
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)	(NODE_DATA(nid)->node_start_pfn +	\
-				 NODE_DATA(nid)->node_spanned_pages)
 #endif
 #endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84acc1512..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	u64 product;
 #ifdef __i386__
 	u32 tmp1, tmp2;
+#else
+	ulong tmp;
 #endif
 
 	if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
 	__asm__ (
-		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+		"mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+		: [lo]"=a"(product),
+		  [hi]"=d"(tmp)
+		: "0"(delta),
+		  [mul_frac]"rm"((u64)mul_frac));
 #else
 #error implement me!
 #endif
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index f5abe3a245b8..90b06d4daee2 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -8,6 +8,7 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_pvclock.o = -pg
@@ -28,6 +29,7 @@ CFLAGS_paravirt.o := $(nostackp)
 GCOV_PROFILE_vsyscall_64.o	:= n
 GCOV_PROFILE_hpet.o		:= n
 GCOV_PROFILE_tsc.o		:= n
+GCOV_PROFILE_vread_tsc_64.o	:= n
 GCOV_PROFILE_paravirt.o		:= n
 
 # vread_tsc_64 is hot and should be fully optimized:
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb5fa34..7c3a95e54ec5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/dma.h>
 #include <asm/amd_iommu_proto.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
 	pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
 	if (pdev)
 		dev_data->alias = &pdev->dev;
+	else {
+		kfree(dev_data);
+		return -ENOTSUPP;
+	}
 
 	atomic_set(&dev_data->bind, 0);
 
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
 	return 0;
 }
 
+static void iommu_ignore_device(struct device *dev)
+{
+	u16 devid, alias;
+
+	devid = get_device_id(dev);
+	alias = amd_iommu_alias_table[devid];
+
+	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+
+	amd_iommu_rlookup_table[devid] = NULL;
+	amd_iommu_rlookup_table[alias] = NULL;
+}
+
 static void iommu_uninit_device(struct device *dev)
 {
 	kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
 			continue;
 
 		ret = iommu_init_device(&pdev->dev);
-		if (ret)
+		if (ret == -ENOTSUPP)
+			iommu_ignore_device(&pdev->dev);
+		else if (ret)
 			goto out_free;
 	}
 
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 	.dma_supported = amd_iommu_dma_supported,
 };
 
+static unsigned device_dma_ops_init(void)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned unhandled = 0;
+
+	for_each_pci_dev(pdev) {
+		if (!check_device(&pdev->dev)) {
+			unhandled += 1;
+			continue;
+		}
+
+		pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+	}
+
+	return unhandled;
+}
+
 /*
  * The function which clues the AMD IOMMU driver into dma_ops.
  */
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
 	struct amd_iommu *iommu;
-	int ret;
+	int ret, unhandled;
 
 	/*
 	 * first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
 	swiotlb = 0;
 
 	/* Make the driver finally visible to the drivers */
-	dma_ops = &amd_iommu_dma_ops;
+	unhandled = device_dma_ops_init();
+	if (unhandled && max_pfn > MAX_DMA32_PFN) {
+		/* There are unhandled devices - initialize swiotlb for them */
+		swiotlb = 1;
+	}
 
 	amd_iommu_stats_init();
 
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21120a8..bfc8453bd98d 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 {
 	u8 *p = (u8 *)h;
 	u8 *end = p, flags = 0;
-	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
-	u32 ext_flags = 0;
+	u16 devid = 0, devid_start = 0, devid_to = 0;
+	u32 dev_i, ext_flags = 0;
 	bool alias = false;
 	struct ivhd_entry *e;
 
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 /* Initializes the device->iommu mapping for the driver */
 static int __init init_iommu_devices(struct amd_iommu *iommu)
 {
-	u16 i;
+	u32 i;
 
 	for (i = iommu->first_device; i <= iommu->last_device; ++i)
 		set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
  */
 static void init_device_table(void)
 {
-	u16 devid;
+	u32 devid;
 
 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
 		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b961af86bfea..b9338b8cf420 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
 
 /*
  * If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * enables the vector. See also the BKDGs. Must be called with
+ * preemption disabled.
  */
 
 int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b511a011b7d0..adc66c3a1fef 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat);
 
 /* Direct Legacy VGA I/O traffic to designated IOH */
 int uv_set_vga_state(struct pci_dev *pdev, bool decode,
-		     unsigned int command_bits, bool change_bridge)
+		     unsigned int command_bits, u32 flags)
 {
 	int domain, bus, rc;
 
-	PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n",
-			pdev->devfn, decode, command_bits, change_bridge);
+	PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
+			pdev->devfn, decode, command_bits, flags);
 
-	if (!change_bridge)
+	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
 		return 0;
 
 	if ((command_bits & PCI_COMMAND_IO) == 0)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc8461835..9aeb78a23de4 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
+#include <linux/initrd.h>
 
 #include <asm/hpet.h>
 #include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+					    unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
+}
+#endif
+
 void __init add_dtb(u64 data)
 {
 	initial_dtb = data + offsetof(struct setup_data, data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 426a5b66f7e4..e1ba8cb24e4e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(pm_idle);
 #endif
 
@@ -399,7 +399,7 @@ void default_idle(void)
 		cpu_relax();
 	}
 }
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(default_idle);
 #endif
 
@@ -642,7 +642,7 @@ static int __init idle_setup(char *str)
 		boot_option_idle_override = IDLE_POLL;
 	} else if (!strcmp(str, "mwait")) {
 		boot_option_idle_override = IDLE_FORCE_MWAIT;
-		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\"\n");
+		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d128783af47..a3d0dc59067b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
 	set_user_gs(regs, 0);
 	regs->fs		= 0;
-	set_fs(USER_DS);
 	regs->ds		= __USER_DS;
 	regs->es		= __USER_DS;
 	regs->ss		= __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd922ac0d..ca6f7ab8df33 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
 	regs->cs		= _cs;
 	regs->ss		= _ss;
 	regs->flags		= X86_EFLAGS_IF;
-	set_fs(USER_DS);
 	/*
 	 * Free the old FP and other extended state
 	 */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index eefd96765e79..9fd3137230d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	x86_platform.nmi_init();
 
+	/*
+	 * Wait until the cpu which brought this one up marked it
+	 * online before enabling interrupts. If we don't do that then
+	 * we can end up waking up the softirq thread before this cpu
+	 * reached the active state, which makes the scheduler unhappy
+	 * and schedule the softirq thread on the wrong cpu. This is
+	 * only observable with forced threaded interrupts, but in
+	 * theory it could also happen w/o them. It's just way harder
+	 * to achieve.
+	 */
+	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+		cpu_relax();
+
 	/* enable local interrupts */
 	local_irq_enable();
 
@@ -1332,7 +1345,7 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))
+	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLSH))
 		return;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477feb18..adc98675cda0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
 #define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
 #define DstMem64    (6<<1)	/* 64bit memory operand */
 #define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
-#define DstMask     (7<<1)
+#define DstDX       (8<<1)	/* Destination is in DX register */
+#define DstMask     (0xf<<1)
 /* Source operand type. */
-#define SrcNone     (0<<4)	/* No source operand. */
-#define SrcReg      (1<<4)	/* Register operand. */
-#define SrcMem      (2<<4)	/* Memory operand. */
-#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
-#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
-#define SrcImm      (5<<4)	/* Immediate operand. */
-#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
-#define SrcOne      (7<<4)	/* Implied '1' */
-#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
-#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
-#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
-#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
-#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
-#define SrcAcc      (0xd<<4)	/* Source Accumulator */
-#define SrcImmU16   (0xe<<4)	/* Immediate operand, unsigned, 16 bits */
-#define SrcMask     (0xf<<4)
+#define SrcNone     (0<<5)	/* No source operand. */
+#define SrcReg      (1<<5)	/* Register operand. */
+#define SrcMem      (2<<5)	/* Memory operand. */
+#define SrcMem16    (3<<5)	/* Memory operand (16-bit). */
+#define SrcMem32    (4<<5)	/* Memory operand (32-bit). */
+#define SrcImm      (5<<5)	/* Immediate operand. */
+#define SrcImmByte  (6<<5)	/* 8-bit sign-extended immediate operand. */
+#define SrcOne      (7<<5)	/* Implied '1' */
+#define SrcImmUByte (8<<5)	/* 8-bit unsigned immediate operand. */
+#define SrcImmU     (9<<5)	/* Immediate operand, unsigned */
+#define SrcSI       (0xa<<5)	/* Source is in the DS:RSI */
+#define SrcImmFAddr (0xb<<5)	/* Source is immediate far address */
+#define SrcMemFAddr (0xc<<5)	/* Source is far address in memory */
+#define SrcAcc      (0xd<<5)	/* Source Accumulator */
+#define SrcImmU16   (0xe<<5)	/* Immediate operand, unsigned, 16 bits */
+#define SrcDX       (0xf<<5)	/* Source is in DX register */
+#define SrcMask     (0xf<<5)
 /* Generic ModRM decode. */
-#define ModRM       (1<<8)
+#define ModRM       (1<<9)
 /* Destination is only written; never read. */
-#define Mov         (1<<9)
-#define BitOp       (1<<10)
-#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
-#define String      (1<<12)	/* String instruction (rep capable) */
-#define Stack       (1<<13)	/* Stack instruction (push/pop) */
-#define GroupMask   (7<<14)	/* Opcode uses one of the group mechanisms */
-#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual   (2<<14)	/* Alternate decoding of mod == 3 */
-#define Prefix      (3<<14)	/* Instruction varies with 66/f2/f3 prefix */
-#define RMExt       (4<<14)	/* Opcode extension in ModRM r/m if mod == 3 */
-#define Sse         (1<<17)	/* SSE Vector instruction */
+#define Mov         (1<<10)
+#define BitOp       (1<<11)
+#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
+#define String      (1<<13)	/* String instruction (rep capable) */
+#define Stack       (1<<14)	/* Stack instruction (push/pop) */
+#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
+#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
+#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
+#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
+#define Sse         (1<<18)	/* SSE Vector instruction */
 /* Misc flags */
 #define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
 #define VendorSpecific (1<<22)	/* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
 	I(SrcImmByte | Mov | Stack, em_push),
 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
-	D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
-	D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
+	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
+	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
 	/* 0x70 - 0x7F */
 	X16(D(SrcImmByte)),
 	/* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
 	/* 0xE8 - 0xEF */
 	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
 	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
-	D2bvIP(SrcNone | DstAcc, in, check_perm_in),
-	D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
+	D2bvIP(SrcDX | DstAcc, in, check_perm_in),
+	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
 	/* 0xF0 - 0xF7 */
 	N, DI(ImplicitOps, icebp), N, N,
 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3370,7 +3372,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
 	bool op_prefix = false;
 	struct opcode opcode;
-	struct operand memop = { .type = OP_NONE };
+	struct operand memop = { .type = OP_NONE }, *memopp = NULL;
 
 	c->eip = ctxt->eip;
 	c->fetch.start = c->eip;
@@ -3545,9 +3547,6 @@ done_prefixes:
 	if (memop.type == OP_MEM && c->ad_bytes != 8)
 		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
-	if (memop.type == OP_MEM && c->rip_relative)
-		memop.addr.mem.ea += c->eip;
-
 	/*
 	 * Decode and fetch the source operand: register, memory
 	 * or immediate.
@@ -3569,6 +3568,7 @@ done_prefixes:
 			   c->op_bytes;
 	srcmem_common:
 		c->src = memop;
+		memopp = &c->src;
 		break;
 	case SrcImmU16:
 		rc = decode_imm(ctxt, &c->src, 2, false);
@@ -3613,6 +3613,12 @@ done_prefixes:
 		memop.bytes = c->op_bytes + 2;
 		goto srcmem_common;
 		break;
+	case SrcDX:
+		c->src.type = OP_REG;
+		c->src.bytes = 2;
+		c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
+		fetch_register_operand(&c->src);
+		break;
 	}
 
 	if (rc != X86EMUL_CONTINUE)
@@ -3659,6 +3665,7 @@ done_prefixes:
 	case DstMem:
 	case DstMem64:
 		c->dst = memop;
+		memopp = &c->dst;
 		if ((c->d & DstMask) == DstMem64)
 			c->dst.bytes = 8;
 		else
@@ -3682,14 +3689,23 @@ done_prefixes:
 		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
+	case DstDX:
+		c->dst.type = OP_REG;
+		c->dst.bytes = 2;
+		c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
+		fetch_register_operand(&c->dst);
+		break;
 	case ImplicitOps:
 		/* Special instructions do their own operand decoding. */
 	default:
 		c->dst.type = OP_NONE; /* Disable writeback. */
-		return 0;
+		break;
 	}
 
 done:
+	if (memopp && memopp->type == OP_MEM && c->rip_relative)
+		memopp->addr.mem.ea += c->eip;
+
 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
@@ -4027,7 +4043,6 @@ special_insn:
 		break;
 	case 0xec: /* in al,dx */
 	case 0xed: /* in (e/r)ax,dx */
-		c->src.val = c->regs[VCPU_REGS_RDX];
 	do_io_in:
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
 				     &c->dst.val))
@@ -4035,7 +4050,6 @@ special_insn:
 		break;
 	case 0xee: /* out dx,al */
 	case 0xef: /* out dx,(e/r)ax */
-		c->dst.val = c->regs[VCPU_REGS_RDX];
 	do_io_out:
 		ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
 				      &c->src.val, 1);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4c8594..aee38623b768 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
-	return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010c4cb..9d03ad4dd5ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    gva_t addr, u32 access)
 {
 	pt_element_t pte;
-	pt_element_t __user *ptep_user;
+	pt_element_t __user *uninitialized_var(ptep_user);
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f67469..d48ec60ea421 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
-	vmx_decache_cr3(vcpu);
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e191c096ab90..db832fd65ecb 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -993,6 +993,7 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
 static void lguest_time_init(void)
 {
 	/* Set up the timer interrupt (0) to go to our simple timer routine */
+	lguest_setup_irq(0);
 	irq_set_handler(0, lguest_time_irq);
 
 	clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83..992da5ec5a64 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
 	struct memblock_region *r;
 	u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
 		if (addr >= ei_last)
 			continue;
 		*sizep = ei_last - addr;
-		while (check_with_memblock_reserved_size(&addr, sizep, align))
+		while (memblock_x86_check_reserved_size(&addr, sizep, align))
 			;
 
 		if (*sizep)
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9fd8a567fe1e..9cbb710dc94b 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
 	return 0;
 }
 
+/*
+ * This runs only on the current cpu. We try to find an LVT offset and
+ * setup the local APIC. For this we must disable preemption. On
+ * success we initialize all nodes with this offset. This updates then
+ * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
+ * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
+ * amd_cpu_shutdown() using the new offset.
+ */
 static int force_ibs_eilvt_setup(void)
 {
 	int offset;
 	int ret;
 
-	/*
-	 * find the next free available EILVT entry, skip offset 0,
-	 * pin search to this cpu
-	 */
 	preempt_disable();
+	/* find the next free available EILVT entry, skip offset 0 */
 	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
 		if (get_eilvt(offset))
 			break;
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 0972315c3860..68c3c1395202 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
 	return false;
 }
 
-static void coalesce_windows(struct pci_root_info *info, int type)
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
 {
 	int i, j;
 	struct resource *res1, *res2;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560..474356b98ede 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		efi_memory_desc_t *md = p;
-		unsigned long long start = md->phys_addr;
-		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+		u64 start = md->phys_addr;
+		u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
 		if (md->type != EFI_BOOT_SERVICES_CODE &&
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
-
-		memblock_x86_reserve_range(start, start + size, "EFI Boot");
+		/* Only reserve where possible:
+		 * - Not within any already allocated areas
+		 * - Not over any memory area (really needed, if above?)
+		 * - Not within any part of the kernel
+		 * - Not the bios reserved area
+		 */
+		if ((start+size >= virt_to_phys(_text)
+				&& start <= virt_to_phys(_end)) ||
+			!e820_all_mapped(start, start+size, E820_RAM) ||
+			memblock_x86_check_reserved_size(&start, &size,
+							1<<EFI_PAGE_SHIFT)) {
+			/* Could not reserve, skip it */
+			md->num_pages = 0;
+			memblock_dbg(PFX "Could not reserve boot range "
+					"[0x%010llx-0x%010llx]\n",
+						start, start+size-1);
+		} else
+			memblock_x86_reserve_range(start, start+size,
+							"EFI Boot");
 	}
 }
 
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
 
+		/* Could not reserve boot area */
+		if (!size)
+			continue;
+
 		free_bootmem_late(start, size);
 	}
 }