author		Ingo Molnar <mingo@kernel.org>		2012-04-13 03:57:10 -0400
committer	Ingo Molnar <mingo@kernel.org>		2012-04-13 03:57:10 -0400
commit		a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (patch)
tree		a2c186cb828e3713c2ec48a4d7191166fb798b3d /arch/x86/kernel
parent		659c36fcda403013a01b85da07cf2d9711e6d6c7 (diff)
parent		0034102808e0dbbf3a2394b82b1bb40b5778de9e (diff)
Merge tag 'v3.4-rc2' into perf/core
Merge Linux 3.4-rc2: we were on v3.3, update the base.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/boot.c               |    5
-rw-r--r--  arch/x86/kernel/acpi/cstate.c             |    1
-rw-r--r--  arch/x86/kernel/amd_gart_64.c             |   11
-rw-r--r--  arch/x86/kernel/apic/apic.c               |   13
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c      |    3
-rw-r--r--  arch/x86/kernel/apic/io_apic.c            |  159
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c     |    2
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c        |    2
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c        |    7
-rw-r--r--  arch/x86/kernel/apm_32.c                  |    1
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c          |    6
-rw-r--r--  arch/x86/kernel/cpu/common.c              |    2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p5.c           |    1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c  |    1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/winchip.c      |    1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c        |    1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c             |   10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c          |    4
-rw-r--r--  arch/x86/kernel/cpuid.c                   |    1
-rw-r--r--  arch/x86/kernel/dumpstack.c               |    9
-rw-r--r--  arch/x86/kernel/entry_64.S                |   44
-rw-r--r--  arch/x86/kernel/i8259.c                   |    1
-rw-r--r--  arch/x86/kernel/irq.c                     |    7
-rw-r--r--  arch/x86/kernel/irqinit.c                 |    3
-rw-r--r--  arch/x86/kernel/kdebugfs.c                |    9
-rw-r--r--  arch/x86/kernel/kgdb.c                    |   61
-rw-r--r--  arch/x86/kernel/kvm.c                     |    4
-rw-r--r--  arch/x86/kernel/kvmclock.c                |   15
-rw-r--r--  arch/x86/kernel/ldt.c                     |    1
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c        |    1
-rw-r--r--  arch/x86/kernel/mca_32.c                  |    1
-rw-r--r--  arch/x86/kernel/module.c                  |    1
-rw-r--r--  arch/x86/kernel/msr.c                     |    1
-rw-r--r--  arch/x86/kernel/paravirt.c                |    1
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c          |   10
-rw-r--r--  arch/x86/kernel/pci-dma.c                 |    3
-rw-r--r--  arch/x86/kernel/pci-nommu.c               |    6
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c             |   17
-rw-r--r--  arch/x86/kernel/process.c                 |  125
-rw-r--r--  arch/x86/kernel/process_32.c              |   60
-rw-r--r--  arch/x86/kernel/process_64.c              |  136
-rw-r--r--  arch/x86/kernel/ptrace.c                  |  103
-rw-r--r--  arch/x86/kernel/setup.c                   |   12
-rw-r--r--  arch/x86/kernel/signal.c                  |  140
-rw-r--r--  arch/x86/kernel/smpboot.c                 |   10
-rw-r--r--  arch/x86/kernel/sys_x86_64.c              |    6
-rw-r--r--  arch/x86/kernel/syscall_64.c              |    8
-rw-r--r--  arch/x86/kernel/tboot.c                   |    9
-rw-r--r--  arch/x86/kernel/tce_64.c                  |    1
-rw-r--r--  arch/x86/kernel/tls.c                     |    5
-rw-r--r--  arch/x86/kernel/traps.c                   |  134
-rw-r--r--  arch/x86/kernel/tsc.c                     |   14
-rw-r--r--  arch/x86/kernel/vm86_32.c                 |    2
-rw-r--r--  arch/x86/kernel/vsyscall_64.c             |   27
-rw-r--r--  arch/x86/kernel/x86_init.c                |    5
55 files changed, 793 insertions, 430 deletions
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 406ed77216d0..a415b1f44365 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -239,7 +239,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
 	 * to not preallocating memory for all NR_CPUS
 	 * when we use CPU hotplug.
 	 */
-	if (!cpu_has_x2apic && (apic_id >= 0xff) && enabled)
+	if (!apic->apic_id_valid(apic_id) && enabled)
 		printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
 	else
 		acpi_register_lapic(apic_id, enabled);
@@ -642,6 +642,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	kfree(buffer.pointer);
 	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;
+	lapic = NULL;
 
 	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
 		goto out;
@@ -650,7 +651,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 		goto free_tmp_map;
 
 	cpumask_copy(tmp_map, cpu_present_mask);
-	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
+	acpi_register_lapic(physid, ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
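
The first hunk above swaps an open-coded x2APIC check for the per-driver apic->apic_id_valid() callback; the x2apic and UV hunks later in this diff supply the overrides. A sketch of the pattern (the callback bodies are illustrative, reconstructed from the 3.4-era asm/apic.h rather than quoted from it):

static int default_apic_id_valid(int apicid)
{
	return apicid < 255;	/* xAPIC IDs are 8 bits; 0xff is broadcast */
}

static int x2apic_apic_id_valid(int apicid)
{
	return 1;		/* x2APIC IDs are full 32-bit values */
}

A MADT x2apic entry whose ID fails the active driver's callback is ignored rather than registered.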
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index f50e7fb2a201..d2b7f27781bc 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -14,6 +14,7 @@
 #include <acpi/processor.h>
 #include <asm/acpi.h>
 #include <asm/mwait.h>
+#include <asm/special_insns.h>
 
 /*
  * Initialize bm_flags based on the CPU cache properties
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index b1e7c7f7a0af..e66311200cbd 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -477,7 +477,7 @@ error:
 /* allocate and map a coherent mapping */
 static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
-		    gfp_t flag)
+		    gfp_t flag, struct dma_attrs *attrs)
 {
 	dma_addr_t paddr;
 	unsigned long align_mask;
@@ -500,7 +500,8 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 		}
 		__free_pages(page, get_order(size));
 	} else
-		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
+						  attrs);
 
 	return NULL;
 }
@@ -508,7 +509,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 /* free a coherent mapping */
 static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
-		   dma_addr_t dma_addr)
+		   dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, get_order(size));
@@ -700,8 +701,8 @@ static struct dma_map_ops gart_dma_ops = {
 	.unmap_sg			= gart_unmap_sg,
 	.map_page			= gart_map_page,
 	.unmap_page			= gart_unmap_page,
-	.alloc_coherent			= gart_alloc_coherent,
-	.free_coherent			= gart_free_coherent,
+	.alloc				= gart_alloc_coherent,
+	.free				= gart_free_coherent,
 	.mapping_error			= gart_mapping_error,
 };
 
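
This is the first of several hunks in this merge that migrate dma_map_ops from .alloc_coherent/.free_coherent to .alloc/.free, threading a struct dma_attrs * through both callbacks. A minimal sketch of a driver-side pair after the change (the foo_ names are hypothetical, not from the tree):

static void *foo_alloc(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs)
{
	/* attrs carries optional per-mapping attributes; NULL is common */
	return dma_generic_alloc_coherent(dev, size, dma_handle, gfp, attrs);
}

static void foo_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

static struct dma_map_ops foo_dma_ops = {
	.alloc	= foo_alloc,
	.free	= foo_free,
};

The same mechanical conversion appears below in the calgary, nommu, swiotlb and pci-dma hunks.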
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2eec05b6d1b8..11544d8f1e97 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -383,20 +383,25 @@ static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
 
 static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
 {
-	unsigned int rsvd; /* 0: uninitialized */
+	unsigned int rsvd, vector;
 
 	if (offset >= APIC_EILVT_NR_MAX)
 		return ~0;
 
-	rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED;
+	rsvd = atomic_read(&eilvt_offsets[offset]);
 	do {
-		if (rsvd &&
-		    !eilvt_entry_is_changeable(rsvd, new))
+		vector = rsvd & ~APIC_EILVT_MASKED;	/* 0: unassigned */
+		if (vector && !eilvt_entry_is_changeable(vector, new))
 			/* may not change if vectors are different */
 			return rsvd;
 		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
 	} while (rsvd != new);
 
+	rsvd &= ~APIC_EILVT_MASKED;
+	if (rsvd && rsvd != vector)
+		pr_info("LVT offset %d assigned for vector 0x%02x\n",
+			offset, rsvd);
+
 	return new;
 }
 
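
The reworked reserve_eilvt_offset() is a standard lock-free reservation loop: read the slot, check that the current value is absent or compatible, then cmpxchg and retry on contention. A self-contained userspace sketch of the same shape, using C11 atomics (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

/* Claim `want` in *slot; fail if an incompatible value already won. */
static bool reserve_slot(_Atomic unsigned int *slot, unsigned int want)
{
	unsigned int cur = atomic_load(slot);

	for (;;) {
		if (cur != 0 && cur != want)
			return false;		/* owned by someone else */
		if (cur == want)
			return true;		/* already published */
		if (atomic_compare_exchange_weak(slot, &cur, want))
			return true;		/* we claimed it */
		/* CAS failed: cur was refreshed; re-validate and retry */
	}
}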
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index d9ea5f331ac5..899803e03214 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -229,11 +229,10 @@ static int __init numachip_system_init(void)
 }
 early_initcall(numachip_system_init);
 
-static int __cpuinit numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strncmp(oem_id, "NUMASC", 6)) {
 		numachip_system = 1;
-		setup_force_cpu_cap(X86_FEATURE_X2APIC);
 		return 1;
 	}
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6d10a66fc5a9..e88300d8e80a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -64,9 +64,28 @@
 #include <asm/apic.h>
 
 #define __apicdebuginit(type) static type __init
+
 #define for_each_irq_pin(entry, head) \
 	for (entry = head; entry; entry = entry->next)
 
+static void __init __ioapic_init_mappings(void);
+
+static unsigned int __io_apic_read  (unsigned int apic, unsigned int reg);
+static void __io_apic_write (unsigned int apic, unsigned int reg, unsigned int val);
+static void __io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
+
+static struct io_apic_ops io_apic_ops = {
+	.init	= __ioapic_init_mappings,
+	.read	= __io_apic_read,
+	.write	= __io_apic_write,
+	.modify	= __io_apic_modify,
+};
+
+void __init set_io_apic_ops(const struct io_apic_ops *ops)
+{
+	io_apic_ops = *ops;
+}
+
 /*
  * Is the SiS APIC rmw bug present ?
  * -1 = don't know, 0 = no, 1 = yes
@@ -294,6 +313,22 @@ static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
 	irq_free_desc(at);
 }
 
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+	return io_apic_ops.read(apic, reg);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	io_apic_ops.write(apic, reg, value);
+}
+
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	io_apic_ops.modify(apic, reg, value);
+}
+
+
 struct io_apic {
 	unsigned int index;
 	unsigned int unused[3];
@@ -314,16 +349,17 @@ static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
 	writel(vector, &io_apic->eoi);
 }
 
-static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+static unsigned int __io_apic_read(unsigned int apic, unsigned int reg)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
 	writel(reg, &io_apic->index);
 	return readl(&io_apic->data);
 }
 
-static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+static void __io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
+
 	writel(reg, &io_apic->index);
 	writel(value, &io_apic->data);
 }
@@ -334,7 +370,7 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
  *
  * Older SiS APIC requires we rewrite the index register
  */
-static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+static void __io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
 
@@ -377,6 +413,7 @@ static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
 
 	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
 	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
+
 	return eu.entry;
 }
 
@@ -384,9 +421,11 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
 {
 	union entry_union eu;
 	unsigned long flags;
+
 	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	eu.entry = __ioapic_read_entry(apic, pin);
 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+
 	return eu.entry;
 }
 
@@ -396,8 +435,7 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
  * the interrupt, and we need to make sure the entry is fully populated
  * before that happens.
  */
-static void
-__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	union entry_union eu = {{0, 0}};
 
@@ -409,6 +447,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
+
 	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__ioapic_write_entry(apic, pin, e);
 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -435,8 +474,7 @@ static void ioapic_mask_entry(int apic, int pin)
  * shared ISA-space IRQs, so we have to support them. We are super
  * fast in the common case, and fast for shared ISA-space IRQs.
  */
-static int
-__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
+static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
 {
 	struct irq_pin_list **last, *entry;
 
@@ -521,6 +559,7 @@ static void io_apic_sync(struct irq_pin_list *entry)
 	 * a dummy read from the IO-APIC
 	 */
 	struct io_apic __iomem *io_apic;
+
 	io_apic = io_apic_base(entry->apic);
 	readl(&io_apic->data);
 }
@@ -2512,21 +2551,73 @@ static void ack_apic_edge(struct irq_data *data)
 
 atomic_t irq_mis_count;
 
-static void ack_apic_level(struct irq_data *data)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	int i, do_unmask_irq = 0, irq = data->irq;
-	unsigned long v;
-
-	irq_complete_move(cfg);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+{
 	/* If we are moving the irq we need to mask it */
 	if (unlikely(irqd_is_setaffinity_pending(data))) {
-		do_unmask_irq = 1;
 		mask_ioapic(cfg);
+		return true;
 	}
+	return false;
+}
+
+static inline void ioapic_irqd_unmask(struct irq_data *data,
+				      struct irq_cfg *cfg, bool masked)
+{
+	if (unlikely(masked)) {
+		/* Only migrate the irq if the ack has been received.
+		 *
+		 * On rare occasions the broadcast level triggered ack gets
+		 * delayed going to ioapics, and if we reprogram the
+		 * vector while Remote IRR is still set the irq will never
+		 * fire again.
+		 *
+		 * To prevent this scenario we read the Remote IRR bit
+		 * of the ioapic. This has two effects.
+		 * - On any sane system the read of the ioapic will
+		 *   flush writes (and acks) going to the ioapic from
+		 *   this cpu.
+		 * - We get to see if the ACK has actually been delivered.
+		 *
+		 * Based on failed experiments of reprogramming the
+		 * ioapic entry from outside of irq context starting
+		 * with masking the ioapic entry and then polling until
+		 * Remote IRR was clear before reprogramming the
+		 * ioapic I don't trust the Remote IRR bit to be
+		 * completey accurate.
+		 *
+		 * However there appears to be no other way to plug
+		 * this race, so if the Remote IRR bit is not
+		 * accurate and is causing problems then it is a hardware bug
+		 * and you can go talk to the chipset vendor about it.
+		 */
+		if (!io_apic_level_ack_pending(cfg))
+			irq_move_masked_irq(data);
+		unmask_ioapic(cfg);
+	}
+}
+#else
+static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+{
+	return false;
+}
+static inline void ioapic_irqd_unmask(struct irq_data *data,
+				      struct irq_cfg *cfg, bool masked)
+{
+}
 #endif
 
+static void ack_apic_level(struct irq_data *data)
+{
+	struct irq_cfg *cfg = data->chip_data;
+	int i, irq = data->irq;
+	unsigned long v;
+	bool masked;
+
+	irq_complete_move(cfg);
+	masked = ioapic_irqd_mask(data, cfg);
+
 	/*
 	 * It appears there is an erratum which affects at least version 0x11
 	 * of I/O APIC (that's the 82093AA and cores integrated into various
@@ -2581,38 +2672,7 @@ static void ack_apic_level(struct irq_data *data)
 		eoi_ioapic_irq(irq, cfg);
 	}
 
-	/* Now we can move and renable the irq */
-	if (unlikely(do_unmask_irq)) {
-		/* Only migrate the irq if the ack has been received.
-		 *
-		 * On rare occasions the broadcast level triggered ack gets
-		 * delayed going to ioapics, and if we reprogram the
-		 * vector while Remote IRR is still set the irq will never
-		 * fire again.
-		 *
-		 * To prevent this scenario we read the Remote IRR bit
-		 * of the ioapic. This has two effects.
-		 * - On any sane system the read of the ioapic will
-		 *   flush writes (and acks) going to the ioapic from
-		 *   this cpu.
-		 * - We get to see if the ACK has actually been delivered.
-		 *
-		 * Based on failed experiments of reprogramming the
-		 * ioapic entry from outside of irq context starting
-		 * with masking the ioapic entry and then polling until
-		 * Remote IRR was clear before reprogramming the
-		 * ioapic I don't trust the Remote IRR bit to be
-		 * completey accurate.
-		 *
-		 * However there appears to be no other way to plug
-		 * this race, so if the Remote IRR bit is not
-		 * accurate and is causing problems then it is a hardware bug
-		 * and you can go talk to the chipset vendor about it.
-		 */
-		if (!io_apic_level_ack_pending(cfg))
-			irq_move_masked_irq(data);
-		unmask_ioapic(cfg);
-	}
+	ioapic_irqd_unmask(data, cfg, masked);
 }
 
 #ifdef CONFIG_IRQ_REMAP
@@ -3873,6 +3933,11 @@ static struct resource * __init ioapic_setup_resources(int nr_ioapics)
 
 void __init ioapic_and_gsi_init(void)
 {
+	io_apic_ops.init();
+}
+
+static void __init __ioapic_init_mappings(void)
+{
 	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
 	struct resource *ioapic_res;
 	int i;
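
The new io_apic_ops structure turns the IO-APIC register accessors into replaceable hooks so that a paravirtualized platform can interpose before ioapic_and_gsi_init() invokes io_apic_ops.init() (Xen is the motivating user, as far as the surrounding history suggests). A hedged sketch of an override; all myplat_ symbols are hypothetical:

static void __init myplat_ioapic_init(void) { }

static unsigned int myplat_io_apic_read(unsigned int apic, unsigned int reg)
{
	return 0;	/* e.g. forward the access to a hypervisor */
}

static void myplat_io_apic_write(unsigned int apic, unsigned int reg,
				 unsigned int val) { }
static void myplat_io_apic_modify(unsigned int apic, unsigned int reg,
				  unsigned int val) { }

static struct io_apic_ops myplat_io_apic_ops = {
	.init	= myplat_ioapic_init,
	.read	= myplat_io_apic_read,
	.write	= myplat_io_apic_write,
	.modify	= myplat_io_apic_modify,
};

void __init myplat_setup(void)
{
	/* must run before ioapic_and_gsi_init() calls io_apic_ops.init() */
	set_io_apic_ops(&myplat_io_apic_ops);
}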
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 9193713060a9..48f3103b3c93 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -213,7 +213,7 @@ static struct apic apic_x2apic_cluster = {
 	.name				= "cluster x2apic",
 	.probe				= x2apic_cluster_probe,
 	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
-	.apic_id_valid			= default_apic_id_valid,
+	.apic_id_valid			= x2apic_apic_id_valid,
 	.apic_id_registered		= x2apic_apic_id_registered,
 
 	.irq_delivery_mode		= dest_LowestPrio,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index bcd1db6eaca9..8a778db45e3a 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -119,7 +119,7 @@ static struct apic apic_x2apic_phys = {
 	.name				= "physical x2apic",
 	.probe				= x2apic_phys_probe,
 	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
-	.apic_id_valid			= default_apic_id_valid,
+	.apic_id_valid			= x2apic_apic_id_valid,
 	.apic_id_registered		= x2apic_apic_id_registered,
 
 	.irq_delivery_mode		= dest_Fixed,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index fc4771425852..87bfa69e216e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -266,6 +266,11 @@ static void uv_send_IPI_all(int vector)
 	uv_send_IPI_mask(cpu_online_mask, vector);
 }
 
+static int uv_apic_id_valid(int apicid)
+{
+	return 1;
+}
+
 static int uv_apic_id_registered(void)
 {
 	return 1;
@@ -351,7 +356,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.name				= "UV large system",
 	.probe				= uv_probe,
 	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
-	.apic_id_valid			= default_apic_id_valid,
+	.apic_id_valid			= uv_apic_id_valid,
 	.apic_id_registered		= uv_apic_id_registered,
 
 	.irq_delivery_mode		= dest_Fixed,
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 5d56931a15b3..459e78cbf61e 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -231,7 +231,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/i8253.h>
 
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/olpc.h>
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 834e897b1e25..1b4754f82ba7 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,6 +1,12 @@
 #include <asm/ia32.h>
 
 #define __SYSCALL_64(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
 static char syscalls_64[] = {
 #include <asm/syscalls_64.h>
 };
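
The [nr] = 1 initializers are a counting trick: every syscall macro expands to one designated array element, so sizeof(syscalls_64) evaluates to the highest syscall number plus one with no separately maintained constant. A standalone illustration (numbers hypothetical):

#include <stdio.h>

#define SYSCALL(nr, sym) [nr] = 1,	/* one byte at index nr */

static char syscalls[] = {
	SYSCALL(0, read)
	SYSCALL(1, write)
	SYSCALL(59, execve)
};

int main(void)
{
	/* the array is sized by the largest index used: prints 60 */
	printf("max syscall nr + 1 = %zu\n", sizeof(syscalls));
	return 0;
}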
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e49477444fff..67e258362a3d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -999,7 +999,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 	else
 		printk(KERN_CONT "\n");
 
-	__print_cpu_msr();
+	print_cpu_msr(c);
 }
 
 void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 5c0e6533d9bc..2d5454cd2c4f 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -9,7 +9,6 @@
 #include <linux/smp.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 67bb17a37a0a..47a1870279aa 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -25,7 +25,6 @@
 #include <linux/cpu.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/idle.h>
 #include <asm/mce.h>
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 54060f565974..2d7998fb628c 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -8,7 +8,6 @@
 #include <linux/init.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 97b26356e9ee..75772ae6c65f 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -12,7 +12,6 @@
 #include <asm/processor-flags.h>
 #include <asm/cpufeature.h>
 #include <asm/tlbflush.h>
-#include <asm/system.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
 #include <asm/pat.h>
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 79289632cb27..a041e094b8b9 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -167,6 +167,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 {
 	int err = 0;
 	mtrr_type type;
+	unsigned long base;
 	unsigned long size;
 	struct mtrr_sentry sentry;
 	struct mtrr_gentry gentry;
@@ -267,14 +268,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 #endif
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+		mtrr_if->get(gentry.regnum, &base, &size, &type);
 
 		/* Hide entries that go above 4GB */
-		if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
+		if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
 		    || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
 			gentry.base = gentry.size = gentry.type = 0;
 		else {
-			gentry.base <<= PAGE_SHIFT;
+			gentry.base = base << PAGE_SHIFT;
 			gentry.size = size << PAGE_SHIFT;
 			gentry.type = type;
 		}
@@ -321,11 +322,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 #endif
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+		mtrr_if->get(gentry.regnum, &base, &size, &type);
 		/* Hide entries that would overflow */
 		if (size != (__typeof__(gentry.size))size)
 			gentry.base = gentry.size = gentry.type = 0;
 		else {
+			gentry.base = base;
 			gentry.size = size;
 			gentry.type = type;
 		}
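
The new unsigned long base local exists because mtrr_if->get() writes a kernel-sized unsigned long through its pointer argument, while gentry.base is an ABI-sized struct field; with the x32 ABI added in this merge the two widths can disagree, and storing long-sized data through a pointer to a narrower field clobbers neighbouring members. That rationale is inferred from the x32 work elsewhere in this merge; the failure mode in miniature:

#include <stdio.h>

struct gentry {
	unsigned int base;	/* ABI field, narrower than long on LP64 */
	unsigned int size;
};

static void get_range(unsigned long *base, unsigned long *size)
{
	*base = 0x12345678UL;
	*size = 0x1000UL;
}

int main(void)
{
	struct gentry g = { 0, 0 };
	unsigned long base, size;

	/* Wrong: get_range((unsigned long *)&g.base, ...) would store
	 * 8 bytes into a 4-byte field and corrupt g.size on LP64.
	 * Right: read into properly typed locals, then narrow. */
	get_range(&base, &size);
	g.base = (unsigned int)base;
	g.size = (unsigned int)size;
	printf("base=%x size=%x\n", g.base, g.size);
	return 0;
}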
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f02672a6617e..bb8e03407e18 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,7 +29,6 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
-#include <asm/compat.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
 #include <asm/timer.h>
@@ -1763,6 +1762,9 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
 static inline int
 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index a524353d93f2..39472dd2323f 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -43,7 +43,6 @@
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/system.h>
 
 static struct class *cpuid_class;
 
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 4025fe4f928f..1b81839b6c88 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -37,13 +37,16 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
 			struct thread_info *tinfo, int *graph)
 {
-	struct task_struct *task = tinfo->task;
+	struct task_struct *task;
 	unsigned long ret_addr;
-	int index = task->curr_ret_stack;
+	int index;
 
 	if (addr != (unsigned long)return_to_handler)
 		return;
 
+	task = tinfo->task;
+	index = task->curr_ret_stack;
+
 	if (!task->ret_stack || index < *graph)
 		return;
 
@@ -265,7 +268,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 #endif
 	printk("\n");
 	if (notify_die(DIE_OOPS, str, regs, err,
-			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
 		return 1;
 
 	show_registers(regs);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 734ebd1d3caa..cdc79b5cfcd9 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -481,7 +481,12 @@ GLOBAL(system_call_after_swapgs)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz tracesys
 system_call_fastpath:
+#if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
+#else
+	andl $__SYSCALL_MASK,%eax
+	cmpl $__NR_syscall_max,%eax
+#endif
 	ja badsys
 	movq %r10,%rcx
 	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
@@ -595,7 +600,12 @@ tracesys:
 	 */
 	LOAD_ARGS ARGOFFSET, 1
 	RESTORE_REST
+#if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
+#else
+	andl $__SYSCALL_MASK,%eax
+	cmpl $__NR_syscall_max,%eax
+#endif
 	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
@@ -735,6 +745,40 @@ ENTRY(stub_rt_sigreturn)
 	CFI_ENDPROC
 END(stub_rt_sigreturn)
 
+#ifdef CONFIG_X86_X32_ABI
+	PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
+
+ENTRY(stub_x32_rt_sigreturn)
+	CFI_STARTPROC
+	addq $8, %rsp
+	PARTIAL_FRAME 0
+	SAVE_REST
+	movq %rsp,%rdi
+	FIXUP_TOP_OF_STACK %r11
+	call sys32_x32_rt_sigreturn
+	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
+	RESTORE_REST
+	jmp int_ret_from_sys_call
+	CFI_ENDPROC
+END(stub_x32_rt_sigreturn)
+
+ENTRY(stub_x32_execve)
+	CFI_STARTPROC
+	addq $8, %rsp
+	PARTIAL_FRAME 0
+	SAVE_REST
+	FIXUP_TOP_OF_STACK %r11
+	movq %rsp, %rcx
+	call sys32_execve
+	RESTORE_TOP_OF_STACK %r11
+	movq %rax,RAX(%rsp)
+	RESTORE_REST
+	jmp int_ret_from_sys_call
+	CFI_ENDPROC
+END(stub_x32_execve)
+
+#endif
+
 /*
  * Build the entry stubs and pointer table with some assembler magic.
  * We pack 7 stubs into a single 32-byte chunk, which will fit in a
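
The __SYSCALL_MASK conditionals are x32 support: x32 system calls arrive with a flag bit set in the syscall number, and when CONFIG_X86_X32_ABI is enabled that bit must be masked off before the table bounds check, while native-only kernels keep the single cmpq. A C rendering of the dispatch logic the assembly implements (the bit value follows the 3.4-era asm/unistd.h, but treat this as a sketch; nr_syscall_max is a hypothetical extern):

#include <linux/errno.h>

#define X32_SYSCALL_BIT	0x40000000u	/* flag bit on x32 numbers */

#ifdef CONFIG_X86_X32_ABI
# define SYSCALL_MASK	(~X32_SYSCALL_BIT)
#else
# define SYSCALL_MASK	(~0u)		/* masking compiles away */
#endif

typedef long (*syscall_fn_t)(void);
extern const syscall_fn_t sys_call_table[];
extern const unsigned int nr_syscall_max;

static long dispatch(unsigned int nr)
{
	nr &= SYSCALL_MASK;		/* andl $__SYSCALL_MASK,%eax */
	if (nr > nr_syscall_max)	/* ja badsys */
		return -ENOSYS;
	return sys_call_table[nr]();	/* call *sys_call_table(,%rax,8) */
}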
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 610485223bdb..36d1853e91af 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -15,7 +15,6 @@
 #include <linux/delay.h>
 
 #include <linux/atomic.h>
-#include <asm/system.h>
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/pgtable.h>
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7943e0c21bde..3dafc6003b7c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -282,8 +282,13 @@ void fixup_irqs(void)
 		else if (!(warned++))
 			set_affinity = 0;
 
+		/*
+		 * We unmask if the irq was not marked masked by the
+		 * core code. That respects the lazy irq disable
+		 * behaviour.
+		 */
 		if (!irqd_can_move_in_process_context(data) &&
-		    !irqd_irq_disabled(data) && chip->irq_unmask)
+		    !irqd_irq_masked(data) && chip->irq_unmask)
 			chip->irq_unmask(data);
 
 		raw_spin_unlock(&desc->lock);
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 43e2b1cff0a7..252981afd6c4 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -16,7 +16,6 @@
 #include <linux/delay.h>
 
 #include <linux/atomic.h>
-#include <asm/system.h>
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/pgtable.h>
@@ -61,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
 	outb(0, 0xF0);
 	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
 		return IRQ_NONE;
-	math_error(get_irq_regs(), 0, 16);
+	math_error(get_irq_regs(), 0, X86_TRAP_MF);
 	return IRQ_HANDLED;
 }
 
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 90fcf62854bb..1d5d31ea686b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -68,16 +68,9 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
 	return count;
 }
 
-static int setup_data_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
 static const struct file_operations fops_setup_data = {
 	.read		= setup_data_read,
-	.open		= setup_data_open,
+	.open		= simple_open,
 	.llseek		= default_llseek,
 };
 
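
simple_open() is the libfs helper (added in this same release) that does exactly what the deleted setup_data_open() did, letting boilerplate open methods across the tree be dropped. Its body in fs/libfs.c is essentially:

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}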
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index fdc37b3d0ce3..8bfb6146f753 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -43,10 +43,11 @@
 #include <linux/smp.h>
 #include <linux/nmi.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/uaccess.h>
+#include <linux/memory.h>
 
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
 
@@ -742,6 +743,64 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	regs->ip = ip;
 }
 
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+	int err;
+	char opc[BREAK_INSTR_SIZE];
+
+	bpt->type = BP_BREAKPOINT;
+	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+				BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	err = probe_kernel_write((char *)bpt->bpt_addr,
+				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+#ifdef CONFIG_DEBUG_RODATA
+	if (!err)
+		return err;
+	/*
+	 * It is safe to call text_poke() because normal kernel execution
+	 * is stopped on all cores, so long as the text_mutex is not locked.
+	 */
+	if (mutex_is_locked(&text_mutex))
+		return -EBUSY;
+	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+		  BREAK_INSTR_SIZE);
+	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
+		return -EINVAL;
+	bpt->type = BP_POKE_BREAKPOINT;
+#endif /* CONFIG_DEBUG_RODATA */
+	return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+#ifdef CONFIG_DEBUG_RODATA
+	int err;
+	char opc[BREAK_INSTR_SIZE];
+
+	if (bpt->type != BP_POKE_BREAKPOINT)
+		goto knl_write;
+	/*
+	 * It is safe to call text_poke() because normal kernel execution
+	 * is stopped on all cores, so long as the text_mutex is not locked.
+	 */
+	if (mutex_is_locked(&text_mutex))
+		goto knl_write;
+	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
+	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
+		goto knl_write;
+	return err;
+knl_write:
+#endif /* CONFIG_DEBUG_RODATA */
+	return probe_kernel_write((char *)bpt->bpt_addr,
+				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+}
+
 struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: */
 	.gdb_bpt_instr		= { 0xcc },
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 694d801bf606..b8ba6e4a27e4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -38,6 +38,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
+#include <asm/idle.h>
 
 static int kvmapf = 1;
 
@@ -253,7 +254,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 		kvm_async_pf_task_wait((u32)read_cr2());
 		break;
 	case KVM_PV_REASON_PAGE_READY:
+		rcu_irq_enter();
+		exit_idle();
 		kvm_async_pf_task_wake((u32)read_cr2());
+		rcu_irq_exit();
 		break;
 	}
 }
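
The rcu_irq_enter()/rcu_irq_exit() bracketing matters because the async page-fault notification can arrive while the CPU sits in an RCU-idle section of the idle loop; work that may use RCU must first tell RCU the CPU is temporarily non-idle. The general shape, as a hedged sketch (handler and callee are hypothetical):

static void my_pv_event_handler(void)
{
	rcu_irq_enter();	/* CPU may be inside rcu_idle_enter()/exit() */
	exit_idle();		/* leave x86-64 idle-notifier state, too */

	do_work_that_may_use_rcu();

	rcu_irq_exit();
}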
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 44842d756b29..f8492da65bfc 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -136,6 +136,15 @@ int kvm_register_clock(char *txt)
 	return ret;
 }
 
+static void kvm_save_sched_clock_state(void)
+{
+}
+
+static void kvm_restore_sched_clock_state(void)
+{
+	kvm_register_clock("primary cpu clock, resume");
+}
+
 #ifdef CONFIG_X86_LOCAL_APIC
 static void __cpuinit kvm_setup_secondary_clock(void)
 {
@@ -144,8 +153,6 @@ static void __cpuinit kvm_setup_secondary_clock(void)
 	 * we shouldn't fail.
 	 */
 	WARN_ON(kvm_register_clock("secondary cpu clock"));
-	/* ok, done with our trickery, call native */
-	setup_secondary_APIC_clock();
 }
 #endif
 
@@ -194,9 +201,11 @@ void __init kvmclock_init(void)
 	x86_platform.get_wallclock = kvm_get_wallclock;
 	x86_platform.set_wallclock = kvm_set_wallclock;
 #ifdef CONFIG_X86_LOCAL_APIC
-	x86_cpuinit.setup_percpu_clockev =
+	x86_cpuinit.early_percpu_clock_init =
 		kvm_setup_secondary_clock;
 #endif
+	x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
+	x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
 	machine_ops.shutdown = kvm_shutdown;
 #ifdef CONFIG_KEXEC
 	machine_ops.crash_shutdown = kvm_crash_shutdown;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ea697263b373..ebc987398923 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -15,7 +15,6 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 
-#include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index a3fa43ba5d3b..5b19e4d78b00 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -23,7 +23,6 @@
 #include <asm/apic.h>
 #include <asm/cpufeature.h>
 #include <asm/desc.h>
-#include <asm/system.h>
 #include <asm/cacheflush.h>
 #include <asm/debugreg.h>
 
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c
index 177183cbb6ae..7eb1e2b97827 100644
--- a/arch/x86/kernel/mca_32.c
+++ b/arch/x86/kernel/mca_32.c
@@ -43,7 +43,6 @@
 #include <linux/mca.h>
 #include <linux/kprobes.h>
 #include <linux/slab.h>
-#include <asm/system.h>
 #include <asm/io.h>
 #include <linux/proc_fs.h>
 #include <linux/mman.h>
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 925179f871de..f21fd94ac897 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -26,7 +26,6 @@
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
 
-#include <asm/system.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 96356762a51d..eb113693f043 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -40,7 +40,6 @@
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/system.h>
 
 static struct class *msr_class;
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9c57c02e54f6..ab137605e694 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -38,6 +38,7 @@
 #include <asm/apic.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
+#include <asm/special_insns.h>
 
 /* nop stub */
 void _paravirt_nop(void)
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 726494b58345..d0b2fb9ccbb1 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -42,7 +42,6 @@
 #include <asm/calgary.h>
 #include <asm/tce.h>
 #include <asm/pci-direct.h>
-#include <asm/system.h>
 #include <asm/dma.h>
 #include <asm/rio.h>
 #include <asm/bios_ebda.h>
@@ -431,7 +430,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
 }
 
 static void* calgary_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flag)
+	dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -464,7 +463,8 @@ error:
 }
 
 static void calgary_free_coherent(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle)
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	unsigned int npages;
 	struct iommu_table *tbl = find_iommu_table(dev);
@@ -477,8 +477,8 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 }
 
 static struct dma_map_ops calgary_dma_ops = {
-	.alloc_coherent = calgary_alloc_coherent,
-	.free_coherent = calgary_free_coherent,
+	.alloc = calgary_alloc_coherent,
+	.free = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
 	.map_page = calgary_map_page,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 28e5e06fcba4..3003250ac51d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -96,7 +96,8 @@ void __init pci_iommu_alloc(void)
 	}
 }
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag)
+				 dma_addr_t *dma_addr, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
 	struct page *page;
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 3af4af810c07..f96050685b46 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -75,7 +75,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 }
 
 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_addr)
+				dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 }
@@ -96,8 +96,8 @@ static void nommu_sync_sg_for_device(struct device *dev,
 }
 
 struct dma_map_ops nommu_dma_ops = {
-	.alloc_coherent		= dma_generic_alloc_coherent,
-	.free_coherent		= nommu_free_coherent,
+	.alloc			= dma_generic_alloc_coherent,
+	.free			= nommu_free_coherent,
 	.map_sg			= nommu_map_sg,
 	.map_page		= nommu_map_page,
 	.sync_single_for_device	= nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 8f972cbddef0..6c483ba98b9c 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -15,21 +15,30 @@
 int swiotlb __read_mostly;
 
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flags)
+					dma_addr_t *dma_handle, gfp_t flags,
+					struct dma_attrs *attrs)
 {
 	void *vaddr;
 
-	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
+					   attrs);
 	if (vaddr)
 		return vaddr;
 
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
+static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = x86_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = x86_swiotlb_alloc_coherent,
+	.free = x86_swiotlb_free_coherent,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14baf78d5a1f..1d92a5ab6e8b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -12,10 +12,12 @@
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
 #include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
 #include <asm/idle.h>
@@ -23,6 +25,24 @@
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
 
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
@@ -342,36 +362,105 @@ void (*pm_idle)(void);
342EXPORT_SYMBOL(pm_idle); 362EXPORT_SYMBOL(pm_idle);
343#endif 363#endif
344 364
345#ifdef CONFIG_X86_32 365static inline int hlt_use_halt(void)
346/*
347 * This halt magic was a workaround for ancient floppy DMA
348 * wreckage. It should be safe to remove.
349 */
350static int hlt_counter;
351void disable_hlt(void)
352{ 366{
353 hlt_counter++; 367 return 1;
354} 368}
355EXPORT_SYMBOL(disable_hlt);
356 369
357void enable_hlt(void) 370#ifndef CONFIG_SMP
371static inline void play_dead(void)
358{ 372{
359 hlt_counter--; 373 BUG();
360} 374}
361EXPORT_SYMBOL(enable_hlt); 375#endif
362 376
363static inline int hlt_use_halt(void) 377#ifdef CONFIG_X86_64
378void enter_idle(void)
364{ 379{
365 return (!hlt_counter && boot_cpu_data.hlt_works_ok); 380 percpu_write(is_idle, 1);
381 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
366} 382}
367#else 383
368static inline int hlt_use_halt(void) 384static void __exit_idle(void)
369{ 385{
370 return 1; 386 if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
387 return;
388 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
389}
390
391/* Called from interrupts to signify idle end */
392void exit_idle(void)
393{
394 /* idle loop has pid 0 */
395 if (current->pid)
396 return;
397 __exit_idle();
371} 398}
372#endif 399#endif
373 400
374/* 401/*
402 * The idle thread. There's no useful work to be
403 * done, so just try to conserve power and have a
404 * low exit latency (ie sit in a loop waiting for
405 * somebody to say that they'd like to reschedule)
406 */
407void cpu_idle(void)
408{
409 /*
410 * If we're the non-boot CPU, nothing set the stack canary up
411 * for us. CPU0 already has it initialized but no harm in
412 * doing it again. This is a good place for updating it, as
413 * we wont ever return from this function (so the invalid
414 * canaries already on the stack wont ever trigger).
415 */
416 boot_init_stack_canary();
417 current_thread_info()->status |= TS_POLLING;
418
419 while (1) {
420 tick_nohz_idle_enter();
421
422 while (!need_resched()) {
423 rmb();
424
425 if (cpu_is_offline(smp_processor_id()))
426 play_dead();
427
428 /*
429 * Idle routines should keep interrupts disabled
430 * from here on, until they go to idle.
431 * Otherwise, idle callbacks can misfire.
432 */
433 local_touch_nmi();
434 local_irq_disable();
435
436 enter_idle();
437
438 /* Don't trace irqs off for idle */
439 stop_critical_timings();
440
441 /* enter_idle() needs rcu for notifiers */
442 rcu_idle_enter();
443
444 if (cpuidle_idle_call())
445 pm_idle();
446
447 rcu_idle_exit();
448 start_critical_timings();
449
450 /* In many cases the interrupt that ended idle
451 has already called exit_idle. But some idle
452 loops can be woken up without interrupt. */
453 __exit_idle();
454 }
455
456 tick_nohz_idle_exit();
457 preempt_enable_no_resched();
458 schedule();
459 preempt_disable();
460 }
461}
462
463/*
375 * We use this if we don't have any better 464 * We use this if we don't have any better
376 * idle routine.. 465 * idle routine..
377 */ 466 */
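
The idle notifier chain hoisted into process.c above is the one enter_idle()/__exit_idle() fire around the loop. For reference, a minimal sketch (not part of the patch; it assumes the asm/idle.h interface shown here, x86-64 only) of a consumer hooking the IDLE_START/IDLE_END events:

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/idle.h>

/* Runs atomically from the idle path, with interrupts disabled. */
static int sample_idle_event(struct notifier_block *nb,
			     unsigned long action, void *unused)
{
	if (action == IDLE_START)
		pr_debug("CPU entering idle\n");
	else if (action == IDLE_END)
		pr_debug("CPU leaving idle\n");
	return NOTIFY_OK;
}

static struct notifier_block sample_idle_nb = {
	.notifier_call = sample_idle_event,
};

static int __init sample_init(void)
{
	idle_notifier_register(&sample_idle_nb);
	return 0;
}

static void __exit sample_exit(void)
{
	idle_notifier_unregister(&sample_idle_nb);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");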
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9d7d4842bfaf..ae6847303e26 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -9,7 +9,6 @@
9 * This file handles the architecture-dependent parts of process handling.. 9 * This file handles the architecture-dependent parts of process handling..
10 */ 10 */
11 11
12#include <linux/stackprotector.h>
13#include <linux/cpu.h> 12#include <linux/cpu.h>
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/sched.h> 14#include <linux/sched.h>
@@ -31,17 +30,14 @@
31#include <linux/kallsyms.h> 30#include <linux/kallsyms.h>
32#include <linux/ptrace.h> 31#include <linux/ptrace.h>
33#include <linux/personality.h> 32#include <linux/personality.h>
34#include <linux/tick.h>
35#include <linux/percpu.h> 33#include <linux/percpu.h>
36#include <linux/prctl.h> 34#include <linux/prctl.h>
37#include <linux/ftrace.h> 35#include <linux/ftrace.h>
38#include <linux/uaccess.h> 36#include <linux/uaccess.h>
39#include <linux/io.h> 37#include <linux/io.h>
40#include <linux/kdebug.h> 38#include <linux/kdebug.h>
41#include <linux/cpuidle.h>
42 39
43#include <asm/pgtable.h> 40#include <asm/pgtable.h>
44#include <asm/system.h>
45#include <asm/ldt.h> 41#include <asm/ldt.h>
46#include <asm/processor.h> 42#include <asm/processor.h>
47#include <asm/i387.h> 43#include <asm/i387.h>
@@ -58,7 +54,7 @@
58#include <asm/idle.h> 54#include <asm/idle.h>
59#include <asm/syscalls.h> 55#include <asm/syscalls.h>
60#include <asm/debugreg.h> 56#include <asm/debugreg.h>
61#include <asm/nmi.h> 57#include <asm/switch_to.h>
62 58
63asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 59asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
64 60
@@ -70,60 +66,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
70 return ((unsigned long *)tsk->thread.sp)[3]; 66 return ((unsigned long *)tsk->thread.sp)[3];
71} 67}
72 68
73#ifndef CONFIG_SMP
74static inline void play_dead(void)
75{
76 BUG();
77}
78#endif
79
80/*
81 * The idle thread. There's no useful work to be
82 * done, so just try to conserve power and have a
83 * low exit latency (ie sit in a loop waiting for
84 * somebody to say that they'd like to reschedule)
85 */
86void cpu_idle(void)
87{
88 int cpu = smp_processor_id();
89
90 /*
91 * If we're the non-boot CPU, nothing set the stack canary up
92 * for us. CPU0 already has it initialized but no harm in
93 * doing it again. This is a good place for updating it, as
94 * we won't ever return from this function (so the invalid
95 * canaries already on the stack won't ever trigger).
96 */
97 boot_init_stack_canary();
98
99 current_thread_info()->status |= TS_POLLING;
100
101 /* endless idle loop with no priority at all */
102 while (1) {
103 tick_nohz_idle_enter();
104 rcu_idle_enter();
105 while (!need_resched()) {
106
107 check_pgt_cache();
108 rmb();
109
110 if (cpu_is_offline(cpu))
111 play_dead();
112
113 local_touch_nmi();
114 local_irq_disable();
115 /* Don't trace irqs off for idle */
116 stop_critical_timings();
117 if (cpuidle_idle_call())
118 pm_idle();
119 start_critical_timings();
120 }
121 rcu_idle_exit();
122 tick_nohz_idle_exit();
123 schedule_preempt_disabled();
124 }
125}
126
127void __show_regs(struct pt_regs *regs, int all) 69void __show_regs(struct pt_regs *regs, int all)
128{ 70{
129 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 71 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 292da13fc5aa..733ca39f367e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -14,7 +14,6 @@
14 * This file handles the architecture-dependent parts of process handling.. 14 * This file handles the architecture-dependent parts of process handling..
15 */ 15 */
16 16
17#include <linux/stackprotector.h>
18#include <linux/cpu.h> 17#include <linux/cpu.h>
19#include <linux/errno.h> 18#include <linux/errno.h>
20#include <linux/sched.h> 19#include <linux/sched.h>
@@ -32,15 +31,12 @@
32#include <linux/notifier.h> 31#include <linux/notifier.h>
33#include <linux/kprobes.h> 32#include <linux/kprobes.h>
34#include <linux/kdebug.h> 33#include <linux/kdebug.h>
35#include <linux/tick.h>
36#include <linux/prctl.h> 34#include <linux/prctl.h>
37#include <linux/uaccess.h> 35#include <linux/uaccess.h>
38#include <linux/io.h> 36#include <linux/io.h>
39#include <linux/ftrace.h> 37#include <linux/ftrace.h>
40#include <linux/cpuidle.h>
41 38
42#include <asm/pgtable.h> 39#include <asm/pgtable.h>
43#include <asm/system.h>
44#include <asm/processor.h> 40#include <asm/processor.h>
45#include <asm/i387.h> 41#include <asm/i387.h>
46#include <asm/fpu-internal.h> 42#include <asm/fpu-internal.h>
@@ -52,114 +48,11 @@
52#include <asm/idle.h> 48#include <asm/idle.h>
53#include <asm/syscalls.h> 49#include <asm/syscalls.h>
54#include <asm/debugreg.h> 50#include <asm/debugreg.h>
55#include <asm/nmi.h> 51#include <asm/switch_to.h>
56 52
57asmlinkage extern void ret_from_fork(void); 53asmlinkage extern void ret_from_fork(void);
58 54
59DEFINE_PER_CPU(unsigned long, old_rsp); 55DEFINE_PER_CPU(unsigned long, old_rsp);
60static DEFINE_PER_CPU(unsigned char, is_idle);
61
62static ATOMIC_NOTIFIER_HEAD(idle_notifier);
63
64void idle_notifier_register(struct notifier_block *n)
65{
66 atomic_notifier_chain_register(&idle_notifier, n);
67}
68EXPORT_SYMBOL_GPL(idle_notifier_register);
69
70void idle_notifier_unregister(struct notifier_block *n)
71{
72 atomic_notifier_chain_unregister(&idle_notifier, n);
73}
74EXPORT_SYMBOL_GPL(idle_notifier_unregister);
75
76void enter_idle(void)
77{
78 percpu_write(is_idle, 1);
79 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
80}
81
82static void __exit_idle(void)
83{
84 if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
85 return;
86 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
87}
88
89/* Called from interrupts to signify idle end */
90void exit_idle(void)
91{
92 /* idle loop has pid 0 */
93 if (current->pid)
94 return;
95 __exit_idle();
96}
97
98#ifndef CONFIG_SMP
99static inline void play_dead(void)
100{
101 BUG();
102}
103#endif
104
105/*
106 * The idle thread. There's no useful work to be
107 * done, so just try to conserve power and have a
108 * low exit latency (ie sit in a loop waiting for
109 * somebody to say that they'd like to reschedule)
110 */
111void cpu_idle(void)
112{
113 current_thread_info()->status |= TS_POLLING;
114
115 /*
116 * If we're the non-boot CPU, nothing set the stack canary up
117 * for us. CPU0 already has it initialized but no harm in
118 * doing it again. This is a good place for updating it, as
119 * we won't ever return from this function (so the invalid
120 * canaries already on the stack won't ever trigger).
121 */
122 boot_init_stack_canary();
123
124 /* endless idle loop with no priority at all */
125 while (1) {
126 tick_nohz_idle_enter();
127 while (!need_resched()) {
128
129 rmb();
130
131 if (cpu_is_offline(smp_processor_id()))
132 play_dead();
133 /*
134 * Idle routines should keep interrupts disabled
135 * from here on, until they go to idle.
136 * Otherwise, idle callbacks can misfire.
137 */
138 local_touch_nmi();
139 local_irq_disable();
140 enter_idle();
141 /* Don't trace irqs off for idle */
142 stop_critical_timings();
143
144 /* enter_idle() needs rcu for notifiers */
145 rcu_idle_enter();
146
147 if (cpuidle_idle_call())
148 pm_idle();
149
150 rcu_idle_exit();
151 start_critical_timings();
152
153 /* In many cases the interrupt that ended idle
154 has already called exit_idle. But some idle
155 loops can be woken up without interrupt. */
156 __exit_idle();
157 }
158
159 tick_nohz_idle_exit();
160 schedule_preempt_disabled();
161 }
162}
163 56
164/* Prints also some state that isn't saved in the pt_regs */ 57/* Prints also some state that isn't saved in the pt_regs */
165void __show_regs(struct pt_regs *regs, int all) 58void __show_regs(struct pt_regs *regs, int all)
@@ -365,7 +258,9 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
365void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) 258void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
366{ 259{
367 start_thread_common(regs, new_ip, new_sp, 260 start_thread_common(regs, new_ip, new_sp,
368 __USER32_CS, __USER32_DS, __USER32_DS); 261 test_thread_flag(TIF_X32)
262 ? __USER_CS : __USER32_CS,
263 __USER_DS, __USER_DS);
369} 264}
370#endif 265#endif
371 266
@@ -488,6 +383,8 @@ void set_personality_64bit(void)
488 383
489 /* Make sure to be in 64bit mode */ 384 /* Make sure to be in 64bit mode */
490 clear_thread_flag(TIF_IA32); 385 clear_thread_flag(TIF_IA32);
386 clear_thread_flag(TIF_ADDR32);
387 clear_thread_flag(TIF_X32);
491 388
492 /* Ensure the corresponding mm is not marked. */ 389 /* Ensure the corresponding mm is not marked. */
493 if (current->mm) 390 if (current->mm)
@@ -500,20 +397,31 @@ void set_personality_64bit(void)
500 current->personality &= ~READ_IMPLIES_EXEC; 397 current->personality &= ~READ_IMPLIES_EXEC;
501} 398}
502 399
503void set_personality_ia32(void) 400void set_personality_ia32(bool x32)
504{ 401{
505 /* inherit personality from parent */ 402 /* inherit personality from parent */
506 403
507 /* Make sure to be in 32bit mode */ 404 /* Make sure to be in 32bit mode */
508 set_thread_flag(TIF_IA32); 405 set_thread_flag(TIF_ADDR32);
509 current->personality |= force_personality32;
510 406
511 /* Mark the associated mm as containing 32-bit tasks. */ 407 /* Mark the associated mm as containing 32-bit tasks. */
512 if (current->mm) 408 if (current->mm)
513 current->mm->context.ia32_compat = 1; 409 current->mm->context.ia32_compat = 1;
514 410
515 /* Prepare the first "return" to user space */ 411 if (x32) {
516 current_thread_info()->status |= TS_COMPAT; 412 clear_thread_flag(TIF_IA32);
413 set_thread_flag(TIF_X32);
414 current->personality &= ~READ_IMPLIES_EXEC;
415 /* is_compat_task() uses the presence of the x32
416 syscall bit flag to determine compat status */
417 current_thread_info()->status &= ~TS_COMPAT;
418 } else {
419 set_thread_flag(TIF_IA32);
420 clear_thread_flag(TIF_X32);
421 current->personality |= force_personality32;
422 /* Prepare the first "return" to user space */
423 current_thread_info()->status |= TS_COMPAT;
424 }
517} 425}
518 426
519unsigned long get_wchan(struct task_struct *p) 427unsigned long get_wchan(struct task_struct *p)
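
set_personality_ia32() now takes a bool selecting x32 versus plain ia32, while set_personality_64bit() clears all three thread flags. A hedged sketch of how a loader-side caller distinguishes the two compat ABIs (illustrative names; the real dispatch lives behind asm/elf.h's SET_PERSONALITY macros):

#include <linux/elf.h>		/* EM_386, EM_X86_64 */
#include <asm/elf.h>		/* set_personality_64bit()/_ia32() */

static void sketch_pick_personality(bool compat_binary, u16 e_machine)
{
	if (!compat_binary) {
		/* native 64-bit: TIF_IA32, TIF_ADDR32 and TIF_X32 all clear */
		set_personality_64bit();
		return;
	}
	/*
	 * x32 objects are 32-bit-class ELF files whose machine type is
	 * EM_X86_64; genuine ia32 objects carry EM_386.  Both get
	 * TIF_ADDR32 (32-bit addresses), but only ia32 gets TS_COMPAT.
	 */
	set_personality_ia32(e_machine == EM_X86_64);
}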
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 78f05e438be5..685845cf16e0 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -24,7 +24,6 @@
24 24
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27#include <asm/system.h>
28#include <asm/processor.h> 27#include <asm/processor.h>
29#include <asm/i387.h> 28#include <asm/i387.h>
30#include <asm/fpu-internal.h> 29#include <asm/fpu-internal.h>
@@ -34,6 +33,7 @@
34#include <asm/prctl.h> 33#include <asm/prctl.h>
35#include <asm/proto.h> 34#include <asm/proto.h>
36#include <asm/hw_breakpoint.h> 35#include <asm/hw_breakpoint.h>
36#include <asm/traps.h>
37 37
38#include "tls.h" 38#include "tls.h"
39 39
@@ -1131,6 +1131,100 @@ static int genregs32_set(struct task_struct *target,
1131 return ret; 1131 return ret;
1132} 1132}
1133 1133
1134#ifdef CONFIG_X86_X32_ABI
1135static long x32_arch_ptrace(struct task_struct *child,
1136 compat_long_t request, compat_ulong_t caddr,
1137 compat_ulong_t cdata)
1138{
1139 unsigned long addr = caddr;
1140 unsigned long data = cdata;
1141 void __user *datap = compat_ptr(data);
1142 int ret;
1143
1144 switch (request) {
1145 /* Read 32bits at location addr in the USER area. Only allow
1146 to return the lower 32bits of segment and debug registers. */
1147 case PTRACE_PEEKUSR: {
1148 u32 tmp;
1149
1150 ret = -EIO;
1151 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1152 addr < offsetof(struct user_regs_struct, cs))
1153 break;
1154
1155 tmp = 0; /* Default return condition */
1156 if (addr < sizeof(struct user_regs_struct))
1157 tmp = getreg(child, addr);
1158 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1159 addr <= offsetof(struct user, u_debugreg[7])) {
1160 addr -= offsetof(struct user, u_debugreg[0]);
1161 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1162 }
1163 ret = put_user(tmp, (__u32 __user *)datap);
1164 break;
1165 }
1166
1167 /* Write the word at location addr in the USER area. Only allow
1168 to update segment and debug registers with the upper 32bits
1169 zero-extended. */
1170 case PTRACE_POKEUSR:
1171 ret = -EIO;
1172 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1173 addr < offsetof(struct user_regs_struct, cs))
1174 break;
1175
1176 if (addr < sizeof(struct user_regs_struct))
1177 ret = putreg(child, addr, data);
1178 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1179 addr <= offsetof(struct user, u_debugreg[7])) {
1180 addr -= offsetof(struct user, u_debugreg[0]);
1181 ret = ptrace_set_debugreg(child,
1182 addr / sizeof(data), data);
1183 }
1184 break;
1185
1186 case PTRACE_GETREGS: /* Get all gp regs from the child. */
1187 return copy_regset_to_user(child,
1188 task_user_regset_view(current),
1189 REGSET_GENERAL,
1190 0, sizeof(struct user_regs_struct),
1191 datap);
1192
1193 case PTRACE_SETREGS: /* Set all gp regs in the child. */
1194 return copy_regset_from_user(child,
1195 task_user_regset_view(current),
1196 REGSET_GENERAL,
1197 0, sizeof(struct user_regs_struct),
1198 datap);
1199
1200 case PTRACE_GETFPREGS: /* Get the child FPU state. */
1201 return copy_regset_to_user(child,
1202 task_user_regset_view(current),
1203 REGSET_FP,
1204 0, sizeof(struct user_i387_struct),
1205 datap);
1206
1207 case PTRACE_SETFPREGS: /* Set the child FPU state. */
1208 return copy_regset_from_user(child,
1209 task_user_regset_view(current),
1210 REGSET_FP,
1211 0, sizeof(struct user_i387_struct),
1212 datap);
1213
1214 /* normal 64bit interface to access TLS data.
1215 Works just like arch_prctl, except that the arguments
1216 are reversed. */
1217 case PTRACE_ARCH_PRCTL:
1218 return do_arch_prctl(child, data, addr);
1219
1220 default:
1221 return compat_ptrace_request(child, request, addr, data);
1222 }
1223
1224 return ret;
1225}
1226#endif
1227
1134long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 1228long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1135 compat_ulong_t caddr, compat_ulong_t cdata) 1229 compat_ulong_t caddr, compat_ulong_t cdata)
1136{ 1230{
@@ -1140,6 +1234,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1140 int ret; 1234 int ret;
1141 __u32 val; 1235 __u32 val;
1142 1236
1237#ifdef CONFIG_X86_X32_ABI
1238 if (!is_ia32_task())
1239 return x32_arch_ptrace(child, request, caddr, cdata);
1240#endif
1241
1143 switch (request) { 1242 switch (request) {
1144 case PTRACE_PEEKUSR: 1243 case PTRACE_PEEKUSR:
1145 ret = getreg32(child, addr, &val); 1244 ret = getreg32(child, addr, &val);
@@ -1327,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
1327 int error_code, int si_code, 1426 int error_code, int si_code,
1328 struct siginfo *info) 1427 struct siginfo *info)
1329{ 1428{
1330 tsk->thread.trap_no = 1; 1429 tsk->thread.trap_nr = X86_TRAP_DB;
1331 tsk->thread.error_code = error_code; 1430 tsk->thread.error_code = error_code;
1332 1431
1333 memset(info, 0, sizeof(*info)); 1432 memset(info, 0, sizeof(*info));
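
compat_arch_ptrace() now forwards any tracer that is not a classic ia32 task to x32_arch_ptrace(), so an x32 debugger sees 64-bit register layouts through 32-bit pointers. A user-space illustration, hedged: it assumes an x32-capable toolchain (built with gcc -mx32), and PTRACE_PEEKUSER is glibc's spelling of the PTRACE_PEEKUSR request handled above.

#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* hand control to the tracer */
		_exit(0);
	}

	waitpid(child, NULL, 0);

	/* This offset lands in the u_debugreg[] window the handler accepts. */
	long dr7 = ptrace(PTRACE_PEEKUSER, child,
			  offsetof(struct user, u_debugreg[7]), NULL);
	printf("child DR7 = %#lx\n", dr7);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}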
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 88638883176a..1a2901562059 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -90,7 +90,6 @@
90#include <asm/processor.h> 90#include <asm/processor.h>
91#include <asm/bugs.h> 91#include <asm/bugs.h>
92 92
93#include <asm/system.h>
94#include <asm/vsyscall.h> 93#include <asm/vsyscall.h>
95#include <asm/cpu.h> 94#include <asm/cpu.h>
96#include <asm/desc.h> 95#include <asm/desc.h>
@@ -509,15 +508,6 @@ static void __init memblock_x86_reserve_range_setup_data(void)
509 508
510#ifdef CONFIG_KEXEC 509#ifdef CONFIG_KEXEC
511 510
512static inline unsigned long long get_total_mem(void)
513{
514 unsigned long long total;
515
516 total = max_pfn - min_low_pfn;
517
518 return total << PAGE_SHIFT;
519}
520
521/* 511/*
522 * Keep the crash kernel below this limit. On 32 bits earlier kernels 512 * Keep the crash kernel below this limit. On 32 bits earlier kernels
523 * would limit the kernel to the low 512 MiB due to mapping restrictions. 513 * would limit the kernel to the low 512 MiB due to mapping restrictions.
@@ -536,7 +526,7 @@ static void __init reserve_crashkernel(void)
536 unsigned long long crash_size, crash_base; 526 unsigned long long crash_size, crash_base;
537 int ret; 527 int ret;
538 528
539 total_mem = get_total_mem(); 529 total_mem = memblock_phys_mem_size();
540 530
541 ret = parse_crashkernel(boot_command_line, total_mem, 531 ret = parse_crashkernel(boot_command_line, total_mem,
542 &crash_size, &crash_base); 532 &crash_size, &crash_base);
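
The dropped helper computed (max_pfn - min_low_pfn) << PAGE_SHIFT, which counts every page frame in the span, holes included; memblock_phys_mem_size() sums only registered RAM. A sketch of what the memblock call effectively evaluates (hedged; it mirrors the mm/memblock.c accounting of this release):

#include <linux/memblock.h>

static unsigned long long sketch_total_ram(void)
{
	struct memblock_region *reg;
	unsigned long long total = 0;

	/* Walk only the RAM ranges the firmware actually reported... */
	for_each_memblock(memory, reg)
		total += reg->size;

	/* ...so E820 holes no longer inflate the crashkernel budget. */
	return total;
}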
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 25edcfc9ba5b..115eac431483 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -10,10 +10,8 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/signal.h>
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/wait.h> 14#include <linux/wait.h>
16#include <linux/ptrace.h>
17#include <linux/tracehook.h> 15#include <linux/tracehook.h>
18#include <linux/unistd.h> 16#include <linux/unistd.h>
19#include <linux/stddef.h> 17#include <linux/stddef.h>
@@ -27,10 +25,12 @@
27#include <asm/fpu-internal.h> 25#include <asm/fpu-internal.h>
28#include <asm/vdso.h> 26#include <asm/vdso.h>
29#include <asm/mce.h> 27#include <asm/mce.h>
28#include <asm/sighandling.h>
30 29
31#ifdef CONFIG_X86_64 30#ifdef CONFIG_X86_64
32#include <asm/proto.h> 31#include <asm/proto.h>
33#include <asm/ia32_unistd.h> 32#include <asm/ia32_unistd.h>
33#include <asm/sys_ia32.h>
34#endif /* CONFIG_X86_64 */ 34#endif /* CONFIG_X86_64 */
35 35
36#include <asm/syscall.h> 36#include <asm/syscall.h>
@@ -38,13 +38,6 @@
38 38
39#include <asm/sigframe.h> 39#include <asm/sigframe.h>
40 40
41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
42
43#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
44 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
45 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
46 X86_EFLAGS_CF)
47
48#ifdef CONFIG_X86_32 41#ifdef CONFIG_X86_32
49# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) 42# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF)
50#else 43#else
@@ -69,9 +62,8 @@
69 regs->seg = GET_SEG(seg) | 3; \ 62 regs->seg = GET_SEG(seg) | 3; \
70} while (0) 63} while (0)
71 64
72static int 65int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
73restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, 66 unsigned long *pax)
74 unsigned long *pax)
75{ 67{
76 void __user *buf; 68 void __user *buf;
77 unsigned int tmpflags; 69 unsigned int tmpflags;
@@ -126,9 +118,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
126 return err; 118 return err;
127} 119}
128 120
129static int 121int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
130setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, 122 struct pt_regs *regs, unsigned long mask)
131 struct pt_regs *regs, unsigned long mask)
132{ 123{
133 int err = 0; 124 int err = 0;
134 125
@@ -160,7 +151,7 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
160 put_user_ex(regs->r15, &sc->r15); 151 put_user_ex(regs->r15, &sc->r15);
161#endif /* CONFIG_X86_64 */ 152#endif /* CONFIG_X86_64 */
162 153
163 put_user_ex(current->thread.trap_no, &sc->trapno); 154 put_user_ex(current->thread.trap_nr, &sc->trapno);
164 put_user_ex(current->thread.error_code, &sc->err); 155 put_user_ex(current->thread.error_code, &sc->err);
165 put_user_ex(regs->ip, &sc->ip); 156 put_user_ex(regs->ip, &sc->ip);
166#ifdef CONFIG_X86_32 157#ifdef CONFIG_X86_32
@@ -643,6 +634,16 @@ static int signr_convert(int sig)
643#define is_ia32 0 634#define is_ia32 0
644#endif /* CONFIG_IA32_EMULATION */ 635#endif /* CONFIG_IA32_EMULATION */
645 636
637#ifdef CONFIG_X86_X32_ABI
638#define is_x32 test_thread_flag(TIF_X32)
639
640static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
641 siginfo_t *info, compat_sigset_t *set,
642 struct pt_regs *regs);
643#else /* !CONFIG_X86_X32_ABI */
644#define is_x32 0
645#endif /* CONFIG_X86_X32_ABI */
646
646int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 647int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
647 sigset_t *set, struct pt_regs *regs); 648 sigset_t *set, struct pt_regs *regs);
648int ia32_setup_frame(int sig, struct k_sigaction *ka, 649int ia32_setup_frame(int sig, struct k_sigaction *ka,
@@ -667,8 +668,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
667 ret = ia32_setup_rt_frame(usig, ka, info, set, regs); 668 ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
668 else 669 else
669 ret = ia32_setup_frame(usig, ka, set, regs); 670 ret = ia32_setup_frame(usig, ka, set, regs);
670 } else 671#ifdef CONFIG_X86_X32_ABI
672 } else if (is_x32) {
673 ret = x32_setup_rt_frame(usig, ka, info,
674 (compat_sigset_t *)set, regs);
675#endif
676 } else {
671 ret = __setup_rt_frame(sig, ka, info, set, regs); 677 ret = __setup_rt_frame(sig, ka, info, set, regs);
678 }
672 679
673 if (ret) { 680 if (ret) {
674 force_sigsegv(sig, current); 681 force_sigsegv(sig, current);
@@ -851,3 +858,102 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
851 858
852 force_sig(SIGSEGV, me); 859 force_sig(SIGSEGV, me);
853} 860}
861
862#ifdef CONFIG_X86_X32_ABI
863static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
864 siginfo_t *info, compat_sigset_t *set,
865 struct pt_regs *regs)
866{
867 struct rt_sigframe_x32 __user *frame;
868 void __user *restorer;
869 int err = 0;
870 void __user *fpstate = NULL;
871
872 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
873
874 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
875 return -EFAULT;
876
877 if (ka->sa.sa_flags & SA_SIGINFO) {
878 if (copy_siginfo_to_user32(&frame->info, info))
879 return -EFAULT;
880 }
881
882 put_user_try {
883 /* Create the ucontext. */
884 if (cpu_has_xsave)
885 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
886 else
887 put_user_ex(0, &frame->uc.uc_flags);
888 put_user_ex(0, &frame->uc.uc_link);
889 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
890 put_user_ex(sas_ss_flags(regs->sp),
891 &frame->uc.uc_stack.ss_flags);
892 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
893 put_user_ex(0, &frame->uc.uc__pad0);
894 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
895 regs, set->sig[0]);
896 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
897
898 if (ka->sa.sa_flags & SA_RESTORER) {
899 restorer = ka->sa.sa_restorer;
900 } else {
901 /* could use a vstub here */
902 restorer = NULL;
903 err |= -EFAULT;
904 }
905 put_user_ex(restorer, &frame->pretcode);
906 } put_user_catch(err);
907
908 if (err)
909 return -EFAULT;
910
911 /* Set up registers for signal handler */
912 regs->sp = (unsigned long) frame;
913 regs->ip = (unsigned long) ka->sa.sa_handler;
914
915 /* We use the x32 calling convention here... */
916 regs->di = sig;
917 regs->si = (unsigned long) &frame->info;
918 regs->dx = (unsigned long) &frame->uc;
919
920 loadsegment(ds, __USER_DS);
921 loadsegment(es, __USER_DS);
922
923 regs->cs = __USER_CS;
924 regs->ss = __USER_DS;
925
926 return 0;
927}
928
929asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
930{
931 struct rt_sigframe_x32 __user *frame;
932 sigset_t set;
933 unsigned long ax;
934 struct pt_regs tregs;
935
936 frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
937
938 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
939 goto badframe;
940 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
941 goto badframe;
942
943 sigdelsetmask(&set, ~_BLOCKABLE);
944 set_current_blocked(&set);
945
946 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
947 goto badframe;
948
949 tregs = *regs;
950 if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
951 goto badframe;
952
953 return ax;
954
955badframe:
956 signal_fault(regs, frame, "x32 rt_sigreturn");
957 return 0;
958}
959#endif
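
x32_setup_rt_frame() fills the frame with x86's put_user_try/put_user_catch batching: the individual put_user_ex() stores carry no return value, and a single exception fixup flags any fault. A stripped-down sketch of the idiom (illustrative only):

#include <linux/types.h>
#include <linux/uaccess.h>

static int sketch_fill_frame(u32 __user *p)
{
	int err = 0;

	put_user_try {
		put_user_ex(1, &p[0]);	/* no per-store error check */
		put_user_ex(2, &p[1]);	/* a fault branches to the catch */
	} put_user_catch(err);

	return err;			/* 0, or -EFAULT if a store faulted */
}

Note also that the x32 frame setup refuses handlers without SA_RESTORER (the err |= -EFAULT branch above): as the "could use a vstub here" comment says, x32 provides no fallback sigreturn trampoline at this point.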
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e578a79a3093..6e1e406038c2 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -50,6 +50,7 @@
50#include <linux/tboot.h> 50#include <linux/tboot.h>
51#include <linux/stackprotector.h> 51#include <linux/stackprotector.h>
52#include <linux/gfp.h> 52#include <linux/gfp.h>
53#include <linux/cpuidle.h>
53 54
54#include <asm/acpi.h> 55#include <asm/acpi.h>
55#include <asm/desc.h> 56#include <asm/desc.h>
@@ -219,14 +220,9 @@ static void __cpuinit smp_callin(void)
219 * Update loops_per_jiffy in cpu_data. Previous call to 220 * Update loops_per_jiffy in cpu_data. Previous call to
220 * smp_store_cpu_info() stored a value that is close but not as 221 * smp_store_cpu_info() stored a value that is close but not as
221 * accurate as the value just calculated. 222 * accurate as the value just calculated.
222 *
223 * Need to enable IRQs because it can take longer and then
224 * the NMI watchdog might kill us.
225 */ 223 */
226 local_irq_enable();
227 calibrate_delay(); 224 calibrate_delay();
228 cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy; 225 cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
229 local_irq_disable();
230 pr_debug("Stack at about %p\n", &cpuid); 226 pr_debug("Stack at about %p\n", &cpuid);
231 227
232 /* 228 /*
@@ -255,6 +251,7 @@ notrace static void __cpuinit start_secondary(void *unused)
255 * most necessary things. 251 * most necessary things.
256 */ 252 */
257 cpu_init(); 253 cpu_init();
254 x86_cpuinit.early_percpu_clock_init();
258 preempt_disable(); 255 preempt_disable();
259 smp_callin(); 256 smp_callin();
260 257
@@ -1408,7 +1405,8 @@ void native_play_dead(void)
1408 tboot_shutdown(TB_SHUTDOWN_WFS); 1405 tboot_shutdown(TB_SHUTDOWN_WFS);
1409 1406
1410 mwait_play_dead(); /* Only returns on failure */ 1407 mwait_play_dead(); /* Only returns on failure */
1411 hlt_play_dead(); 1408 if (cpuidle_play_dead())
1409 hlt_play_dead();
1412} 1410}
1413 1411
1414#else /* ... !CONFIG_HOTPLUG_CPU */ 1412#else /* ... !CONFIG_HOTPLUG_CPU */
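
native_play_dead() now prefers a cpuidle-provided dead state over plain HLT: cpuidle_play_dead() returns nonzero when the active driver registers no handler, and only then does the legacy HLT loop run. A hedged sketch of the driver-side hook this relies on (the ->enter_dead callback added to struct cpuidle_state in this release; the body is illustrative):

#include <linux/cpuidle.h>
#include <asm/irqflags.h>

/* Parks an offlined CPU; must never return. */
static int sketch_enter_dead(struct cpuidle_device *dev, int index)
{
	for (;;)
		native_halt();	/* a real driver would enter a deep C-state */
	return 0;		/* not reached */
}

/* A driver opts in per state, e.g.: .enter_dead = sketch_enter_dead */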
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index ef59642ff1bf..b4d3c3927dd8 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -98,7 +98,7 @@ out:
98static void find_start_end(unsigned long flags, unsigned long *begin, 98static void find_start_end(unsigned long flags, unsigned long *begin,
99 unsigned long *end) 99 unsigned long *end)
100{ 100{
101 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { 101 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
102 unsigned long new_begin; 102 unsigned long new_begin;
103 /* This is usually used to map code in small 103 model, so it needs to be in the first 31bit. Limit
104 model, so it needs to be in the first 31bit. Limit 104 model, so it needs to be in the first 31bit. Limit
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
144 (!vma || addr + len <= vma->vm_start)) 144 (!vma || addr + len <= vma->vm_start))
145 return addr; 145 return addr;
146 } 146 }
147 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) 147 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
148 && len <= mm->cached_hole_size) { 148 && len <= mm->cached_hole_size) {
149 mm->cached_hole_size = 0; 149 mm->cached_hole_size = 0;
150 mm->free_area_cache = begin; 150 mm->free_area_cache = begin;
@@ -205,7 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
205 return addr; 205 return addr;
206 206
207 /* for MAP_32BIT mappings we force the legacy mmap base */ 207 /* for MAP_32BIT mappings we force the legacy mmap base */
208 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) 208 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
209 goto bottomup; 209 goto bottomup;
210 210
211 /* requesting a specific address */ 211 /* requesting a specific address */
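
The mmap paths now test TIF_ADDR32 ("this task uses 32-bit addresses") rather than TIF_IA32, so x32 tasks are treated like ia32 ones, and MAP_32BIT keeps meaning "give native 64-bit code a sub-2^31 address for small-model use". A user-space illustration of the flag (hedged; MAP_32BIT is a glibc/x86-64 extension):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);

	if (p != MAP_FAILED)
		printf("mapped at %p (below 2^31: %s)\n", p,
		       (uintptr_t)p < (1UL << 31) ? "yes" : "no");
	return 0;
}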
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 7ac7943be02c..5c7f8c20da74 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -5,6 +5,14 @@
5#include <linux/cache.h> 5#include <linux/cache.h>
6#include <asm/asm-offsets.h> 6#include <asm/asm-offsets.h>
7 7
8#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
9
10#ifdef CONFIG_X86_X32_ABI
11# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
12#else
13# define __SYSCALL_X32(nr, sym, compat) /* nothing */
14#endif
15
8#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; 16#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
9#include <asm/syscalls_64.h> 17#include <asm/syscalls_64.h>
10#undef __SYSCALL_64 18#undef __SYSCALL_64
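
The new macros let one master list serve several ABIs: entries tagged COMMON land in both the 64-bit and x32 tables, and X32 entries compile away when the ABI is off. The underlying trick is the classic two-pass X-macro, sketched standalone below (toy names, not the kernel's):

#include <stdio.h>

#define SYSCALL_TABLE \
	SYSCALL(0, do_read)  \
	SYSCALL(1, do_write)

/* pass 1: expand the list into forward declarations */
#define SYSCALL(nr, sym) void sym(void);
SYSCALL_TABLE
#undef SYSCALL

/* pass 2: expand the same list into table entries */
#define SYSCALL(nr, sym) [nr] = sym,
static void (*table[])(void) = {
	SYSCALL_TABLE
};
#undef SYSCALL

void do_read(void)  { puts("read");  }
void do_write(void) { puts("write"); }

int main(void)
{
	table[1]();	/* prints "write" */
	return 0;
}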
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index e2410e27f97e..6410744ac5cb 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -272,7 +272,7 @@ static void tboot_copy_fadt(const struct acpi_table_fadt *fadt)
272 offsetof(struct acpi_table_facs, firmware_waking_vector); 272 offsetof(struct acpi_table_facs, firmware_waking_vector);
273} 273}
274 274
275void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control) 275static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
276{ 276{
277 static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = { 277 static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = {
278 /* S0,1,2: */ -1, -1, -1, 278 /* S0,1,2: */ -1, -1, -1,
@@ -281,7 +281,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
281 /* S5: */ TB_SHUTDOWN_S5 }; 281 /* S5: */ TB_SHUTDOWN_S5 };
282 282
283 if (!tboot_enabled()) 283 if (!tboot_enabled())
284 return; 284 return 0;
285 285
286 tboot_copy_fadt(&acpi_gbl_FADT); 286 tboot_copy_fadt(&acpi_gbl_FADT);
287 tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control; 287 tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control;
@@ -292,10 +292,11 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
292 if (sleep_state >= ACPI_S_STATE_COUNT || 292 if (sleep_state >= ACPI_S_STATE_COUNT ||
293 acpi_shutdown_map[sleep_state] == -1) { 293 acpi_shutdown_map[sleep_state] == -1) {
294 pr_warning("unsupported sleep state 0x%x\n", sleep_state); 294 pr_warning("unsupported sleep state 0x%x\n", sleep_state);
295 return; 295 return -1;
296 } 296 }
297 297
298 tboot_shutdown(acpi_shutdown_map[sleep_state]); 298 tboot_shutdown(acpi_shutdown_map[sleep_state]);
299 return 0;
299} 300}
300 301
301static atomic_t ap_wfs_count; 302static atomic_t ap_wfs_count;
@@ -345,6 +346,8 @@ static __init int tboot_late_init(void)
345 346
346 atomic_set(&ap_wfs_count, 0); 347 atomic_set(&ap_wfs_count, 0);
347 register_hotcpu_notifier(&tboot_cpu_notifier); 348 register_hotcpu_notifier(&tboot_cpu_notifier);
349
350 acpi_os_set_prepare_sleep(&tboot_sleep);
348 return 0; 351 return 0;
349} 352}
350 353
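
tboot_sleep() used to be called directly from the ACPI suspend path; it is now a registered hook, and the int return lets it veto the transition. A hedged sketch of the contract acpi_os_set_prepare_sleep() establishes (illustrative names, not a copy of drivers/acpi/osl.c):

#include <linux/types.h>
#include <linux/errno.h>

/* Stored by acpi_os_set_prepare_sleep(); here, tboot_sleep above. */
static int (*prepare_sleep_hook)(u8 sleep_state, u32 pm1a, u32 pm1b);

static int sketch_enter_sleep(u8 sleep_state, u32 pm1a, u32 pm1b)
{
	/*
	 * Consulted right before handing the state to firmware; a
	 * nonzero return aborts the suspend rather than entering an
	 * unsupported state behind tboot's back.
	 */
	if (prepare_sleep_hook && prepare_sleep_hook(sleep_state, pm1a, pm1b))
		return -EINVAL;
	return 0;
}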
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
index 9e540fee7009..ab40954e113e 100644
--- a/arch/x86/kernel/tce_64.c
+++ b/arch/x86/kernel/tce_64.c
@@ -34,6 +34,7 @@
34#include <asm/tce.h> 34#include <asm/tce.h>
35#include <asm/calgary.h> 35#include <asm/calgary.h>
36#include <asm/proto.h> 36#include <asm/proto.h>
37#include <asm/cacheflush.h>
37 38
38/* flush a tce at 'tceaddr' to main memory */ 39/* flush a tce at 'tceaddr' to main memory */
39static inline void flush_tce(void* tceaddr) 40static inline void flush_tce(void* tceaddr)
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 6bb7b8579e70..9d9d2f9e77a5 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -6,7 +6,6 @@
6 6
7#include <asm/uaccess.h> 7#include <asm/uaccess.h>
8#include <asm/desc.h> 8#include <asm/desc.h>
9#include <asm/system.h>
10#include <asm/ldt.h> 9#include <asm/ldt.h>
11#include <asm/processor.h> 10#include <asm/processor.h>
12#include <asm/proto.h> 11#include <asm/proto.h>
@@ -163,7 +162,7 @@ int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
163{ 162{
164 const struct desc_struct *tls; 163 const struct desc_struct *tls;
165 164
166 if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || 165 if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
167 (pos % sizeof(struct user_desc)) != 0 || 166 (pos % sizeof(struct user_desc)) != 0 ||
168 (count % sizeof(struct user_desc)) != 0) 167 (count % sizeof(struct user_desc)) != 0)
169 return -EINVAL; 168 return -EINVAL;
@@ -198,7 +197,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
198 struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; 197 struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
199 const struct user_desc *info; 198 const struct user_desc *info;
200 199
201 if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || 200 if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
202 (pos % sizeof(struct user_desc)) != 0 || 201 (pos % sizeof(struct user_desc)) != 0 ||
203 (count % sizeof(struct user_desc)) != 0) 202 (count % sizeof(struct user_desc)) != 0)
204 return -EINVAL; 203 return -EINVAL;
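
The comparison flip from > to >= fixes an off-by-one: with GDT_ENTRY_TLS_ENTRIES == 3 and sizeof(struct user_desc) == 16, the legal starting offsets are 0, 16 and 32, yet pos == 48 passed the old test and would have addressed a nonexistent fourth tls_array slot. The corrected invariant, spelled out:

/* sizes as on x86: 3 TLS slots of 16 bytes each */
#define TLS_ENTRIES	3
#define DESC_SIZE	16	/* sizeof(struct user_desc) */

static inline int tls_pos_ok(unsigned int pos)
{
	return pos < TLS_ENTRIES * DESC_SIZE && (pos % DESC_SIZE) == 0;
}
/* tls_pos_ok(0) == tls_pos_ok(16) == tls_pos_ok(32) == 1; tls_pos_ok(48) == 0 */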
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ec61d4c1b93b..ff9281f16029 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -50,7 +50,6 @@
50#include <asm/processor.h> 50#include <asm/processor.h>
51#include <asm/debugreg.h> 51#include <asm/debugreg.h>
52#include <linux/atomic.h> 52#include <linux/atomic.h>
53#include <asm/system.h>
54#include <asm/traps.h> 53#include <asm/traps.h>
55#include <asm/desc.h> 54#include <asm/desc.h>
56#include <asm/i387.h> 55#include <asm/i387.h>
@@ -120,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
120 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. 119 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
121 * On nmi (interrupt 2), do_trap should not be called. 120 * On nmi (interrupt 2), do_trap should not be called.
122 */ 121 */
123 if (trapnr < 6) 122 if (trapnr < X86_TRAP_UD)
124 goto vm86_trap; 123 goto vm86_trap;
125 goto trap_signal; 124 goto trap_signal;
126 } 125 }
@@ -133,7 +132,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
133trap_signal: 132trap_signal:
134#endif 133#endif
135 /* 134 /*
136 * We want error_code and trap_no set for userspace faults and 135 * We want error_code and trap_nr set for userspace faults and
137 * kernelspace faults which result in die(), but not 136 * kernelspace faults which result in die(), but not
138 * kernelspace faults which are fixed up. die() gives the 137 * kernelspace faults which are fixed up. die() gives the
139 * process no chance to handle the signal and notice the 138 * process no chance to handle the signal and notice the
@@ -142,7 +141,7 @@ trap_signal:
142 * delivered, faults. See also do_general_protection below. 141 * delivered, faults. See also do_general_protection below.
143 */ 142 */
144 tsk->thread.error_code = error_code; 143 tsk->thread.error_code = error_code;
145 tsk->thread.trap_no = trapnr; 144 tsk->thread.trap_nr = trapnr;
146 145
147#ifdef CONFIG_X86_64 146#ifdef CONFIG_X86_64
148 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 147 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -165,7 +164,7 @@ trap_signal:
165kernel_trap: 164kernel_trap:
166 if (!fixup_exception(regs)) { 165 if (!fixup_exception(regs)) {
167 tsk->thread.error_code = error_code; 166 tsk->thread.error_code = error_code;
168 tsk->thread.trap_no = trapnr; 167 tsk->thread.trap_nr = trapnr;
169 die(str, regs, error_code); 168 die(str, regs, error_code);
170 } 169 }
171 return; 170 return;
@@ -204,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
204 do_trap(trapnr, signr, str, regs, error_code, &info); \ 203 do_trap(trapnr, signr, str, regs, error_code, &info); \
205} 204}
206 205
207DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) 206DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
208DO_ERROR(4, SIGSEGV, "overflow", overflow) 207 regs->ip)
209DO_ERROR(5, SIGSEGV, "bounds", bounds) 208DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
210DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) 209DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
211DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 210DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
212DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 211 regs->ip)
213DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 212DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
213 coprocessor_segment_overrun)
214DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
215DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
214#ifdef CONFIG_X86_32 216#ifdef CONFIG_X86_32
215DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 217DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
216#endif 218#endif
217DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) 219DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
220 BUS_ADRALN, 0)
218 221
219#ifdef CONFIG_X86_64 222#ifdef CONFIG_X86_64
220/* Runs on IST stack */ 223/* Runs on IST stack */
221dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 224dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
222{ 225{
223 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 226 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
224 12, SIGBUS) == NOTIFY_STOP) 227 X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
225 return; 228 return;
226 preempt_conditional_sti(regs); 229 preempt_conditional_sti(regs);
227 do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); 230 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
228 preempt_conditional_cli(regs); 231 preempt_conditional_cli(regs);
229} 232}
230 233
@@ -234,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
234 struct task_struct *tsk = current; 237 struct task_struct *tsk = current;
235 238
236 /* Return not checked because double check cannot be ignored */ 239 /* Return not checked because double check cannot be ignored */
237 notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV); 240 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
238 241
239 tsk->thread.error_code = error_code; 242 tsk->thread.error_code = error_code;
240 tsk->thread.trap_no = 8; 243 tsk->thread.trap_nr = X86_TRAP_DF;
241 244
242 /* 245 /*
243 * This is always a kernel trap and never fixable (and thus must 246 * This is always a kernel trap and never fixable (and thus must
@@ -265,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
265 goto gp_in_kernel; 268 goto gp_in_kernel;
266 269
267 tsk->thread.error_code = error_code; 270 tsk->thread.error_code = error_code;
268 tsk->thread.trap_no = 13; 271 tsk->thread.trap_nr = X86_TRAP_GP;
269 272
270 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 273 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
271 printk_ratelimit()) { 274 printk_ratelimit()) {
@@ -292,9 +295,9 @@ gp_in_kernel:
292 return; 295 return;
293 296
294 tsk->thread.error_code = error_code; 297 tsk->thread.error_code = error_code;
295 tsk->thread.trap_no = 13; 298 tsk->thread.trap_nr = X86_TRAP_GP;
296 if (notify_die(DIE_GPF, "general protection fault", regs, 299 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
297 error_code, 13, SIGSEGV) == NOTIFY_STOP) 300 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
298 return; 301 return;
299 die("general protection fault", regs, error_code); 302 die("general protection fault", regs, error_code);
300} 303}
@@ -303,13 +306,13 @@ gp_in_kernel:
303dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) 306dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
304{ 307{
305#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 308#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
306 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 309 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
307 == NOTIFY_STOP) 310 SIGTRAP) == NOTIFY_STOP)
308 return; 311 return;
309#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 312#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
310 313
311 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 314 if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
312 == NOTIFY_STOP) 315 SIGTRAP) == NOTIFY_STOP)
313 return; 316 return;
314 317
315 /* 318 /*
@@ -318,7 +321,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
318 */ 321 */
319 debug_stack_usage_inc(); 322 debug_stack_usage_inc();
320 preempt_conditional_sti(regs); 323 preempt_conditional_sti(regs);
321 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); 324 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
322 preempt_conditional_cli(regs); 325 preempt_conditional_cli(regs);
323 debug_stack_usage_dec(); 326 debug_stack_usage_dec();
324} 327}
@@ -423,8 +426,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
423 preempt_conditional_sti(regs); 426 preempt_conditional_sti(regs);
424 427
425 if (regs->flags & X86_VM_MASK) { 428 if (regs->flags & X86_VM_MASK) {
426 handle_vm86_trap((struct kernel_vm86_regs *) regs, 429 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
427 error_code, 1); 430 X86_TRAP_DB);
428 preempt_conditional_cli(regs); 431 preempt_conditional_cli(regs);
429 debug_stack_usage_dec(); 432 debug_stack_usage_dec();
430 return; 433 return;
@@ -461,7 +464,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
461 struct task_struct *task = current; 464 struct task_struct *task = current;
462 siginfo_t info; 465 siginfo_t info;
463 unsigned short err; 466 unsigned short err;
464 char *str = (trapnr == 16) ? "fpu exception" : "simd exception"; 467 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
468 "simd exception";
465 469
466 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) 470 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
467 return; 471 return;
@@ -471,7 +475,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
471 { 475 {
472 if (!fixup_exception(regs)) { 476 if (!fixup_exception(regs)) {
473 task->thread.error_code = error_code; 477 task->thread.error_code = error_code;
474 task->thread.trap_no = trapnr; 478 task->thread.trap_nr = trapnr;
475 die(str, regs, error_code); 479 die(str, regs, error_code);
476 } 480 }
477 return; 481 return;
@@ -481,12 +485,12 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
481 * Save the info for the exception handler and clear the error. 485 * Save the info for the exception handler and clear the error.
482 */ 486 */
483 save_init_fpu(task); 487 save_init_fpu(task);
484 task->thread.trap_no = trapnr; 488 task->thread.trap_nr = trapnr;
485 task->thread.error_code = error_code; 489 task->thread.error_code = error_code;
486 info.si_signo = SIGFPE; 490 info.si_signo = SIGFPE;
487 info.si_errno = 0; 491 info.si_errno = 0;
488 info.si_addr = (void __user *)regs->ip; 492 info.si_addr = (void __user *)regs->ip;
489 if (trapnr == 16) { 493 if (trapnr == X86_TRAP_MF) {
490 unsigned short cwd, swd; 494 unsigned short cwd, swd;
491 /* 495 /*
492 * (~cwd & swd) will mask out exceptions that are not set to unmasked 496 * (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -530,10 +534,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
530 info.si_code = FPE_FLTRES; 534 info.si_code = FPE_FLTRES;
531 } else { 535 } else {
532 /* 536 /*
533 * If we're using IRQ 13, or supposedly even some trap 16 537 * If we're using IRQ 13, or supposedly even some trap
534 * implementations, it's possible we get a spurious trap... 538 * X86_TRAP_MF implementations, it's possible
539 * we get a spurious trap, which is not an error.
535 */ 540 */
536 return; /* Spurious trap, no error */ 541 return;
537 } 542 }
538 force_sig_info(SIGFPE, &info, task); 543 force_sig_info(SIGFPE, &info, task);
539} 544}
@@ -544,13 +549,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
544 ignore_fpu_irq = 1; 549 ignore_fpu_irq = 1;
545#endif 550#endif
546 551
547 math_error(regs, error_code, 16); 552 math_error(regs, error_code, X86_TRAP_MF);
548} 553}
549 554
550dotraplinkage void 555dotraplinkage void
551do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 556do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
552{ 557{
553 math_error(regs, error_code, 19); 558 math_error(regs, error_code, X86_TRAP_XF);
554} 559}
555 560
556dotraplinkage void 561dotraplinkage void
@@ -644,20 +649,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
644 info.si_errno = 0; 649 info.si_errno = 0;
645 info.si_code = ILL_BADSTK; 650 info.si_code = ILL_BADSTK;
646 info.si_addr = NULL; 651 info.si_addr = NULL;
647 if (notify_die(DIE_TRAP, "iret exception", 652 if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
648 regs, error_code, 32, SIGILL) == NOTIFY_STOP) 653 X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
649 return; 654 return;
650 do_trap(32, SIGILL, "iret exception", regs, error_code, &info); 655 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
656 &info);
651} 657}
652#endif 658#endif
653 659
654/* Set of traps needed for early debugging. */ 660/* Set of traps needed for early debugging. */
655void __init early_trap_init(void) 661void __init early_trap_init(void)
656{ 662{
657 set_intr_gate_ist(1, &debug, DEBUG_STACK); 663 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
658 /* int3 can be called from all */ 664 /* int3 can be called from all */
659 set_system_intr_gate_ist(3, &int3, DEBUG_STACK); 665 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
660 set_intr_gate(14, &page_fault); 666 set_intr_gate(X86_TRAP_PF, &page_fault);
661 load_idt(&idt_descr); 667 load_idt(&idt_descr);
662} 668}
663 669
@@ -673,30 +679,30 @@ void __init trap_init(void)
673 early_iounmap(p, 4); 679 early_iounmap(p, 4);
674#endif 680#endif
675 681
676 set_intr_gate(0, &divide_error); 682 set_intr_gate(X86_TRAP_DE, &divide_error);
677 set_intr_gate_ist(2, &nmi, NMI_STACK); 683 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
678 /* int4 can be called from all */ 684 /* int4 can be called from all */
679 set_system_intr_gate(4, &overflow); 685 set_system_intr_gate(X86_TRAP_OF, &overflow);
680 set_intr_gate(5, &bounds); 686 set_intr_gate(X86_TRAP_BR, &bounds);
681 set_intr_gate(6, &invalid_op); 687 set_intr_gate(X86_TRAP_UD, &invalid_op);
682 set_intr_gate(7, &device_not_available); 688 set_intr_gate(X86_TRAP_NM, &device_not_available);
683#ifdef CONFIG_X86_32 689#ifdef CONFIG_X86_32
684 set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); 690 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
685#else 691#else
686 set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); 692 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
687#endif 693#endif
688 set_intr_gate(9, &coprocessor_segment_overrun); 694 set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
689 set_intr_gate(10, &invalid_TSS); 695 set_intr_gate(X86_TRAP_TS, &invalid_TSS);
690 set_intr_gate(11, &segment_not_present); 696 set_intr_gate(X86_TRAP_NP, &segment_not_present);
691 set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); 697 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
692 set_intr_gate(13, &general_protection); 698 set_intr_gate(X86_TRAP_GP, &general_protection);
693 set_intr_gate(15, &spurious_interrupt_bug); 699 set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
694 set_intr_gate(16, &coprocessor_error); 700 set_intr_gate(X86_TRAP_MF, &coprocessor_error);
695 set_intr_gate(17, &alignment_check); 701 set_intr_gate(X86_TRAP_AC, &alignment_check);
696#ifdef CONFIG_X86_MCE 702#ifdef CONFIG_X86_MCE
697 set_intr_gate_ist(18, &machine_check, MCE_STACK); 703 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
698#endif 704#endif
699 set_intr_gate(19, &simd_coprocessor_error); 705 set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
700 706
701 /* Reserve all the builtin and the syscall vector: */ 707 /* Reserve all the builtin and the syscall vector: */
702 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 708 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
@@ -721,7 +727,7 @@ void __init trap_init(void)
721 727
722#ifdef CONFIG_X86_64 728#ifdef CONFIG_X86_64
723 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); 729 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
724 set_nmi_gate(1, &debug); 730 set_nmi_gate(X86_TRAP_DB, &debug);
725 set_nmi_gate(3, &int3); 731 set_nmi_gate(X86_TRAP_BP, &int3);
726#endif 732#endif
727} 733}
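
All the magic vector numbers above now come from the enum this series adds to asm/traps.h; the values are unchanged, only named. For reference (paraphrased from the header):

enum {
	X86_TRAP_DE = 0,	/*  0: divide error */
	X86_TRAP_DB,		/*  1: debug */
	X86_TRAP_NMI,		/*  2: non-maskable interrupt */
	X86_TRAP_BP,		/*  3: int3 breakpoint */
	X86_TRAP_OF,		/*  4: overflow */
	X86_TRAP_BR,		/*  5: bound range exceeded */
	X86_TRAP_UD,		/*  6: invalid opcode */
	X86_TRAP_NM,		/*  7: device not available */
	X86_TRAP_DF,		/*  8: double fault */
	X86_TRAP_OLD_MF,	/*  9: coprocessor segment overrun */
	X86_TRAP_TS,		/* 10: invalid TSS */
	X86_TRAP_NP,		/* 11: segment not present */
	X86_TRAP_SS,		/* 12: stack-segment fault */
	X86_TRAP_GP,		/* 13: general protection fault */
	X86_TRAP_PF,		/* 14: page fault */
	X86_TRAP_SPURIOUS,	/* 15: spurious interrupt */
	X86_TRAP_MF,		/* 16: x87 FPU error */
	X86_TRAP_AC,		/* 17: alignment check */
	X86_TRAP_MC,		/* 18: machine check */
	X86_TRAP_XF,		/* 19: SIMD exception */
	X86_TRAP_IRET = 32,	/* 32: IRET exception */
};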
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 183c5925a9fe..fc0a147e3727 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -630,7 +630,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
630 630
631static unsigned long long cyc2ns_suspend; 631static unsigned long long cyc2ns_suspend;
632 632
633void save_sched_clock_state(void) 633void tsc_save_sched_clock_state(void)
634{ 634{
635 if (!sched_clock_stable) 635 if (!sched_clock_stable)
636 return; 636 return;
@@ -646,7 +646,7 @@ void save_sched_clock_state(void)
646 * that sched_clock() continues from the point where it was left off during 646 * that sched_clock() continues from the point where it was left off during
647 * suspend. 647 * suspend.
648 */ 648 */
649void restore_sched_clock_state(void) 649void tsc_restore_sched_clock_state(void)
650{ 650{
651 unsigned long long offset; 651 unsigned long long offset;
652 unsigned long flags; 652 unsigned long flags;
@@ -933,6 +933,16 @@ static int __init init_tsc_clocksource(void)
933 clocksource_tsc.rating = 0; 933 clocksource_tsc.rating = 0;
934 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; 934 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
935 } 935 }
936
937 /*
938 * Trust the results of the earlier calibration on systems
939 * exporting a reliable TSC.
940 */
941 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
942 clocksource_register_khz(&clocksource_tsc, tsc_khz);
943 return 0;
944 }
945
936 schedule_delayed_work(&tsc_irqwork, 0); 946 schedule_delayed_work(&tsc_irqwork, 0);
937 return 0; 947 return 0;
938} 948}
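
X86_FEATURE_TSC_RELIABLE is a synthetic CPU cap, set by platform or hypervisor detection code rather than by CPUID; when present, the early calibration is trusted and the delayed refinement work is skipped. A hedged sketch of how detection code opts in (VMware's probe uses this pattern; "tsc=reliable" on the command line covers similar ground):

#include <asm/processor.h>
#include <asm/cpufeature.h>

static void __init sketch_mark_tsc_reliable(void)
{
	/* Synthetic flag: "the TSC won't drift, skip recalibration". */
	set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
}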
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 328cb37bb827..255f58ae71e8 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -569,7 +569,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
569 } 569 }
570 if (trapno != 1) 570 if (trapno != 1)
571 return 1; /* we let this be handled by the calling routine */ 571 return 1; /* we let this be handled by the calling routine */
572 current->thread.trap_no = trapno; 572 current->thread.trap_nr = trapno;
573 current->thread.error_code = error_code; 573 current->thread.error_code = error_code;
574 force_sig(SIGTRAP, current); 574 force_sig(SIGTRAP, current);
575 return 0; 575 return 0;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba9393564..f386dc49f988 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -52,10 +52,7 @@
52#include "vsyscall_trace.h" 52#include "vsyscall_trace.h"
53 53
54DEFINE_VVAR(int, vgetcpu_mode); 54DEFINE_VVAR(int, vgetcpu_mode);
55DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = 55DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
56{
57 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
58};
59 56
60static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; 57static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
61 58
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup);
80 77
81void update_vsyscall_tz(void) 78void update_vsyscall_tz(void)
82{ 79{
83 unsigned long flags;
84
85 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
86 /* sys_tz has changed */
87 vsyscall_gtod_data.sys_tz = sys_tz; 80 vsyscall_gtod_data.sys_tz = sys_tz;
88 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
89} 81}
90 82
91void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, 83void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
92 struct clocksource *clock, u32 mult) 84 struct clocksource *clock, u32 mult)
93{ 85{
94 unsigned long flags; 86 struct timespec monotonic;
95 87
96 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); 88 write_seqcount_begin(&vsyscall_gtod_data.seq);
97 89
98 /* copy vsyscall data */ 90 /* copy vsyscall data */
99 vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; 91 vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
101 vsyscall_gtod_data.clock.mask = clock->mask; 93 vsyscall_gtod_data.clock.mask = clock->mask;
102 vsyscall_gtod_data.clock.mult = mult; 94 vsyscall_gtod_data.clock.mult = mult;
103 vsyscall_gtod_data.clock.shift = clock->shift; 95 vsyscall_gtod_data.clock.shift = clock->shift;
96
104 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; 97 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
105 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; 98 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
106 vsyscall_gtod_data.wall_to_monotonic = *wtm; 99
100 monotonic = timespec_add(*wall_time, *wtm);
101 vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
102 vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;
103
107 vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); 104 vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
105 vsyscall_gtod_data.monotonic_time_coarse =
106 timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
108 107
109 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); 108 write_seqcount_end(&vsyscall_gtod_data.seq);
110} 109}
111 110
112static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, 111static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
@@ -153,7 +152,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
153 152
154 thread->error_code = 6; /* user fault, no page, write */ 153 thread->error_code = 6; /* user fault, no page, write */
155 thread->cr2 = ptr; 154 thread->cr2 = ptr;
156 thread->trap_no = 14; 155 thread->trap_nr = X86_TRAP_PF;
157 156
158 memset(&info, 0, sizeof(info)); 157 memset(&info, 0, sizeof(info));
159 info.si_signo = SIGSEGV; 158 info.si_signo = SIGSEGV;
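
Replacing the IRQ-disabling seqlock with a bare seqcount works because the writer is already serialized by the timekeeping core; readers simply retry. The matching lockless reader loop, sketched (the vsyscall/vDSO gettime paths follow this shape, assuming vsyscall_gtod_data is visible as via vvar.h):

#include <linux/seqlock.h>
#include <linux/time.h>

static void sketch_read_wall_time(struct timespec *ts)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&vsyscall_gtod_data.seq);
		ts->tv_sec  = vsyscall_gtod_data.wall_time_sec;
		ts->tv_nsec = vsyscall_gtod_data.wall_time_nsec;
	} while (read_seqcount_retry(&vsyscall_gtod_data.seq, seq));
}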
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 947a06ccc673..e9f265fd79ae 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -91,6 +91,7 @@ struct x86_init_ops x86_init __initdata = {
91}; 91};
92 92
93struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { 93struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
94 .early_percpu_clock_init = x86_init_noop,
94 .setup_percpu_clockev = setup_secondary_APIC_clock, 95 .setup_percpu_clockev = setup_secondary_APIC_clock,
95 .fixup_cpu_id = x86_default_fixup_cpu_id, 96 .fixup_cpu_id = x86_default_fixup_cpu_id,
96}; 97};
@@ -107,7 +108,9 @@ struct x86_platform_ops x86_platform = {
107 .is_untracked_pat_range = is_ISA_range, 108 .is_untracked_pat_range = is_ISA_range,
108 .nmi_init = default_nmi_init, 109 .nmi_init = default_nmi_init,
109 .get_nmi_reason = default_get_nmi_reason, 110 .get_nmi_reason = default_get_nmi_reason,
110 .i8042_detect = default_i8042_detect 111 .i8042_detect = default_i8042_detect,
112 .save_sched_clock_state = tsc_save_sched_clock_state,
113 .restore_sched_clock_state = tsc_restore_sched_clock_state,
111}; 114};
112 115
113EXPORT_SYMBOL_GPL(x86_platform); 116EXPORT_SYMBOL_GPL(x86_platform);
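
The two new x86_platform hooks default to the bare-TSC implementations renamed in tsc.c above, and the new x86_cpuinit hook defaults to a no-op, so existing platforms are unaffected; paravirt guests override them. A hedged sketch of the override pattern (kvmclock in this same merge installs its own secondary-CPU clock init this way):

#include <asm/x86_init.h>

static void __cpuinit sketch_secondary_clock_init(void)
{
	/*
	 * e.g. register this CPU's pvclock page with the hypervisor
	 * before the first sched_clock() call on the new CPU
	 */
}

static void __init sketch_platform_setup(void)
{
	x86_cpuinit.early_percpu_clock_init = sketch_secondary_clock_init;
}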