author     Ingo Molnar <mingo@elte.hu>  2009-05-11 08:44:27 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-05-11 08:44:31 -0400
commit     41fb454ebe6024f5c1e3b3cbc0abc0da762e7b51 (patch)
tree       51c50bcb67a5039448ddfa1869d7948cab1217e9 /arch/x86/kernel
parent     19c1a6f5764d787113fa323ffb18be7991208f82 (diff)
parent     091bf7624d1c90cec9e578a18529f615213ff847 (diff)

Merge commit 'v2.6.30-rc5' into core/iommu

Merge reason: core/iommu was on an .30-rc1 base, update it to .30-rc5 to refresh.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')

 arch/x86/kernel/amd_iommu_init.c           |  16
 arch/x86/kernel/apic/apic.c                |   6
 arch/x86/kernel/apic/apic_flat_64.c        |   4
 arch/x86/kernel/apic/io_apic.c             |   7
 arch/x86/kernel/apic/nmi.c                 |   5
 arch/x86/kernel/apic/x2apic_uv_x.c         |  31
 arch/x86/kernel/bios_uv.c                  |   3
 arch/x86/kernel/cpu/addon_cpuid_features.c |   1
 arch/x86/kernel/cpu/common.c               |   2
 arch/x86/kernel/cpu/cpu_debug.c            |   0
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 101
 arch/x86/kernel/cpu/cpufreq/longhaul.c     |   1
 arch/x86/kernel/cpu/mcheck/mce_64.c        |  33
 arch/x86/kernel/cpu/mcheck/mce_intel_64.c  |  10
 arch/x86/kernel/cpu/proc.c                 |   2
 arch/x86/kernel/e820.c                     |  11
 arch/x86/kernel/entry_64.S                 |   3
 arch/x86/kernel/ftrace.c                   |   2
 arch/x86/kernel/hpet.c                     |  24
 arch/x86/kernel/i8253.c                    |   2
 arch/x86/kernel/irq.c                      |   2
 arch/x86/kernel/kvmclock.c                 |   7
 arch/x86/kernel/machine_kexec_32.c         |   4
 arch/x86/kernel/machine_kexec_64.c         |   4
 arch/x86/kernel/microcode_core.c           |  35
 arch/x86/kernel/mpparse.c                  |   7
 arch/x86/kernel/pci-swiotlb.c              |   2
 arch/x86/kernel/ptrace.c                   |   3
 arch/x86/kernel/quirks.c                   |   2
 arch/x86/kernel/reboot.c                   |   8
 arch/x86/kernel/tlb_uv.c                   | 189
 arch/x86/kernel/tsc.c                      |   2
 arch/x86/kernel/uv_sysfs.c                 |   4
 arch/x86/kernel/uv_time.c                  |  10
 arch/x86/kernel/vmiclock_32.c              |   2
 arch/x86/kernel/xsave.c                    |   6
 mode change 100755 => 100644 arch/x86/kernel/cpu/cpu_debug.c
 36 files changed, 348 insertions(+), 203 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42c33cebf00f..8c0be0902dac 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -49,10 +49,10 @@
 #define IVHD_DEV_EXT_SELECT		0x46
 #define IVHD_DEV_EXT_SELECT_RANGE	0x47
 
-#define IVHD_FLAG_HT_TUN_EN		0x00
-#define IVHD_FLAG_PASSPW_EN		0x01
-#define IVHD_FLAG_RESPASSPW_EN		0x02
-#define IVHD_FLAG_ISOC_EN		0x03
+#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
+#define IVHD_FLAG_PASSPW_EN_MASK	0x02
+#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
+#define IVHD_FLAG_ISOC_EN_MASK		0x08
 
 #define IVMD_FLAG_EXCL_RANGE		0x08
 #define IVMD_FLAG_UNITY_MAP		0x01
@@ -569,19 +569,19 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
	 * First set the recommended feature enable bits from ACPI
	 * into the IOMMU control registers
	 */
-	h->flags & IVHD_FLAG_HT_TUN_EN ?
+	h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
 
-	h->flags & IVHD_FLAG_PASSPW_EN ?
+	h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
 
-	h->flags & IVHD_FLAG_RESPASSPW_EN ?
+	h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
 
-	h->flags & IVHD_FLAG_ISOC_EN ?
+	h->flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 098ec84b8c00..f2870920f246 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -431,6 +431,12 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
+	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
+		/* Make LAPIC timer preferrable over percpu HPET */
+		lapic_clockevent.rating = 150;
+	}
+
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
 	levt->cpumask = cpumask_of(smp_processor_id());
 
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 0014714ea97b..306e5e88fb6f 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -212,7 +212,7 @@ struct apic apic_flat = {
 	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
 	.wait_for_init_deassert		= NULL,
 	.smp_callin_clear_local_apic	= NULL,
-	.inquire_remote_apic		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
 	.write				= native_apic_mem_write,
@@ -362,7 +362,7 @@ struct apic apic_physflat = {
 	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
 	.wait_for_init_deassert		= NULL,
 	.smp_callin_clear_local_apic	= NULL,
-	.inquire_remote_apic		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
 	.write				= native_apic_mem_write,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 767fe7e46d68..30da617d18e4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2524,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp)
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
 
-#ifdef CONFIG_X86_X2APIC
 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -2558,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+#ifdef CONFIG_X86_X2APIC
 static void ack_x2apic_level(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -2634,6 +2634,9 @@ static void ack_apic_level(unsigned int irq)
 	 */
 	ack_APIC_irq();
 
+	if (irq_remapped(irq))
+		eoi_ioapic_irq(desc);
+
 	/* Now we can move and renable the irq */
 	if (unlikely(do_unmask_irq)) {
 		/* Only migrate the irq if the ack has been received.
@@ -3667,12 +3670,14 @@ int arch_setup_hpet_msi(unsigned int irq)
 {
 	int ret;
 	struct msi_msg msg;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	ret = msi_compose_msg(NULL, irq, &msg);
 	if (ret < 0)
 		return ret;
 
 	hpet_msi_write(irq, &msg);
+	desc->status |= IRQ_MOVE_PCNTXT;
 	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
 				      "edge");
 
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index d6bd62407152..ce4fbfa315a1 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -138,7 +138,7 @@ int __init check_nmi_watchdog(void)
 	if (!prev_nmi_count)
 		goto error;
 
-	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
+	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -414,7 +414,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	if (cpumask_test_cpu(cpu, backtrace_mask)) {
+	/* We can be called before check_nmi_watchdog, hence NULL check. */
+	if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
 		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
 		spin_lock(&lock);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e8..2bda69352976 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -19,6 +19,7 @@
 #include <linux/timer.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -34,6 +35,17 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 
+static int early_get_nodeid(void)
+{
+	union uvh_node_id_u node_id;
+	unsigned long *mmr;
+
+	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
+	node_id.v = *mmr;
+	early_iounmap(mmr, sizeof(*mmr));
+	return node_id.s.node_id;
+}
+
 static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
@@ -42,6 +54,8 @@ static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	else if (!strcmp(oem_table_id, "UVX"))
 		uv_system_type = UV_X2APIC;
 	else if (!strcmp(oem_table_id, "UVH")) {
+		__get_cpu_var(x2apic_extra_bits) =
+			early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
 		uv_system_type = UV_NON_UNIQUE_APIC;
 		return 1;
 	}
@@ -549,7 +563,8 @@ void __init uv_system_init(void)
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
 	int max_pnode = 0;
-	unsigned long mmr_base, present;
+	unsigned long mmr_base, present, paddr;
+	unsigned short pnode_mask;
 
 	map_low_mmrs();
 
@@ -592,6 +607,7 @@ void __init uv_system_init(void)
 		}
 	}
 
+	pnode_mask = (1 << n_val) - 1;
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
@@ -615,7 +631,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -631,6 +647,17 @@ void __init uv_system_init(void)
 			lcpu, blade);
 	}
 
+	/* Add blade/pnode info for nodes without cpus */
+	for_each_online_node(nid) {
+		if (uv_node_to_blade[nid] >= 0)
+			continue;
+		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		paddr = uv_soc_phys_ram_to_gpa(paddr);
+		pnode = (paddr >> m_val) & pnode_mask;
+		blade = boot_pnode_to_blade(pnode);
+		uv_node_to_blade[nid] = blade;
+	}
+
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
 	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
 	iounmap(tab);
 
-	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+	printk(KERN_INFO "EFI UV System Table Revision %d\n",
+					uv_systab.revision);
 }
 #else	/* !CONFIG_EFI */
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 8220ae69849d..c965e5212714 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 
 	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+		{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c4f667896c28..c1caefc82e62 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1203,6 +1203,8 @@ void __cpuinit cpu_init(void)
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 #ifdef CONFIG_DOUBLEFAULT
 	/* Set up doublefault TSS pointer in the GDT */
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
old mode 100755
new mode 100644
index 46e29ab96c6a..46e29ab96c6a
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 19f6b9d27e83..208ecf6643df 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,13 +65,18 @@ enum {
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
-	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+struct acpi_msr_data {
+	u64 saved_aperf, saved_mperf;
+};
+
+static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+
 DEFINE_TRACE(power_mark);
 
 /* acpi_perf_data is a pointer to percpu data. */
@@ -152,7 +157,8 @@ struct drv_cmd {
 	u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 h;
@@ -169,10 +175,10 @@ static long do_drv_read(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
@@ -191,23 +197,24 @@ static long do_drv_write(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
 	cmd->val = 0;
 
-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
+	int this_cpu;
 
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -241,28 +248,23 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_cur {
+struct perf_pair {
 	union {
 		struct {
 			u32 lo;
 			u32 hi;
 		} split;
 		u64 whole;
-	} aperf_cur, mperf_cur;
+	} aperf, mperf;
 };
 
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_cur *cur = _cur;
-
-	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+	struct perf_pair *cur = _cur;
 
-	wrmsr(MSR_IA32_APERF, 0, 0);
-	wrmsr(MSR_IA32_MPERF, 0, 0);
-
-	return 0;
+	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
 }
 
 /*
@@ -281,58 +283,63 @@ static long read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_cur cur;
+	struct perf_pair readin, cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
 		return 0;
 
+	cur.aperf.whole = readin.aperf.whole -
+				per_cpu(msr_data, cpu).saved_aperf;
+	cur.mperf.whole = readin.mperf.whole -
+				per_cpu(msr_data, cpu).saved_mperf;
+	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
+	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
+
 #ifdef __i386__
 	/*
 	 * We dont want to do 64 bit divide with 32 bit kernel
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
 		shift_count = fls(h);
 
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
 		int shift_count = 7;
-		cur.aperf_cur.split.lo >>= shift_count;
-		cur.mperf_cur.split.lo >>= shift_count;
+		cur.aperf.split.lo >>= shift_count;
+		cur.mperf.split.lo >>= shift_count;
 	}
 
-	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
-		perf_percent = (cur.aperf_cur.split.lo * 100) /
-			cur.mperf_cur.split.lo;
+	if (cur.aperf.split.lo && cur.mperf.split.lo)
+		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
 		int shift_count = 7;
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
-		perf_percent = (cur.aperf_cur.whole * 100) /
-			cur.mperf_cur.whole;
+	if (cur.aperf.whole && cur.mperf.whole)
+		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
 	else
 		perf_percent = 0;
 
 #endif
 
-	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
+	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
 
 	return retval;
 }
@@ -685,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
-		static int print_once;
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		if (!print_once) {
-			print_once = 1;
-			printk(KERN_INFO "Capping off P-state tranision latency"
-				" at 20 uS\n");
-		}
+		printk_once(KERN_INFO "Capping off P-state tranision"
+			    " latency at 20 uS\n");
 	}
 
-	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
 	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency >=
@@ -713,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (result)
 		goto err_freqfree;
 
+	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		/* Current speed is unknown and not detectable by IO port */
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 0bd48e65a0ca..ce2ed3e4aad9 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -33,7 +33,6 @@
 #include <linux/timex.h>
 #include <linux/io.h>
 #include <linux/acpi.h>
-#include <linux/kernel.h>
 
 #include <asm/msr.h>
 #include <acpi/processor.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 863f89568b1a..6fb0b359d2a5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -239,9 +239,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		 * Don't get the IP here because it's unlikely to
 		 * have anything to do with the actual error location.
 		 */
-
-		mce_log(&m);
-		add_taint(TAINT_MACHINE_CHECK);
+		if (!(flags & MCP_DONTLOG)) {
+			mce_log(&m);
+			add_taint(TAINT_MACHINE_CHECK);
+		}
 
 		/*
 		 * Clear state for this bank.
@@ -452,13 +453,14 @@ void mce_log_therm_throt_event(__u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static int next_interval; /* in jiffies */
+static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
 static void mcheck_timer(unsigned long);
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
 {
 	struct timer_list *t = &per_cpu(mce_timer, data);
+	int *n;
 
 	WARN_ON(smp_processor_id() != data);
 
@@ -470,14 +472,14 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed. If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
+	n = &__get_cpu_var(next_interval);
 	if (mce_notify_user()) {
-		next_interval = max(next_interval/2, HZ/100);
+		*n = max(*n/2, HZ/100);
 	} else {
-		next_interval = min(next_interval * 2,
-				(int)round_jiffies_relative(check_interval*HZ));
+		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
 	}
 
-	t->expires = jiffies + next_interval;
+	t->expires = jiffies + *n;
 	add_timer(t);
 }
 
@@ -584,7 +586,7 @@ static void mce_init(void *dummy)
 	 * Log the machine checks left over from the previous reset.
 	 */
 	bitmap_fill(all_banks, MAX_NR_BANKS);
-	machine_check_poll(MCP_UC, &all_banks);
+	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
 
 	set_in_cr4(X86_CR4_MCE);
 
@@ -632,14 +634,13 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
+	int *n = &__get_cpu_var(next_interval);
 
-	/* data race harmless because everyone sets to the same value */
-	if (!next_interval)
-		next_interval = check_interval * HZ;
-	if (!next_interval)
+	*n = check_interval * HZ;
+	if (!*n)
 		return;
 	setup_timer(t, mcheck_timer, smp_processor_id());
-	t->expires = round_jiffies(jiffies + next_interval);
+	t->expires = round_jiffies(jiffies + *n);
 	add_timer(t);
 }
 
@@ -907,7 +908,6 @@ static void mce_cpu_restart(void *data)
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-	next_interval = check_interval * HZ;
 	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
@@ -1110,7 +1110,8 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		t->expires = round_jiffies(jiffies + next_interval);
+		t->expires = round_jiffies(jiffies +
+					   __get_cpu_var(next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index d6b72df89d69..cef3ee30744b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -151,10 +151,11 @@ static void print_update(char *type, int *hdr, int num)
 static void cmci_discover(int banks, int boot)
 {
 	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	unsigned long flags;
 	int hdr = 0;
 	int i;
 
-	spin_lock(&cmci_discover_lock);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 
@@ -184,7 +185,7 @@ static void cmci_discover(int banks, int boot)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	spin_unlock(&cmci_discover_lock);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (hdr)
 		printk(KERN_CONT "\n");
 }
@@ -211,13 +212,14 @@ void cmci_recheck(void)
  */
 void cmci_clear(void)
 {
+	unsigned long flags;
 	int i;
 	int banks;
 	u64 val;
 
 	if (!cmci_supported(&banks))
 		return;
-	spin_lock(&cmci_discover_lock);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
 			continue;
@@ -227,7 +229,7 @@ void cmci_clear(void)
 		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
 		__clear_bit(i, __get_cpu_var(mce_banks_owned));
 	}
-	spin_unlock(&cmci_discover_lock);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index f93047fed791..d5e30397246b 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 	if (c->x86_max_cores * smp_num_siblings > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
 		seq_printf(m, "siblings\t: %d\n",
-			   cpumask_weight(cpu_sibling_mask(cpu)));
+			   cpumask_weight(cpu_core_mask(cpu)));
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ef2c3563357d..006281302925 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1074,12 +1074,13 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 	u64 addr;
 	u64 start;
 
-	start = startt;
-	while (size < sizet && (start + 1))
+	for (start = startt; ; start += size) {
 		start = find_e820_area_size(start, &size, align);
-
-	if (size < sizet)
-		return 0;
+		if (!(start + 1))
+			return 0;
+		if (size >= sizet)
+			break;
+	}
 
 #ifdef CONFIG_X86_32
 	if (start >= MAXMEM)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a331ec38af9e..38946c6e8433 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1410,7 +1410,10 @@ ENTRY(paranoid_exit)
 paranoid_swapgs:
 	TRACE_IRQS_IRETQ 0
 	SWAPGS_UNSAFE_STACK
+	RESTORE_ALL 8
+	jmp irq_return
 paranoid_restore:
+	TRACE_IRQS_IRETQ 0
 	RESTORE_ALL 8
 	jmp irq_return
 paranoid_userspace:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 70a10ca100f6..18dfa30795c9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <trace/syscall.h>
+
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a44..81408b93f887 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -236,6 +236,10 @@ static void hpet_stop_counter(void)
 	unsigned long cfg = hpet_readl(HPET_CFG);
 	cfg &= ~HPET_CFG_ENABLE;
 	hpet_writel(cfg, HPET_CFG);
+}
+
+static void hpet_reset_counter(void)
+{
 	hpet_writel(0, HPET_COUNTER);
 	hpet_writel(0, HPET_COUNTER + 4);
 }
@@ -250,6 +254,7 @@ static void hpet_start_counter(void)
 static void hpet_restart_counter(void)
 {
 	hpet_stop_counter();
+	hpet_reset_counter();
 	hpet_start_counter();
 }
 
@@ -309,7 +314,7 @@ static int hpet_setup_msi_irq(unsigned int irq);
 static void hpet_set_mode(enum clock_event_mode mode,
 			  struct clock_event_device *evt, int timer)
 {
-	unsigned long cfg;
+	unsigned long cfg, cmp, now;
 	uint64_t delta;
 
 	switch (mode) {
@@ -317,12 +322,23 @@ static void hpet_set_mode(enum clock_event_mode mode,
 		hpet_stop_counter();
 		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
 		delta >>= evt->shift;
+		now = hpet_readl(HPET_COUNTER);
+		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
 		/* Make sure we use edge triggered interrupts */
 		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
+		hpet_writel(cmp, HPET_Tn_CMP(timer));
+		udelay(1);
+		/*
+		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
+		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
+		 * bit is automatically cleared after the first write.
+		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
+		 * Publication # 24674)
+		 */
 		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
 		hpet_start_counter();
 		hpet_print_config();
@@ -722,7 +738,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
  */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
 	return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +772,7 @@ static int hpet_clocksource_register(void)
 	hpet_restart_counter();
 
 	/* Verify whether hpet counter works */
-	t1 = read_hpet();
+	t1 = hpet_readl(HPET_COUNTER);
 	rdtscll(start);
 
 	/*
@@ -770,7 +786,7 @@ static int hpet_clocksource_register(void)
 		rdtscll(now);
 	} while ((now - start) < 200000UL);
 
-	if (t1 == read_hpet()) {
+	if (t1 == hpet_readl(HPET_COUNTER)) {
 		printk(KERN_WARNING
 		       "HPET counter not counting. HPET disabled\n");
 		return -ENODEV;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa54..c2e0bb0890d4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
 	static int old_count;
 	static u32 old_jifs;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3aaf7b9e3a8b..c3fe010d74c8 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -65,7 +65,7 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "  Spurious interrupts\n");
 #endif
 	if (generic_interrupt_extension) {
-		seq_printf(p, "PLT: ");
+		seq_printf(p, "%*s: ", prec, "PLT");
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
 		seq_printf(p, "  Platform interrupts\n");
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132df..223af43f1526 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+	return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
-	.read = kvm_clock_read,
+	.read = kvm_clock_get_cycles,
 	.rating = 400,
 	.mask = CLOCKSOURCE_MASK(64),
 	.mult = 1 << KVM_SCALE,
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index e7368c1da01d..c1c429d00130 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -194,7 +194,7 @@ void machine_kexec(struct kimage *image)
 				unsigned int preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		save_processor_state();
 #endif
 
@@ -253,7 +253,7 @@ void machine_kexec(struct kimage *image)
 					image->preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		restore_processor_state();
 #endif
 
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 89cea4d44679..84c3bf209e98 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -274,7 +274,7 @@ void machine_kexec(struct kimage *image)
 	int save_ftrace_enabled;
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		save_processor_state();
 #endif
 
@@ -333,7 +333,7 @@ void machine_kexec(struct kimage *image)
 					image->preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-	if (kexec_image->preserve_context)
+	if (image->preserve_context)
 		restore_processor_state();
 #endif
 
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..98c470c069d1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
-struct update_for_cpu {
-	const void __user *buf;
-	size_t size;
-};
-
-static long update_for_cpu(void *_ufc)
-{
-	struct update_for_cpu *ufc = _ufc;
-	int error;
-
-	error = microcode_ops->request_microcode_user(smp_processor_id(),
-						      ufc->buf, ufc->size);
-	if (error < 0)
-		return error;
-	if (!error)
-		microcode_ops->apply_microcode(smp_processor_id());
-	return error;
-}
-
 static int do_microcode_update(const void __user *buf, size_t size)
 {
+	cpumask_t old;
 	int error = 0;
 	int cpu;
-	struct update_for_cpu ufc = { .buf = buf, .size = size };
+
+	old = current->cpus_allowed;
 
 	for_each_online_cpu(cpu) {
 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 		if (!uci->valid)
 			continue;
-		error = work_on_cpu(cpu, update_for_cpu, &ufc);
+
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		error = microcode_ops->request_microcode_user(cpu, buf, size);
 		if (error < 0)
-			break;
+			goto out;
+		if (!error)
+			microcode_ops->apply_microcode(cpu);
 	}
+out:
+	set_cpus_allowed_ptr(current, &old);
 	return error;
 }
 
@@ -391,8 +380,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return err;
 
 	err = microcode_init_cpu(cpu);
-	if (err)
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 
 	return err;
 }
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf8..70fd7e414c15 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
 	__get_smp_config(0);
 }
 
-static void smp_reserve_bootmem(struct mpf_intel *mpf)
+static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
 {
 	unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
 
 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
 
-static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
+static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 {
 	int i;
 
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 	}
 }
 #else /* CONFIG_X86_IO_APIC */
-static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
+static
+inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
 #endif /* CONFIG_X86_IO_APIC */
 
 static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 887388a1c57d..a1712f2b50f1 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_map_ops swiotlb_dma_ops = {
+static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fe9345c967de..23b7c8f017e2 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,7 +21,6 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
-#include <linux/ftrace.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -35,6 +34,8 @@
 #include <asm/proto.h>
 #include <asm/ds.h>
 
+#include <trace/syscall.h>
+
 #include "tls.h"
 
 enum x86_regset {
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index e95022e4f5d5..7563b31b4f03 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -261,8 +261,6 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
 {
 	if (hpet_force_user)
 		old_ich_force_enable_hpet(dev);
-	else
-		hpet_print_force_info();
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2aef36d8aca2..1340dad417f4 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -224,6 +224,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
 		},
 	},
+	{	/* Handle problems with rebooting on Dell DXP061 */
+		.callback = set_bios_reboot,
+		.ident = "Dell DXP061",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
 
 /* position of pnode (which is nasid>>1): */
 static int uv_nshift __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
 
 static unsigned long uv_mmask __read_mostly;
 
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
 
 /*
+ * Determine the first node on a blade.
+ */
+static int __init blade_to_first_node(int blade)
+{
+	int node, b;
+
+	for_each_online_node(node) {
+		b = uv_node_to_blade_id(node);
+		if (blade == b)
+			return node;
+	}
+	return -1; /* shouldn't happen */
+}
+
+/*
+ * Determine the apicid of the first cpu on a blade.
+ */
+static int __init blade_to_first_apicid(int blade)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		if (blade == uv_cpu_to_blade_id(cpu))
+			return per_cpu(x86_cpu_to_apicid, cpu);
+	return -1;
+}
+
+/*
  * Free a software acknowledge hardware resource by clearing its Pending
  * bit. This will return a reply to the sender.
  * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
 	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
 	cpu = uv_blade_processor_id();
 	msg->number_of_cpus =
-	uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
+		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
 	this_cpu_mask = 1UL << cpu;
 	if (msp->seen_by.bits & this_cpu_mask)
 		return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * Returns @flush_mask if some remote flushing remains to be done. The
  * mask will have some bits still set.
  */
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
 					     struct bau_desc *bau_desc,
 					     struct cpumask *flush_mask)
 {
 	int completion_status = 0;
 	int right_shift;
 	int tries = 0;
-	int blade;
+	int pnode;
 	int bit;
 	unsigned long mmr_offset;
 	unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	 * use the IPI method of shootdown on them.
 	 */
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		if (blade == this_blade)
+		pnode = uv_cpu_to_pnode(bit);
+		if (pnode == this_pnode)
 			continue;
 		cpumask_clear_cpu(bit, flush_mask);
 	}
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
 	int i;
 	int bit;
-	int blade;
+	int pnode;
 	int uv_cpu;
-	int this_blade;
+	int this_pnode;
 	int locals = 0;
 	struct bau_desc *bau_desc;
 
 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
 	uv_cpu = uv_blade_processor_id();
-	this_blade = uv_numa_blade_id();
+	this_pnode = uv_hub_info->pnode;
 	bau_desc = __get_cpu_var(bau_control).descriptor_base;
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	i = 0;
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
-		if (blade == this_blade) {
+		pnode = uv_cpu_to_pnode(bit);
+		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
+		if (pnode == this_pnode) {
 			locals++;
 			continue;
 		}
-		bau_node_set(blade, &bau_desc->distribution);
+		bau_node_set(pnode - uv_partition_base_pnode,
+			     &bau_desc->distribution);
 		i++;
 	}
 	if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc->payload.address = va;
 	bau_desc->payload.sending_cpu = cpu;
 
-	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
+	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
 }
 
 /*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
418 set_irq_regs(old_regs); 449 set_irq_regs(old_regs);
419} 450}
420 451
452/*
453 * uv_enable_timeouts
454 *
455 * Each target blade (i.e. blades that have cpu's) needs to have
456 * shootdown message timeouts enabled. The timeout does not cause
457 * an interrupt, but causes an error message to be returned to
458 * the sender.
459 */
421static void uv_enable_timeouts(void) 460static void uv_enable_timeouts(void)
422{ 461{
423 int i;
424 int blade; 462 int blade;
425 int last_blade; 463 int nblades;
426 int pnode; 464 int pnode;
427 int cur_cpu = 0; 465 unsigned long mmr_image;
428 unsigned long apicid;
429 466
430 last_blade = -1; 467 nblades = uv_num_possible_blades();
431 for_each_online_node(i) { 468
432 blade = uv_node_to_blade_id(i); 469 for (blade = 0; blade < nblades; blade++) {
433 if (blade == last_blade) 470 if (!uv_blade_nr_possible_cpus(blade))
434 continue; 471 continue;
435 last_blade = blade; 472
436 apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
437 pnode = uv_blade_to_pnode(blade); 473 pnode = uv_blade_to_pnode(blade);
438 cur_cpu += uv_blade_nr_possible_cpus(i); 474 mmr_image =
475 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
476 /*
477 * Set the timeout period and then lock it in; the three
478 * steps below capture the period and lock it in.
479 *
480 * To program the period, the SOFT_ACK_MODE must be off.
481 */
482 mmr_image &= ~((unsigned long)1 <<
483 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
484 uv_write_global_mmr64
485 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
486 /*
487 * Set the 4-bit period.
488 */
489 mmr_image &= ~((unsigned long)0xf <<
490 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
491 mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
492 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
493 uv_write_global_mmr64
494 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
495 /*
496 * Subsequent reversals of the timebase bit (3) cause an
497 * immediate timeout of one or all INTD resources as
498 * indicated in bits 2:0 (7 causes all of them to timeout).
499 */
500 mmr_image |= ((unsigned long)1 <<
501 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
502 uv_write_global_mmr64
503 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
439 } 504 }
440} 505}
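The new uv_enable_timeouts() above programs each pnode's UVH_LB_BAU_MISC_CONTROL with a three-write read-modify-write: switch the soft-ack mode bit off so the period can be programmed, write the 4-bit timeout period, then switch the mode bit back on, which locks the period in. A standalone sketch of that bit-field sequence (not kernel code; the shift positions, the period value and the initial register image are invented, and write_mmr() only stands in for uv_write_global_mmr64()):

#include <stdio.h>

#define SOFT_ACK_MODE_SHIFT  15     /* invented stand-in for UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT */
#define ACK_PERIOD_SHIFT     27     /* invented stand-in for UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT */
#define ACK_PERIOD           0xaUL  /* invented 4-bit period value */

static unsigned long mmr = 0xdeadbeefUL;          /* pretend current register image */

static void write_mmr(unsigned long image)        /* stand-in for uv_write_global_mmr64() */
{
        mmr = image;
        printf("mmr = 0x%016lx\n", mmr);
}

int main(void)
{
        unsigned long image = mmr;                 /* read-modify-write starts from the old image */

        image &= ~(1UL << SOFT_ACK_MODE_SHIFT);    /* 1: mode off so the period can be programmed */
        write_mmr(image);

        image &= ~(0xfUL << ACK_PERIOD_SHIFT);     /* 2: clear and program the 4-bit period field */
        image |= ACK_PERIOD << ACK_PERIOD_SHIFT;
        write_mmr(image);

        image |= 1UL << SOFT_ACK_MODE_SHIFT;       /* 3: mode back on; the period is now locked in */
        write_mmr(image);
        return 0;
}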
441 506
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
482 stat->requestee, stat->onetlb, stat->alltlb, 547 stat->requestee, stat->onetlb, stat->alltlb,
483 stat->s_retry, stat->d_retry, stat->ptc_i); 548 stat->s_retry, stat->d_retry, stat->ptc_i);
484 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", 549 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
485 uv_read_global_mmr64(uv_blade_to_pnode 550 uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
486 (uv_cpu_to_blade_id(cpu)),
487 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), 551 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
488 stat->sflush, stat->dflush, 552 stat->sflush, stat->dflush,
489 stat->retriesok, stat->nomsg, 553 stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
617 * finish the initialization of the per-blade control structures 681 * finish the initialization of the per-blade control structures
618 */ 682 */
619static void __init 683static void __init
620uv_table_bases_finish(int blade, int node, int cur_cpu, 684uv_table_bases_finish(int blade,
621 struct bau_control *bau_tablesp, 685 struct bau_control *bau_tablesp,
622 struct bau_desc *adp) 686 struct bau_desc *adp)
623{ 687{
624 struct bau_control *bcp; 688 struct bau_control *bcp;
625 int i; 689 int cpu;
626 690
627 for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) { 691 for_each_present_cpu(cpu) {
628 bcp = (struct bau_control *)&per_cpu(bau_control, i); 692 if (blade != uv_cpu_to_blade_id(cpu))
693 continue;
629 694
695 bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
630 bcp->bau_msg_head = bau_tablesp->va_queue_first; 696 bcp->bau_msg_head = bau_tablesp->va_queue_first;
631 bcp->va_queue_first = bau_tablesp->va_queue_first; 697 bcp->va_queue_first = bau_tablesp->va_queue_first;
632 bcp->va_queue_last = bau_tablesp->va_queue_last; 698 bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
649 struct bau_desc *adp; 715 struct bau_desc *adp;
650 struct bau_desc *ad2; 716 struct bau_desc *ad2;
651 717
652 adp = (struct bau_desc *) 718 adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
653 kmalloc_node(16384, GFP_KERNEL, node);
654 BUG_ON(!adp); 719 BUG_ON(!adp);
655 720
656 pa = __pa((unsigned long)adp); 721 pa = uv_gpa(adp); /* need the real nasid */
657 n = pa >> uv_nshift; 722 n = pa >> uv_nshift;
658 m = pa & uv_mmask; 723 m = pa & uv_mmask;
659 724
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
667 for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) { 732 for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
668 memset(ad2, 0, sizeof(struct bau_desc)); 733 memset(ad2, 0, sizeof(struct bau_desc));
669 ad2->header.sw_ack_flag = 1; 734 ad2->header.sw_ack_flag = 1;
670 ad2->header.base_dest_nodeid = 735 /*
671 uv_blade_to_pnode(uv_cpu_to_blade_id(0)); 736 * base_dest_nodeid is the first node in the partition, so
737 * the bitmap will indicate partition-relative node numbers.
738 * Note that base_dest_nodeid is actually a nasid.
739 */
740 ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
672 ad2->header.command = UV_NET_ENDPOINT_INTD; 741 ad2->header.command = UV_NET_ENDPOINT_INTD;
673 ad2->header.int_both = 1; 742 ad2->header.int_both = 1;
674 /* 743 /*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
686uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) 755uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
687{ 756{
688 struct bau_payload_queue_entry *pqp; 757 struct bau_payload_queue_entry *pqp;
758 unsigned long pa;
759 int pn;
689 char *cp; 760 char *cp;
690 761
691 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 762 pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
696 cp = (char *)pqp + 31; 767 cp = (char *)pqp + 31;
697 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); 768 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
698 bau_tablesp->va_queue_first = pqp; 769 bau_tablesp->va_queue_first = pqp;
770 /*
771 * need the pnode where the memory was really allocated
772 */
773 pa = uv_gpa(pqp);
774 pn = pa >> uv_nshift;
699 uv_write_global_mmr64(pnode, 775 uv_write_global_mmr64(pnode,
700 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, 776 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
701 ((unsigned long)pnode << 777 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
702 UV_PAYLOADQ_PNODE_SHIFT) |
703 uv_physnodeaddr(pqp)); 778 uv_physnodeaddr(pqp));
704 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, 779 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
705 uv_physnodeaddr(pqp)); 780 uv_physnodeaddr(pqp));
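Both hunks above stop assuming the BAU tables live on the boot node: uv_gpa() returns a global address whose upper bits (pa >> uv_nshift) are the pnode where the memory was actually allocated and whose lower bits (pa & uv_mmask) are the node-local offset, while the base_dest_nodeid assignment turns a pnode into a nasid by shifting it left one bit. A standalone sketch of that address arithmetic (not kernel code; uv_nshift and the address below are invented values):

#include <stdio.h>

int main(void)
{
        int uv_nshift = 39;                                      /* hypothetical n_val: node bits start here */
        unsigned long long uv_mmask = (1ULL << uv_nshift) - 1;   /* node-local address mask */
        unsigned long long gpa = (5ULL << uv_nshift) | 0x1234000ULL;  /* made-up global address */

        unsigned long long pnode  = gpa >> uv_nshift;            /* node the memory really sits on */
        unsigned long long offset = gpa & uv_mmask;              /* node-local part of the address */
        unsigned long long nasid  = pnode << 1;                  /* pnode -> nasid, as in base_dest_nodeid */

        printf("pnode %llu, offset 0x%llx, nasid %llu\n", pnode, offset, nasid);  /* 5, 0x1234000, 10 */
        return 0;
}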
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
715/* 790/*
716 * Initialization of each UV blade's structures 791 * Initialization of each UV blade's structures
717 */ 792 */
718static int __init uv_init_blade(int blade, int node, int cur_cpu) 793static int __init uv_init_blade(int blade)
719{ 794{
795 int node;
720 int pnode; 796 int pnode;
721 unsigned long pa; 797 unsigned long pa;
722 unsigned long apicid; 798 unsigned long apicid;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
724 struct bau_payload_queue_entry *pqp; 800 struct bau_payload_queue_entry *pqp;
725 struct bau_control *bau_tablesp; 801 struct bau_control *bau_tablesp;
726 802
803 node = blade_to_first_node(blade);
727 bau_tablesp = uv_table_bases_init(blade, node); 804 bau_tablesp = uv_table_bases_init(blade, node);
728 pnode = uv_blade_to_pnode(blade); 805 pnode = uv_blade_to_pnode(blade);
729 adp = uv_activation_descriptor_init(node, pnode); 806 adp = uv_activation_descriptor_init(node, pnode);
730 pqp = uv_payload_queue_init(node, pnode, bau_tablesp); 807 pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
731 uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp); 808 uv_table_bases_finish(blade, bau_tablesp, adp);
732 /* 809 /*
733 * the below initialization can't be in firmware because the 810 * the below initialization can't be in firmware because the
734 * messaging IRQ will be determined by the OS 811 * messaging IRQ will be determined by the OS
735 */ 812 */
736 apicid = per_cpu(x86_cpu_to_apicid, cur_cpu); 813 apicid = blade_to_first_apicid(blade);
737 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); 814 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
738 if ((pa & 0xff) != UV_BAU_MESSAGE) { 815 if ((pa & 0xff) != UV_BAU_MESSAGE) {
739 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 816 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
748static int __init uv_bau_init(void) 825static int __init uv_bau_init(void)
749{ 826{
750 int blade; 827 int blade;
751 int node;
752 int nblades; 828 int nblades;
753 int last_blade;
754 int cur_cpu; 829 int cur_cpu;
755 830
756 if (!is_uv_system()) 831 if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
763 uv_bau_retry_limit = 1; 838 uv_bau_retry_limit = 1;
764 uv_nshift = uv_hub_info->n_val; 839 uv_nshift = uv_hub_info->n_val;
765 uv_mmask = (1UL << uv_hub_info->n_val) - 1; 840 uv_mmask = (1UL << uv_hub_info->n_val) - 1;
766 nblades = 0; 841 nblades = uv_num_possible_blades();
767 last_blade = -1; 842
768 cur_cpu = 0;
769 for_each_online_node(node) {
770 blade = uv_node_to_blade_id(node);
771 if (blade == last_blade)
772 continue;
773 last_blade = blade;
774 nblades++;
775 }
776 uv_bau_table_bases = (struct bau_control **) 843 uv_bau_table_bases = (struct bau_control **)
777 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL); 844 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
778 BUG_ON(!uv_bau_table_bases); 845 BUG_ON(!uv_bau_table_bases);
779 846
780 last_blade = -1; 847 uv_partition_base_pnode = 0x7fffffff;
781 for_each_online_node(node) { 848 for (blade = 0; blade < nblades; blade++)
782 blade = uv_node_to_blade_id(node); 849 if (uv_blade_nr_possible_cpus(blade) &&
783 if (blade == last_blade) 850 (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
784 continue; 851 uv_partition_base_pnode = uv_blade_to_pnode(blade);
785 last_blade = blade; 852 for (blade = 0; blade < nblades; blade++)
786 uv_init_blade(blade, node, cur_cpu); 853 if (uv_blade_nr_possible_cpus(blade))
787 cur_cpu += uv_blade_nr_possible_cpus(blade); 854 uv_init_blade(blade);
788 } 855
789 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); 856 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
790 uv_enable_timeouts(); 857 uv_enable_timeouts();
791 858
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe6361..d57de05dc430 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
699 * code, which is necessary to support wrapping clocksources like pm 699 * code, which is necessary to support wrapping clocksources like pm
700 * timer. 700 * timer.
701 */ 701 */
702static cycle_t read_tsc(void) 702static cycle_t read_tsc(struct clocksource *cs)
703{ 703{
704 cycle_t ret = (cycle_t)get_cycles(); 704 cycle_t ret = (cycle_t)get_cycles();
705 705
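The one-line change above is part of a clocksource API update picked up by this merge: the ->read() callback now receives the struct clocksource pointer, and the same edit appears below in uv_time.c and vmiclock_32.c, where direct callers simply pass NULL. A minimal sketch of a clocksource written against the new callback shape (assuming the 2.6.30-era <linux/clocksource.h>; the driver name, rating and counter variable are invented):

#include <linux/clocksource.h>

static u64 my_counter;                  /* placeholder for a real hardware counter */

/* ->read() now takes the clocksource; callers that have none pass NULL */
static cycle_t my_cs_read(struct clocksource *cs)
{
        return (cycle_t)my_counter;
}

static struct clocksource my_clocksource = {
        .name   = "my_counter",
        .rating = 100,
        .read   = my_cs_read,
        .mask   = CLOCKSOURCE_MASK(64),
};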
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/sysdev.h> 22#include <linux/sysdev.h>
23#include <asm/uv/bios.h> 23#include <asm/uv/bios.h>
24#include <asm/uv/uv.h>
24 25
25struct kobject *sgi_uv_kobj; 26struct kobject *sgi_uv_kobj;
26 27
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
47{ 48{
48 unsigned long ret; 49 unsigned long ret;
49 50
51 if (!is_uv_system())
52 return -ENODEV;
53
50 if (!sgi_uv_kobj) 54 if (!sgi_uv_kobj)
51 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj); 55 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
52 if (!sgi_uv_kobj) { 56 if (!sgi_uv_kobj) {
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
index 2ffb6c53326e..583f11d5c480 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/kernel/uv_time.c
@@ -29,7 +29,7 @@
29 29
30#define RTC_NAME "sgi_rtc" 30#define RTC_NAME "sgi_rtc"
31 31
32static cycle_t uv_read_rtc(void); 32static cycle_t uv_read_rtc(struct clocksource *cs);
33static int uv_rtc_next_event(unsigned long, struct clock_event_device *); 33static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
34static void uv_rtc_timer_setup(enum clock_event_mode, 34static void uv_rtc_timer_setup(enum clock_event_mode,
35 struct clock_event_device *); 35 struct clock_event_device *);
@@ -123,7 +123,7 @@ static int uv_setup_intr(int cpu, u64 expires)
123 /* Initialize comparator value */ 123 /* Initialize comparator value */
124 uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires); 124 uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
125 125
126 return (expires < uv_read_rtc() && !uv_intr_pending(pnode)); 126 return (expires < uv_read_rtc(NULL) && !uv_intr_pending(pnode));
127} 127}
128 128
129/* 129/*
@@ -256,7 +256,7 @@ static int uv_rtc_unset_timer(int cpu)
256 256
257 spin_lock_irqsave(&head->lock, flags); 257 spin_lock_irqsave(&head->lock, flags);
258 258
259 if (head->next_cpu == bcpu && uv_read_rtc() >= *t) 259 if (head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t)
260 rc = 1; 260 rc = 1;
261 261
262 *t = ULLONG_MAX; 262 *t = ULLONG_MAX;
@@ -278,7 +278,7 @@ static int uv_rtc_unset_timer(int cpu)
278/* 278/*
279 * Read the RTC. 279 * Read the RTC.
280 */ 280 */
281static cycle_t uv_read_rtc(void) 281static cycle_t uv_read_rtc(struct clocksource *cs)
282{ 282{
283 return (cycle_t)uv_read_local_mmr(UVH_RTC); 283 return (cycle_t)uv_read_local_mmr(UVH_RTC);
284} 284}
@@ -291,7 +291,7 @@ static int uv_rtc_next_event(unsigned long delta,
291{ 291{
292 int ced_cpu = cpumask_first(ced->cpumask); 292 int ced_cpu = cpumask_first(ced->cpumask);
293 293
294 return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc()); 294 return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
295} 295}
296 296
297/* 297/*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7bad..2b3eb82efeeb 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
283/** vmi clocksource */ 283/** vmi clocksource */
284static struct clocksource clocksource_vmi; 284static struct clocksource clocksource_vmi;
285 285
286static cycle_t read_real_cycles(void) 286static cycle_t read_real_cycles(struct clocksource *cs)
287{ 287{
288 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); 288 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
289 return max(ret, clocksource_vmi.cycle_last); 289 return max(ret, clocksource_vmi.cycle_last);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e94..c5ee17e8c6d9 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf)
89 89
90 if (!used_math()) 90 if (!used_math())
91 return 0; 91 return 0;
92 clear_used_math(); /* trigger finit */ 92
93 if (task_thread_info(tsk)->status & TS_USEDFPU) { 93 if (task_thread_info(tsk)->status & TS_USEDFPU) {
94 /* 94 /*
95 * Start with clearing the user buffer. This will present a 95 * Start with clearing the user buffer. This will present a
@@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf)
114 return -1; 114 return -1;
115 } 115 }
116 116
117 clear_used_math(); /* trigger finit */
118
117 if (task_thread_info(tsk)->status & TS_XSAVE) { 119 if (task_thread_info(tsk)->status & TS_XSAVE) {
118 struct _fpstate __user *fx = buf; 120 struct _fpstate __user *fx = buf;
119 struct _xstate __user *x = buf; 121 struct _xstate __user *x = buf;
@@ -324,7 +326,7 @@ void __ref xsave_cntxt_init(void)
324 } 326 }
325 327
326 /* 328 /*
327 * for now OS knows only about FP/SSE 329 * Support only the state known to the OS.
328 */ 330 */
329 pcntxt_mask = pcntxt_mask & XCNTXT_MASK; 331 pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
330 xsave_init(); 332 xsave_init();
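The xsave.c hunks above move clear_used_math() from before the save paths to after them, so a copy that fails inside the TS_USEDFPU path returns -1 with the used-math flag, and therefore the live state, still intact. A standalone sketch of that ordering (not kernel code; all names below are invented):

#include <stdio.h>
#include <string.h>

static int  have_live_state = 1;                 /* analogue of used_math() */
static char live_state[32]  = "fpu registers";

static int save_to_user(char *ubuf, size_t len)  /* can fail, like a faulting copy to user space */
{
        if (len < sizeof(live_state))
                return -1;
        memcpy(ubuf, live_state, sizeof(live_state));
        return 0;
}

int main(void)
{
        char ubuf[32];

        if (!have_live_state)
                return 0;
        if (save_to_user(ubuf, sizeof(ubuf)) < 0)
                return 1;                        /* flag (and state) left untouched on failure */
        have_live_state = 0;                     /* only now: the clear_used_math() step */
        printf("saved: %s\n", ubuf);
        return 0;
}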