Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/amd_iommu_init.c | 16
-rw-r--r-- | arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r-- | arch/x86/kernel/apic/nmi.c | 5
-rw-r--r-- | arch/x86/kernel/apic/x2apic_uv_x.c | 31
-rw-r--r-- | arch/x86/kernel/bios_uv.c | 3
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 2
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 38
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_64.c | 33
-rw-r--r-- | arch/x86/kernel/cpu/proc.c | 2
-rw-r--r-- | arch/x86/kernel/entry_64.S | 3
-rw-r--r-- | arch/x86/kernel/hpet.c | 24
-rw-r--r-- | arch/x86/kernel/i8253.c | 2
-rw-r--r-- | arch/x86/kernel/kvmclock.c | 7
-rw-r--r-- | arch/x86/kernel/microcode_core.c | 35
-rw-r--r-- | arch/x86/kernel/pci-swiotlb.c | 2
-rw-r--r-- | arch/x86/kernel/quirks.c | 2
-rw-r--r-- | arch/x86/kernel/tlb_uv.c | 189
-rw-r--r-- | arch/x86/kernel/tsc.c | 2
-rw-r--r-- | arch/x86/kernel/uv_sysfs.c | 4
-rw-r--r-- | arch/x86/kernel/uv_time.c | 10
-rw-r--r-- | arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r-- | arch/x86/kernel/xsave.c | 4
22 files changed, 271 insertions, 147 deletions
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42c33cebf00f..8c0be0902dac 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -49,10 +49,10 @@ | |||
49 | #define IVHD_DEV_EXT_SELECT 0x46 | 49 | #define IVHD_DEV_EXT_SELECT 0x46 |
50 | #define IVHD_DEV_EXT_SELECT_RANGE 0x47 | 50 | #define IVHD_DEV_EXT_SELECT_RANGE 0x47 |
51 | 51 | ||
52 | #define IVHD_FLAG_HT_TUN_EN 0x00 | 52 | #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 |
53 | #define IVHD_FLAG_PASSPW_EN 0x01 | 53 | #define IVHD_FLAG_PASSPW_EN_MASK 0x02 |
54 | #define IVHD_FLAG_RESPASSPW_EN 0x02 | 54 | #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 |
55 | #define IVHD_FLAG_ISOC_EN 0x03 | 55 | #define IVHD_FLAG_ISOC_EN_MASK 0x08 |
56 | 56 | ||
57 | #define IVMD_FLAG_EXCL_RANGE 0x08 | 57 | #define IVMD_FLAG_EXCL_RANGE 0x08 |
58 | #define IVMD_FLAG_UNITY_MAP 0x01 | 58 | #define IVMD_FLAG_UNITY_MAP 0x01 |
@@ -569,19 +569,19 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu, | |||
569 | * First set the recommended feature enable bits from ACPI | 569 | * First set the recommended feature enable bits from ACPI |
570 | * into the IOMMU control registers | 570 | * into the IOMMU control registers |
571 | */ | 571 | */ |
572 | h->flags & IVHD_FLAG_HT_TUN_EN ? | 572 | h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? |
573 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : | 573 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : |
574 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); | 574 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); |
575 | 575 | ||
576 | h->flags & IVHD_FLAG_PASSPW_EN ? | 576 | h->flags & IVHD_FLAG_PASSPW_EN_MASK ? |
577 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : | 577 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : |
578 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); | 578 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); |
579 | 579 | ||
580 | h->flags & IVHD_FLAG_RESPASSPW_EN ? | 580 | h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ? |
581 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : | 581 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : |
582 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); | 582 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); |
583 | 583 | ||
584 | h->flags & IVHD_FLAG_ISOC_EN ? | 584 | h->flags & IVHD_FLAG_ISOC_EN_MASK ? |
585 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : | 585 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : |
586 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); | 586 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); |
587 | 587 | ||
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a2789e42e162..30da617d18e4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3670,12 +3670,14 @@ int arch_setup_hpet_msi(unsigned int irq) | |||
3670 | { | 3670 | { |
3671 | int ret; | 3671 | int ret; |
3672 | struct msi_msg msg; | 3672 | struct msi_msg msg; |
3673 | struct irq_desc *desc = irq_to_desc(irq); | ||
3673 | 3674 | ||
3674 | ret = msi_compose_msg(NULL, irq, &msg); | 3675 | ret = msi_compose_msg(NULL, irq, &msg); |
3675 | if (ret < 0) | 3676 | if (ret < 0) |
3676 | return ret; | 3677 | return ret; |
3677 | 3678 | ||
3678 | hpet_msi_write(irq, &msg); | 3679 | hpet_msi_write(irq, &msg); |
3680 | desc->status |= IRQ_MOVE_PCNTXT; | ||
3679 | set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq, | 3681 | set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq, |
3680 | "edge"); | 3682 | "edge"); |
3681 | 3683 | ||
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index d6bd62407152..ce4fbfa315a1 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -138,7 +138,7 @@ int __init check_nmi_watchdog(void) | |||
138 | if (!prev_nmi_count) | 138 | if (!prev_nmi_count) |
139 | goto error; | 139 | goto error; |
140 | 140 | ||
141 | alloc_cpumask_var(&backtrace_mask, GFP_KERNEL); | 141 | alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO); |
142 | printk(KERN_INFO "Testing NMI watchdog ... "); | 142 | printk(KERN_INFO "Testing NMI watchdog ... "); |
143 | 143 | ||
144 | #ifdef CONFIG_SMP | 144 | #ifdef CONFIG_SMP |
@@ -414,7 +414,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
414 | touched = 1; | 414 | touched = 1; |
415 | } | 415 | } |
416 | 416 | ||
417 | if (cpumask_test_cpu(cpu, backtrace_mask)) { | 417 | /* We can be called before check_nmi_watchdog, hence NULL check. */ |
418 | if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) { | ||
418 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 419 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ |
419 | 420 | ||
420 | spin_lock(&lock); | 421 | spin_lock(&lock); |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e8..2bda69352976 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/timer.h> | 19 | #include <linux/timer.h> |
20 | #include <linux/cpu.h> | 20 | #include <linux/cpu.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/io.h> | ||
22 | 23 | ||
23 | #include <asm/uv/uv_mmrs.h> | 24 | #include <asm/uv/uv_mmrs.h> |
24 | #include <asm/uv/uv_hub.h> | 25 | #include <asm/uv/uv_hub.h> |
@@ -34,6 +35,17 @@ DEFINE_PER_CPU(int, x2apic_extra_bits); | |||
34 | 35 | ||
35 | static enum uv_system_type uv_system_type; | 36 | static enum uv_system_type uv_system_type; |
36 | 37 | ||
38 | static int early_get_nodeid(void) | ||
39 | { | ||
40 | union uvh_node_id_u node_id; | ||
41 | unsigned long *mmr; | ||
42 | |||
43 | mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr)); | ||
44 | node_id.v = *mmr; | ||
45 | early_iounmap(mmr, sizeof(*mmr)); | ||
46 | return node_id.s.node_id; | ||
47 | } | ||
48 | |||
37 | static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 49 | static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
38 | { | 50 | { |
39 | if (!strcmp(oem_id, "SGI")) { | 51 | if (!strcmp(oem_id, "SGI")) { |
@@ -42,6 +54,8 @@ static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
42 | else if (!strcmp(oem_table_id, "UVX")) | 54 | else if (!strcmp(oem_table_id, "UVX")) |
43 | uv_system_type = UV_X2APIC; | 55 | uv_system_type = UV_X2APIC; |
44 | else if (!strcmp(oem_table_id, "UVH")) { | 56 | else if (!strcmp(oem_table_id, "UVH")) { |
57 | __get_cpu_var(x2apic_extra_bits) = | ||
58 | early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1); | ||
45 | uv_system_type = UV_NON_UNIQUE_APIC; | 59 | uv_system_type = UV_NON_UNIQUE_APIC; |
46 | return 1; | 60 | return 1; |
47 | } | 61 | } |
@@ -549,7 +563,8 @@ void __init uv_system_init(void) | |||
549 | unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; | 563 | unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; |
550 | int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; | 564 | int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; |
551 | int max_pnode = 0; | 565 | int max_pnode = 0; |
552 | unsigned long mmr_base, present; | 566 | unsigned long mmr_base, present, paddr; |
567 | unsigned short pnode_mask; | ||
553 | 568 | ||
554 | map_low_mmrs(); | 569 | map_low_mmrs(); |
555 | 570 | ||
@@ -592,6 +607,7 @@ void __init uv_system_init(void) | |||
592 | } | 607 | } |
593 | } | 608 | } |
594 | 609 | ||
610 | pnode_mask = (1 << n_val) - 1; | ||
595 | node_id.v = uv_read_local_mmr(UVH_NODE_ID); | 611 | node_id.v = uv_read_local_mmr(UVH_NODE_ID); |
596 | gnode_upper = (((unsigned long)node_id.s.node_id) & | 612 | gnode_upper = (((unsigned long)node_id.s.node_id) & |
597 | ~((1 << n_val) - 1)) << m_val; | 613 | ~((1 << n_val) - 1)) << m_val; |
@@ -615,7 +631,7 @@ void __init uv_system_init(void) | |||
615 | uv_cpu_hub_info(cpu)->numa_blade_id = blade; | 631 | uv_cpu_hub_info(cpu)->numa_blade_id = blade; |
616 | uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; | 632 | uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; |
617 | uv_cpu_hub_info(cpu)->pnode = pnode; | 633 | uv_cpu_hub_info(cpu)->pnode = pnode; |
618 | uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1; | 634 | uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask; |
619 | uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; | 635 | uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; |
620 | uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; | 636 | uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; |
621 | uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; | 637 | uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; |
@@ -631,6 +647,17 @@ void __init uv_system_init(void) | |||
631 | lcpu, blade); | 647 | lcpu, blade); |
632 | } | 648 | } |
633 | 649 | ||
650 | /* Add blade/pnode info for nodes without cpus */ | ||
651 | for_each_online_node(nid) { | ||
652 | if (uv_node_to_blade[nid] >= 0) | ||
653 | continue; | ||
654 | paddr = node_start_pfn(nid) << PAGE_SHIFT; | ||
655 | paddr = uv_soc_phys_ram_to_gpa(paddr); | ||
656 | pnode = (paddr >> m_val) & pnode_mask; | ||
657 | blade = boot_pnode_to_blade(pnode); | ||
658 | uv_node_to_blade[nid] = blade; | ||
659 | } | ||
660 | |||
634 | map_gru_high(max_pnode); | 661 | map_gru_high(max_pnode); |
635 | map_mmr_high(max_pnode); | 662 | map_mmr_high(max_pnode); |
636 | map_config_high(max_pnode); | 663 | map_config_high(max_pnode); |
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void) | |||
182 | memcpy(&uv_systab, tab, sizeof(struct uv_systab)); | 182 | memcpy(&uv_systab, tab, sizeof(struct uv_systab)); |
183 | iounmap(tab); | 183 | iounmap(tab); |
184 | 184 | ||
185 | printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision); | 185 | printk(KERN_INFO "EFI UV System Table Revision %d\n", |
186 | uv_systab.revision); | ||
186 | } | 187 | } |
187 | #else /* !CONFIG_EFI */ | 188 | #else /* !CONFIG_EFI */ |
188 | 189 | ||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c4f667896c28..c1caefc82e62 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1203,6 +1203,8 @@ void __cpuinit cpu_init(void) | |||
1203 | load_TR_desc(); | 1203 | load_TR_desc(); |
1204 | load_LDT(&init_mm.context); | 1204 | load_LDT(&init_mm.context); |
1205 | 1205 | ||
1206 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | ||
1207 | |||
1206 | #ifdef CONFIG_DOUBLEFAULT | 1208 | #ifdef CONFIG_DOUBLEFAULT |
1207 | /* Set up doublefault TSS pointer in the GDT */ | 1209 | /* Set up doublefault TSS pointer in the GDT */ |
1208 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); | 1210 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); |
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 3e3cd3db7a0c..208ecf6643df 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,14 +65,18 @@ enum { | |||
65 | struct acpi_cpufreq_data { | 65 | struct acpi_cpufreq_data { |
66 | struct acpi_processor_performance *acpi_data; | 66 | struct acpi_processor_performance *acpi_data; |
67 | struct cpufreq_frequency_table *freq_table; | 67 | struct cpufreq_frequency_table *freq_table; |
68 | unsigned int max_freq; | ||
69 | unsigned int resume; | 68 | unsigned int resume; |
70 | unsigned int cpu_feature; | 69 | unsigned int cpu_feature; |
71 | u64 saved_aperf, saved_mperf; | ||
72 | }; | 70 | }; |
73 | 71 | ||
74 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); | 72 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); |
75 | 73 | ||
74 | struct acpi_msr_data { | ||
75 | u64 saved_aperf, saved_mperf; | ||
76 | }; | ||
77 | |||
78 | static DEFINE_PER_CPU(struct acpi_msr_data, msr_data); | ||
79 | |||
76 | DEFINE_TRACE(power_mark); | 80 | DEFINE_TRACE(power_mark); |
77 | 81 | ||
78 | /* acpi_perf_data is a pointer to percpu data. */ | 82 | /* acpi_perf_data is a pointer to percpu data. */ |
@@ -204,7 +208,13 @@ static void drv_read(struct drv_cmd *cmd) | |||
204 | 208 | ||
205 | static void drv_write(struct drv_cmd *cmd) | 209 | static void drv_write(struct drv_cmd *cmd) |
206 | { | 210 | { |
211 | int this_cpu; | ||
212 | |||
213 | this_cpu = get_cpu(); | ||
214 | if (cpumask_test_cpu(this_cpu, cmd->mask)) | ||
215 | do_drv_write(cmd); | ||
207 | smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); | 216 | smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); |
217 | put_cpu(); | ||
208 | } | 218 | } |
209 | 219 | ||
210 | static u32 get_cur_val(const struct cpumask *mask) | 220 | static u32 get_cur_val(const struct cpumask *mask) |
@@ -277,15 +287,15 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, | |||
277 | unsigned int perf_percent; | 287 | unsigned int perf_percent; |
278 | unsigned int retval; | 288 | unsigned int retval; |
279 | 289 | ||
280 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &cur, 1)) | 290 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1)) |
281 | return 0; | 291 | return 0; |
282 | 292 | ||
283 | cur.aperf.whole = readin.aperf.whole - | 293 | cur.aperf.whole = readin.aperf.whole - |
284 | per_cpu(drv_data, cpu)->saved_aperf; | 294 | per_cpu(msr_data, cpu).saved_aperf; |
285 | cur.mperf.whole = readin.mperf.whole - | 295 | cur.mperf.whole = readin.mperf.whole - |
286 | per_cpu(drv_data, cpu)->saved_mperf; | 296 | per_cpu(msr_data, cpu).saved_mperf; |
287 | per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole; | 297 | per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole; |
288 | per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole; | 298 | per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole; |
289 | 299 | ||
290 | #ifdef __i386__ | 300 | #ifdef __i386__ |
291 | /* | 301 | /* |
@@ -329,7 +339,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, | |||
329 | 339 | ||
330 | #endif | 340 | #endif |
331 | 341 | ||
332 | retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100; | 342 | retval = (policy->cpuinfo.max_freq * perf_percent) / 100; |
333 | 343 | ||
334 | return retval; | 344 | return retval; |
335 | } | 345 | } |
@@ -682,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
682 | /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ | 692 | /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ |
683 | if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && | 693 | if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && |
684 | policy->cpuinfo.transition_latency > 20 * 1000) { | 694 | policy->cpuinfo.transition_latency > 20 * 1000) { |
685 | static int print_once; | ||
686 | policy->cpuinfo.transition_latency = 20 * 1000; | 695 | policy->cpuinfo.transition_latency = 20 * 1000; |
687 | if (!print_once) { | 696 | printk_once(KERN_INFO "Capping off P-state tranision" |
688 | print_once = 1; | 697 | " latency at 20 uS\n"); |
689 | printk(KERN_INFO "Capping off P-state tranision latency" | ||
690 | " at 20 uS\n"); | ||
691 | } | ||
692 | } | 698 | } |
693 | 699 | ||
694 | data->max_freq = perf->states[0].core_frequency * 1000; | ||
695 | /* table init */ | 700 | /* table init */ |
696 | for (i = 0; i < perf->state_count; i++) { | 701 | for (i = 0; i < perf->state_count; i++) { |
697 | if (i > 0 && perf->states[i].core_frequency >= | 702 | if (i > 0 && perf->states[i].core_frequency >= |
@@ -710,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
710 | if (result) | 715 | if (result) |
711 | goto err_freqfree; | 716 | goto err_freqfree; |
712 | 717 | ||
718 | if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) | ||
719 | printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); | ||
720 | |||
713 | switch (perf->control_register.space_id) { | 721 | switch (perf->control_register.space_id) { |
714 | case ACPI_ADR_SPACE_SYSTEM_IO: | 722 | case ACPI_ADR_SPACE_SYSTEM_IO: |
715 | /* Current speed is unknown and not detectable by IO port */ | 723 | /* Current speed is unknown and not detectable by IO port */ |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 863f89568b1a..6fb0b359d2a5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -239,9 +239,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
239 | * Don't get the IP here because it's unlikely to | 239 | * Don't get the IP here because it's unlikely to |
240 | * have anything to do with the actual error location. | 240 | * have anything to do with the actual error location. |
241 | */ | 241 | */ |
242 | 242 | if (!(flags & MCP_DONTLOG)) { | |
243 | mce_log(&m); | 243 | mce_log(&m); |
244 | add_taint(TAINT_MACHINE_CHECK); | 244 | add_taint(TAINT_MACHINE_CHECK); |
245 | } | ||
245 | 246 | ||
246 | /* | 247 | /* |
247 | * Clear state for this bank. | 248 | * Clear state for this bank. |
@@ -452,13 +453,14 @@ void mce_log_therm_throt_event(__u64 status) | |||
452 | */ | 453 | */ |
453 | 454 | ||
454 | static int check_interval = 5 * 60; /* 5 minutes */ | 455 | static int check_interval = 5 * 60; /* 5 minutes */ |
455 | static int next_interval; /* in jiffies */ | 456 | static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ |
456 | static void mcheck_timer(unsigned long); | 457 | static void mcheck_timer(unsigned long); |
457 | static DEFINE_PER_CPU(struct timer_list, mce_timer); | 458 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
458 | 459 | ||
459 | static void mcheck_timer(unsigned long data) | 460 | static void mcheck_timer(unsigned long data) |
460 | { | 461 | { |
461 | struct timer_list *t = &per_cpu(mce_timer, data); | 462 | struct timer_list *t = &per_cpu(mce_timer, data); |
463 | int *n; | ||
462 | 464 | ||
463 | WARN_ON(smp_processor_id() != data); | 465 | WARN_ON(smp_processor_id() != data); |
464 | 466 | ||
@@ -470,14 +472,14 @@ static void mcheck_timer(unsigned long data) | |||
470 | * Alert userspace if needed. If we logged an MCE, reduce the | 472 | * Alert userspace if needed. If we logged an MCE, reduce the |
471 | * polling interval, otherwise increase the polling interval. | 473 | * polling interval, otherwise increase the polling interval. |
472 | */ | 474 | */ |
475 | n = &__get_cpu_var(next_interval); | ||
473 | if (mce_notify_user()) { | 476 | if (mce_notify_user()) { |
474 | next_interval = max(next_interval/2, HZ/100); | 477 | *n = max(*n/2, HZ/100); |
475 | } else { | 478 | } else { |
476 | next_interval = min(next_interval * 2, | 479 | *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); |
477 | (int)round_jiffies_relative(check_interval*HZ)); | ||
478 | } | 480 | } |
479 | 481 | ||
480 | t->expires = jiffies + next_interval; | 482 | t->expires = jiffies + *n; |
481 | add_timer(t); | 483 | add_timer(t); |
482 | } | 484 | } |
483 | 485 | ||
@@ -584,7 +586,7 @@ static void mce_init(void *dummy) | |||
584 | * Log the machine checks left over from the previous reset. | 586 | * Log the machine checks left over from the previous reset. |
585 | */ | 587 | */ |
586 | bitmap_fill(all_banks, MAX_NR_BANKS); | 588 | bitmap_fill(all_banks, MAX_NR_BANKS); |
587 | machine_check_poll(MCP_UC, &all_banks); | 589 | machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks); |
588 | 590 | ||
589 | set_in_cr4(X86_CR4_MCE); | 591 | set_in_cr4(X86_CR4_MCE); |
590 | 592 | ||
@@ -632,14 +634,13 @@ static void mce_cpu_features(struct cpuinfo_x86 *c) | |||
632 | static void mce_init_timer(void) | 634 | static void mce_init_timer(void) |
633 | { | 635 | { |
634 | struct timer_list *t = &__get_cpu_var(mce_timer); | 636 | struct timer_list *t = &__get_cpu_var(mce_timer); |
637 | int *n = &__get_cpu_var(next_interval); | ||
635 | 638 | ||
636 | /* data race harmless because everyone sets to the same value */ | 639 | *n = check_interval * HZ; |
637 | if (!next_interval) | 640 | if (!*n) |
638 | next_interval = check_interval * HZ; | ||
639 | if (!next_interval) | ||
640 | return; | 641 | return; |
641 | setup_timer(t, mcheck_timer, smp_processor_id()); | 642 | setup_timer(t, mcheck_timer, smp_processor_id()); |
642 | t->expires = round_jiffies(jiffies + next_interval); | 643 | t->expires = round_jiffies(jiffies + *n); |
643 | add_timer(t); | 644 | add_timer(t); |
644 | } | 645 | } |
645 | 646 | ||
@@ -907,7 +908,6 @@ static void mce_cpu_restart(void *data) | |||
907 | /* Reinit MCEs after user configuration changes */ | 908 | /* Reinit MCEs after user configuration changes */ |
908 | static void mce_restart(void) | 909 | static void mce_restart(void) |
909 | { | 910 | { |
910 | next_interval = check_interval * HZ; | ||
911 | on_each_cpu(mce_cpu_restart, NULL, 1); | 911 | on_each_cpu(mce_cpu_restart, NULL, 1); |
912 | } | 912 | } |
913 | 913 | ||
@@ -1110,7 +1110,8 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
1110 | break; | 1110 | break; |
1111 | case CPU_DOWN_FAILED: | 1111 | case CPU_DOWN_FAILED: |
1112 | case CPU_DOWN_FAILED_FROZEN: | 1112 | case CPU_DOWN_FAILED_FROZEN: |
1113 | t->expires = round_jiffies(jiffies + next_interval); | 1113 | t->expires = round_jiffies(jiffies + |
1114 | __get_cpu_var(next_interval)); | ||
1114 | add_timer_on(t, cpu); | 1115 | add_timer_on(t, cpu); |
1115 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | 1116 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); |
1116 | break; | 1117 | break; |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index f93047fed791..d5e30397246b 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | |||
14 | if (c->x86_max_cores * smp_num_siblings > 1) { | 14 | if (c->x86_max_cores * smp_num_siblings > 1) { |
15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | 15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); |
16 | seq_printf(m, "siblings\t: %d\n", | 16 | seq_printf(m, "siblings\t: %d\n", |
17 | cpumask_weight(cpu_sibling_mask(cpu))); | 17 | cpumask_weight(cpu_core_mask(cpu))); |
18 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | 18 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); |
19 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | 19 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); |
20 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | 20 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1ac99865591c..987f91f0f755 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1397,7 +1397,10 @@ ENTRY(paranoid_exit) | |||
1397 | paranoid_swapgs: | 1397 | paranoid_swapgs: |
1398 | TRACE_IRQS_IRETQ 0 | 1398 | TRACE_IRQS_IRETQ 0 |
1399 | SWAPGS_UNSAFE_STACK | 1399 | SWAPGS_UNSAFE_STACK |
1400 | RESTORE_ALL 8 | ||
1401 | jmp irq_return | ||
1400 | paranoid_restore: | 1402 | paranoid_restore: |
1403 | TRACE_IRQS_IRETQ 0 | ||
1401 | RESTORE_ALL 8 | 1404 | RESTORE_ALL 8 |
1402 | jmp irq_return | 1405 | jmp irq_return |
1403 | paranoid_userspace: | 1406 | paranoid_userspace: |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a44..81408b93f887 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -236,6 +236,10 @@ static void hpet_stop_counter(void) | |||
236 | unsigned long cfg = hpet_readl(HPET_CFG); | 236 | unsigned long cfg = hpet_readl(HPET_CFG); |
237 | cfg &= ~HPET_CFG_ENABLE; | 237 | cfg &= ~HPET_CFG_ENABLE; |
238 | hpet_writel(cfg, HPET_CFG); | 238 | hpet_writel(cfg, HPET_CFG); |
239 | } | ||
240 | |||
241 | static void hpet_reset_counter(void) | ||
242 | { | ||
239 | hpet_writel(0, HPET_COUNTER); | 243 | hpet_writel(0, HPET_COUNTER); |
240 | hpet_writel(0, HPET_COUNTER + 4); | 244 | hpet_writel(0, HPET_COUNTER + 4); |
241 | } | 245 | } |
@@ -250,6 +254,7 @@ static void hpet_start_counter(void) | |||
250 | static void hpet_restart_counter(void) | 254 | static void hpet_restart_counter(void) |
251 | { | 255 | { |
252 | hpet_stop_counter(); | 256 | hpet_stop_counter(); |
257 | hpet_reset_counter(); | ||
253 | hpet_start_counter(); | 258 | hpet_start_counter(); |
254 | } | 259 | } |
255 | 260 | ||
@@ -309,7 +314,7 @@ static int hpet_setup_msi_irq(unsigned int irq); | |||
309 | static void hpet_set_mode(enum clock_event_mode mode, | 314 | static void hpet_set_mode(enum clock_event_mode mode, |
310 | struct clock_event_device *evt, int timer) | 315 | struct clock_event_device *evt, int timer) |
311 | { | 316 | { |
312 | unsigned long cfg; | 317 | unsigned long cfg, cmp, now; |
313 | uint64_t delta; | 318 | uint64_t delta; |
314 | 319 | ||
315 | switch (mode) { | 320 | switch (mode) { |
@@ -317,12 +322,23 @@ static void hpet_set_mode(enum clock_event_mode mode, | |||
317 | hpet_stop_counter(); | 322 | hpet_stop_counter(); |
318 | delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; | 323 | delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; |
319 | delta >>= evt->shift; | 324 | delta >>= evt->shift; |
325 | now = hpet_readl(HPET_COUNTER); | ||
326 | cmp = now + (unsigned long) delta; | ||
320 | cfg = hpet_readl(HPET_Tn_CFG(timer)); | 327 | cfg = hpet_readl(HPET_Tn_CFG(timer)); |
321 | /* Make sure we use edge triggered interrupts */ | 328 | /* Make sure we use edge triggered interrupts */ |
322 | cfg &= ~HPET_TN_LEVEL; | 329 | cfg &= ~HPET_TN_LEVEL; |
323 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | | 330 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | |
324 | HPET_TN_SETVAL | HPET_TN_32BIT; | 331 | HPET_TN_SETVAL | HPET_TN_32BIT; |
325 | hpet_writel(cfg, HPET_Tn_CFG(timer)); | 332 | hpet_writel(cfg, HPET_Tn_CFG(timer)); |
333 | hpet_writel(cmp, HPET_Tn_CMP(timer)); | ||
334 | udelay(1); | ||
335 | /* | ||
336 | * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL | ||
337 | * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL | ||
338 | * bit is automatically cleared after the first write. | ||
339 | * (See AMD-8111 HyperTransport I/O Hub Data Sheet, | ||
340 | * Publication # 24674) | ||
341 | */ | ||
326 | hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); | 342 | hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); |
327 | hpet_start_counter(); | 343 | hpet_start_counter(); |
328 | hpet_print_config(); | 344 | hpet_print_config(); |
@@ -722,7 +738,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, | |||
722 | /* | 738 | /* |
723 | * Clock source related code | 739 | * Clock source related code |
724 | */ | 740 | */ |
725 | static cycle_t read_hpet(void) | 741 | static cycle_t read_hpet(struct clocksource *cs) |
726 | { | 742 | { |
727 | return (cycle_t)hpet_readl(HPET_COUNTER); | 743 | return (cycle_t)hpet_readl(HPET_COUNTER); |
728 | } | 744 | } |
@@ -756,7 +772,7 @@ static int hpet_clocksource_register(void) | |||
756 | hpet_restart_counter(); | 772 | hpet_restart_counter(); |
757 | 773 | ||
758 | /* Verify whether hpet counter works */ | 774 | /* Verify whether hpet counter works */ |
759 | t1 = read_hpet(); | 775 | t1 = hpet_readl(HPET_COUNTER); |
760 | rdtscll(start); | 776 | rdtscll(start); |
761 | 777 | ||
762 | /* | 778 | /* |
@@ -770,7 +786,7 @@ static int hpet_clocksource_register(void) | |||
770 | rdtscll(now); | 786 | rdtscll(now); |
771 | } while ((now - start) < 200000UL); | 787 | } while ((now - start) < 200000UL); |
772 | 788 | ||
773 | if (t1 == read_hpet()) { | 789 | if (t1 == hpet_readl(HPET_COUNTER)) { |
774 | printk(KERN_WARNING | 790 | printk(KERN_WARNING |
775 | "HPET counter not counting. HPET disabled\n"); | 791 | "HPET counter not counting. HPET disabled\n"); |
776 | return -ENODEV; | 792 | return -ENODEV; |
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa54..c2e0bb0890d4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void) | |||
129 | * to just read by itself. So use jiffies to emulate a free | 129 | * to just read by itself. So use jiffies to emulate a free |
130 | * running counter: | 130 | * running counter: |
131 | */ | 131 | */ |
132 | static cycle_t pit_read(void) | 132 | static cycle_t pit_read(struct clocksource *cs) |
133 | { | 133 | { |
134 | static int old_count; | 134 | static int old_count; |
135 | static u32 old_jifs; | 135 | static u32 old_jifs; |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132df..223af43f1526 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void) | |||
77 | return ret; | 77 | return ret; |
78 | } | 78 | } |
79 | 79 | ||
80 | static cycle_t kvm_clock_get_cycles(struct clocksource *cs) | ||
81 | { | ||
82 | return kvm_clock_read(); | ||
83 | } | ||
84 | |||
80 | /* | 85 | /* |
81 | * If we don't do that, there is the possibility that the guest | 86 | * If we don't do that, there is the possibility that the guest |
82 | * will calibrate under heavy load - thus, getting a lower lpj - | 87 | * will calibrate under heavy load - thus, getting a lower lpj - |
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void) | |||
107 | 112 | ||
108 | static struct clocksource kvm_clock = { | 113 | static struct clocksource kvm_clock = { |
109 | .name = "kvm-clock", | 114 | .name = "kvm-clock", |
110 | .read = kvm_clock_read, | 115 | .read = kvm_clock_get_cycles, |
111 | .rating = 400, | 116 | .rating = 400, |
112 | .mask = CLOCKSOURCE_MASK(64), | 117 | .mask = CLOCKSOURCE_MASK(64), |
113 | .mult = 1 << KVM_SCALE, | 118 | .mult = 1 << KVM_SCALE, |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..98c470c069d1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; | |||
108 | EXPORT_SYMBOL_GPL(ucode_cpu_info); | 108 | EXPORT_SYMBOL_GPL(ucode_cpu_info); |
109 | 109 | ||
110 | #ifdef CONFIG_MICROCODE_OLD_INTERFACE | 110 | #ifdef CONFIG_MICROCODE_OLD_INTERFACE |
111 | struct update_for_cpu { | ||
112 | const void __user *buf; | ||
113 | size_t size; | ||
114 | }; | ||
115 | |||
116 | static long update_for_cpu(void *_ufc) | ||
117 | { | ||
118 | struct update_for_cpu *ufc = _ufc; | ||
119 | int error; | ||
120 | |||
121 | error = microcode_ops->request_microcode_user(smp_processor_id(), | ||
122 | ufc->buf, ufc->size); | ||
123 | if (error < 0) | ||
124 | return error; | ||
125 | if (!error) | ||
126 | microcode_ops->apply_microcode(smp_processor_id()); | ||
127 | return error; | ||
128 | } | ||
129 | |||
130 | static int do_microcode_update(const void __user *buf, size_t size) | 111 | static int do_microcode_update(const void __user *buf, size_t size) |
131 | { | 112 | { |
113 | cpumask_t old; | ||
132 | int error = 0; | 114 | int error = 0; |
133 | int cpu; | 115 | int cpu; |
134 | struct update_for_cpu ufc = { .buf = buf, .size = size }; | 116 | |
117 | old = current->cpus_allowed; | ||
135 | 118 | ||
136 | for_each_online_cpu(cpu) { | 119 | for_each_online_cpu(cpu) { |
137 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 120 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
138 | 121 | ||
139 | if (!uci->valid) | 122 | if (!uci->valid) |
140 | continue; | 123 | continue; |
141 | error = work_on_cpu(cpu, update_for_cpu, &ufc); | 124 | |
125 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | ||
126 | error = microcode_ops->request_microcode_user(cpu, buf, size); | ||
142 | if (error < 0) | 127 | if (error < 0) |
143 | break; | 128 | goto out; |
129 | if (!error) | ||
130 | microcode_ops->apply_microcode(cpu); | ||
144 | } | 131 | } |
132 | out: | ||
133 | set_cpus_allowed_ptr(current, &old); | ||
145 | return error; | 134 | return error; |
146 | } | 135 | } |
147 | 136 | ||
@@ -391,8 +380,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev) | |||
391 | return err; | 380 | return err; |
392 | 381 | ||
393 | err = microcode_init_cpu(cpu); | 382 | err = microcode_init_cpu(cpu); |
394 | if (err) | ||
395 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | ||
396 | 383 | ||
397 | return err; | 384 | return err; |
398 | } | 385 | } |
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 34f12e9996ed..221a3853e268 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
50 | return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); | 50 | return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); |
51 | } | 51 | } |
52 | 52 | ||
53 | struct dma_map_ops swiotlb_dma_ops = { | 53 | static struct dma_map_ops swiotlb_dma_ops = { |
54 | .mapping_error = swiotlb_dma_mapping_error, | 54 | .mapping_error = swiotlb_dma_mapping_error, |
55 | .alloc_coherent = x86_swiotlb_alloc_coherent, | 55 | .alloc_coherent = x86_swiotlb_alloc_coherent, |
56 | .free_coherent = swiotlb_free_coherent, | 56 | .free_coherent = swiotlb_free_coherent, |
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index e95022e4f5d5..7563b31b4f03 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -261,8 +261,6 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev) | |||
261 | { | 261 | { |
262 | if (hpet_force_user) | 262 | if (hpet_force_user) |
263 | old_ich_force_enable_hpet(dev); | 263 | old_ich_force_enable_hpet(dev); |
264 | else | ||
265 | hpet_print_force_info(); | ||
266 | } | 264 | } |
267 | 265 | ||
268 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, | 266 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, |
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly; | |||
25 | 25 | ||
26 | /* position of pnode (which is nasid>>1): */ | 26 | /* position of pnode (which is nasid>>1): */ |
27 | static int uv_nshift __read_mostly; | 27 | static int uv_nshift __read_mostly; |
28 | /* base pnode in this partition */ | ||
29 | static int uv_partition_base_pnode __read_mostly; | ||
28 | 30 | ||
29 | static unsigned long uv_mmask __read_mostly; | 31 | static unsigned long uv_mmask __read_mostly; |
30 | 32 | ||
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats); | |||
32 | static DEFINE_PER_CPU(struct bau_control, bau_control); | 34 | static DEFINE_PER_CPU(struct bau_control, bau_control); |
33 | 35 | ||
34 | /* | 36 | /* |
37 | * Determine the first node on a blade. | ||
38 | */ | ||
39 | static int __init blade_to_first_node(int blade) | ||
40 | { | ||
41 | int node, b; | ||
42 | |||
43 | for_each_online_node(node) { | ||
44 | b = uv_node_to_blade_id(node); | ||
45 | if (blade == b) | ||
46 | return node; | ||
47 | } | ||
48 | return -1; /* shouldn't happen */ | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Determine the apicid of the first cpu on a blade. | ||
53 | */ | ||
54 | static int __init blade_to_first_apicid(int blade) | ||
55 | { | ||
56 | int cpu; | ||
57 | |||
58 | for_each_present_cpu(cpu) | ||
59 | if (blade == uv_cpu_to_blade_id(cpu)) | ||
60 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
61 | return -1; | ||
62 | } | ||
63 | |||
64 | /* | ||
35 | * Free a software acknowledge hardware resource by clearing its Pending | 65 | * Free a software acknowledge hardware resource by clearing its Pending |
36 | * bit. This will return a reply to the sender. | 66 | * bit. This will return a reply to the sender. |
37 | * If the message has timed out, a reply has already been sent by the | 67 | * If the message has timed out, a reply has already been sent by the |
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg, | |||
67 | msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; | 97 | msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; |
68 | cpu = uv_blade_processor_id(); | 98 | cpu = uv_blade_processor_id(); |
69 | msg->number_of_cpus = | 99 | msg->number_of_cpus = |
70 | uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); | 100 | uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); |
71 | this_cpu_mask = 1UL << cpu; | 101 | this_cpu_mask = 1UL << cpu; |
72 | if (msp->seen_by.bits & this_cpu_mask) | 102 | if (msp->seen_by.bits & this_cpu_mask) |
73 | return; | 103 | return; |
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc, | |||
215 | * Returns @flush_mask if some remote flushing remains to be done. The | 245 | * Returns @flush_mask if some remote flushing remains to be done. The |
216 | * mask will have some bits still set. | 246 | * mask will have some bits still set. |
217 | */ | 247 | */ |
218 | const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade, | 248 | const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, |
219 | struct bau_desc *bau_desc, | 249 | struct bau_desc *bau_desc, |
220 | struct cpumask *flush_mask) | 250 | struct cpumask *flush_mask) |
221 | { | 251 | { |
222 | int completion_status = 0; | 252 | int completion_status = 0; |
223 | int right_shift; | 253 | int right_shift; |
224 | int tries = 0; | 254 | int tries = 0; |
225 | int blade; | 255 | int pnode; |
226 | int bit; | 256 | int bit; |
227 | unsigned long mmr_offset; | 257 | unsigned long mmr_offset; |
228 | unsigned long index; | 258 | unsigned long index; |
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade, | |||
265 | * use the IPI method of shootdown on them. | 295 | * use the IPI method of shootdown on them. |
266 | */ | 296 | */ |
267 | for_each_cpu(bit, flush_mask) { | 297 | for_each_cpu(bit, flush_mask) { |
268 | blade = uv_cpu_to_blade_id(bit); | 298 | pnode = uv_cpu_to_pnode(bit); |
269 | if (blade == this_blade) | 299 | if (pnode == this_pnode) |
270 | continue; | 300 | continue; |
271 | cpumask_clear_cpu(bit, flush_mask); | 301 | cpumask_clear_cpu(bit, flush_mask); |
272 | } | 302 | } |
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
309 | struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); | 339 | struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); |
310 | int i; | 340 | int i; |
311 | int bit; | 341 | int bit; |
312 | int blade; | 342 | int pnode; |
313 | int uv_cpu; | 343 | int uv_cpu; |
314 | int this_blade; | 344 | int this_pnode; |
315 | int locals = 0; | 345 | int locals = 0; |
316 | struct bau_desc *bau_desc; | 346 | struct bau_desc *bau_desc; |
317 | 347 | ||
318 | cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); | 348 | cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); |
319 | 349 | ||
320 | uv_cpu = uv_blade_processor_id(); | 350 | uv_cpu = uv_blade_processor_id(); |
321 | this_blade = uv_numa_blade_id(); | 351 | this_pnode = uv_hub_info->pnode; |
322 | bau_desc = __get_cpu_var(bau_control).descriptor_base; | 352 | bau_desc = __get_cpu_var(bau_control).descriptor_base; |
323 | bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; | 353 | bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; |
324 | 354 | ||
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
326 | 356 | ||
327 | i = 0; | 357 | i = 0; |
328 | for_each_cpu(bit, flush_mask) { | 358 | for_each_cpu(bit, flush_mask) { |
329 | blade = uv_cpu_to_blade_id(bit); | 359 | pnode = uv_cpu_to_pnode(bit); |
330 | BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); | 360 | BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1)); |
331 | if (blade == this_blade) { | 361 | if (pnode == this_pnode) { |
332 | locals++; | 362 | locals++; |
333 | continue; | 363 | continue; |
334 | } | 364 | } |
335 | bau_node_set(blade, &bau_desc->distribution); | 365 | bau_node_set(pnode - uv_partition_base_pnode, |
366 | &bau_desc->distribution); | ||
336 | i++; | 367 | i++; |
337 | } | 368 | } |
338 | if (i == 0) { | 369 | if (i == 0) { |
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
350 | bau_desc->payload.address = va; | 381 | bau_desc->payload.address = va; |
351 | bau_desc->payload.sending_cpu = cpu; | 382 | bau_desc->payload.sending_cpu = cpu; |
352 | 383 | ||
353 | return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask); | 384 | return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask); |
354 | } | 385 | } |
355 | 386 | ||
356 | /* | 387 | /* |
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs) | |||
418 | set_irq_regs(old_regs); | 449 | set_irq_regs(old_regs); |
419 | } | 450 | } |
420 | 451 | ||
452 | /* | ||
453 | * uv_enable_timeouts | ||
454 | * | ||
455 | * Each target blade (i.e. blades that have cpu's) needs to have | ||
456 | * shootdown message timeouts enabled. The timeout does not cause | ||
457 | * an interrupt, but causes an error message to be returned to | ||
458 | * the sender. | ||
459 | */ | ||
421 | static void uv_enable_timeouts(void) | 460 | static void uv_enable_timeouts(void) |
422 | { | 461 | { |
423 | int i; | ||
424 | int blade; | 462 | int blade; |
425 | int last_blade; | 463 | int nblades; |
426 | int pnode; | 464 | int pnode; |
427 | int cur_cpu = 0; | 465 | unsigned long mmr_image; |
428 | unsigned long apicid; | ||
429 | 466 | ||
430 | last_blade = -1; | 467 | nblades = uv_num_possible_blades(); |
431 | for_each_online_node(i) { | 468 | |
432 | blade = uv_node_to_blade_id(i); | 469 | for (blade = 0; blade < nblades; blade++) { |
433 | if (blade == last_blade) | 470 | if (!uv_blade_nr_possible_cpus(blade)) |
434 | continue; | 471 | continue; |
435 | last_blade = blade; | 472 | |
436 | apicid = per_cpu(x86_cpu_to_apicid, cur_cpu); | ||
437 | pnode = uv_blade_to_pnode(blade); | 473 | pnode = uv_blade_to_pnode(blade); |
438 | cur_cpu += uv_blade_nr_possible_cpus(i); | 474 | mmr_image = |
475 | uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL); | ||
476 | /* | ||
477 | * Set the timeout period and then lock it in, in three | ||
478 | * steps; captures and locks in the period. | ||
479 | * | ||
480 | * To program the period, the SOFT_ACK_MODE must be off. | ||
481 | */ | ||
482 | mmr_image &= ~((unsigned long)1 << | ||
483 | UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT); | ||
484 | uv_write_global_mmr64 | ||
485 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); | ||
486 | /* | ||
487 | * Set the 4-bit period. | ||
488 | */ | ||
489 | mmr_image &= ~((unsigned long)0xf << | ||
490 | UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT); | ||
491 | mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD << | ||
492 | UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT); | ||
493 | uv_write_global_mmr64 | ||
494 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); | ||
495 | /* | ||
496 | * Subsequent reversals of the timebase bit (3) cause an | ||
497 | * immediate timeout of one or all INTD resources as | ||
498 | * indicated in bits 2:0 (7 causes all of them to timeout). | ||
499 | */ | ||
500 | mmr_image |= ((unsigned long)1 << | ||
501 | UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT); | ||
502 | uv_write_global_mmr64 | ||
503 | (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); | ||
439 | } | 504 | } |
440 | } | 505 | } |
441 | 506 | ||
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data) | |||
482 | stat->requestee, stat->onetlb, stat->alltlb, | 547 | stat->requestee, stat->onetlb, stat->alltlb, |
483 | stat->s_retry, stat->d_retry, stat->ptc_i); | 548 | stat->s_retry, stat->d_retry, stat->ptc_i); |
484 | seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", | 549 | seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", |
485 | uv_read_global_mmr64(uv_blade_to_pnode | 550 | uv_read_global_mmr64(uv_cpu_to_pnode(cpu), |
486 | (uv_cpu_to_blade_id(cpu)), | ||
487 | UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), | 551 | UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), |
488 | stat->sflush, stat->dflush, | 552 | stat->sflush, stat->dflush, |
489 | stat->retriesok, stat->nomsg, | 553 | stat->retriesok, stat->nomsg, |
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node) | |||
617 | * finish the initialization of the per-blade control structures | 681 | * finish the initialization of the per-blade control structures |
618 | */ | 682 | */ |
619 | static void __init | 683 | static void __init |
620 | uv_table_bases_finish(int blade, int node, int cur_cpu, | 684 | uv_table_bases_finish(int blade, |
621 | struct bau_control *bau_tablesp, | 685 | struct bau_control *bau_tablesp, |
622 | struct bau_desc *adp) | 686 | struct bau_desc *adp) |
623 | { | 687 | { |
624 | struct bau_control *bcp; | 688 | struct bau_control *bcp; |
625 | int i; | 689 | int cpu; |
626 | 690 | ||
627 | for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) { | 691 | for_each_present_cpu(cpu) { |
628 | bcp = (struct bau_control *)&per_cpu(bau_control, i); | 692 | if (blade != uv_cpu_to_blade_id(cpu)) |
693 | continue; | ||
629 | 694 | ||
695 | bcp = (struct bau_control *)&per_cpu(bau_control, cpu); | ||
630 | bcp->bau_msg_head = bau_tablesp->va_queue_first; | 696 | bcp->bau_msg_head = bau_tablesp->va_queue_first; |
631 | bcp->va_queue_first = bau_tablesp->va_queue_first; | 697 | bcp->va_queue_first = bau_tablesp->va_queue_first; |
632 | bcp->va_queue_last = bau_tablesp->va_queue_last; | 698 | bcp->va_queue_last = bau_tablesp->va_queue_last; |
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode) | |||
649 | struct bau_desc *adp; | 715 | struct bau_desc *adp; |
650 | struct bau_desc *ad2; | 716 | struct bau_desc *ad2; |
651 | 717 | ||
652 | adp = (struct bau_desc *) | 718 | adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node); |
653 | kmalloc_node(16384, GFP_KERNEL, node); | ||
654 | BUG_ON(!adp); | 719 | BUG_ON(!adp); |
655 | 720 | ||
656 | pa = __pa((unsigned long)adp); | 721 | pa = uv_gpa(adp); /* need the real nasid*/ |
657 | n = pa >> uv_nshift; | 722 | n = pa >> uv_nshift; |
658 | m = pa & uv_mmask; | 723 | m = pa & uv_mmask; |
659 | 724 | ||
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode) | |||
667 | for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) { | 732 | for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) { |
668 | memset(ad2, 0, sizeof(struct bau_desc)); | 733 | memset(ad2, 0, sizeof(struct bau_desc)); |
669 | ad2->header.sw_ack_flag = 1; | 734 | ad2->header.sw_ack_flag = 1; |
670 | ad2->header.base_dest_nodeid = | 735 | /* |
671 | uv_blade_to_pnode(uv_cpu_to_blade_id(0)); | 736 | * base_dest_nodeid is the first node in the partition, so |
737 | * the bit map will indicate partition-relative node numbers. | ||
738 | * note that base_dest_nodeid is actually a nasid. | ||
739 | */ | ||
740 | ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1; | ||
672 | ad2->header.command = UV_NET_ENDPOINT_INTD; | 741 | ad2->header.command = UV_NET_ENDPOINT_INTD; |
673 | ad2->header.int_both = 1; | 742 | ad2->header.int_both = 1; |
674 | /* | 743 | /* |
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init | |||
686 | uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) | 755 | uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) |
687 | { | 756 | { |
688 | struct bau_payload_queue_entry *pqp; | 757 | struct bau_payload_queue_entry *pqp; |
758 | unsigned long pa; | ||
759 | int pn; | ||
689 | char *cp; | 760 | char *cp; |
690 | 761 | ||
691 | pqp = (struct bau_payload_queue_entry *) kmalloc_node( | 762 | pqp = (struct bau_payload_queue_entry *) kmalloc_node( |
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) | |||
696 | cp = (char *)pqp + 31; | 767 | cp = (char *)pqp + 31; |
697 | pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); | 768 | pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); |
698 | bau_tablesp->va_queue_first = pqp; | 769 | bau_tablesp->va_queue_first = pqp; |
770 | /* | ||
771 | * need the pnode of where the memory was really allocated | ||
772 | */ | ||
773 | pa = uv_gpa(pqp); | ||
774 | pn = pa >> uv_nshift; | ||
699 | uv_write_global_mmr64(pnode, | 775 | uv_write_global_mmr64(pnode, |
700 | UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, | 776 | UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, |
701 | ((unsigned long)pnode << | 777 | ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | |
702 | UV_PAYLOADQ_PNODE_SHIFT) | | ||
703 | uv_physnodeaddr(pqp)); | 778 | uv_physnodeaddr(pqp)); |
704 | uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, | 779 | uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, |
705 | uv_physnodeaddr(pqp)); | 780 | uv_physnodeaddr(pqp)); |
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) | |||
715 | /* | 790 | /* |
716 | * Initialization of each UV blade's structures | 791 | * Initialization of each UV blade's structures |
717 | */ | 792 | */ |
718 | static int __init uv_init_blade(int blade, int node, int cur_cpu) | 793 | static int __init uv_init_blade(int blade) |
719 | { | 794 | { |
795 | int node; | ||
720 | int pnode; | 796 | int pnode; |
721 | unsigned long pa; | 797 | unsigned long pa; |
722 | unsigned long apicid; | 798 | unsigned long apicid; |
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu) | |||
724 | struct bau_payload_queue_entry *pqp; | 800 | struct bau_payload_queue_entry *pqp; |
725 | struct bau_control *bau_tablesp; | 801 | struct bau_control *bau_tablesp; |
726 | 802 | ||
803 | node = blade_to_first_node(blade); | ||
727 | bau_tablesp = uv_table_bases_init(blade, node); | 804 | bau_tablesp = uv_table_bases_init(blade, node); |
728 | pnode = uv_blade_to_pnode(blade); | 805 | pnode = uv_blade_to_pnode(blade); |
729 | adp = uv_activation_descriptor_init(node, pnode); | 806 | adp = uv_activation_descriptor_init(node, pnode); |
730 | pqp = uv_payload_queue_init(node, pnode, bau_tablesp); | 807 | pqp = uv_payload_queue_init(node, pnode, bau_tablesp); |
731 | uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp); | 808 | uv_table_bases_finish(blade, bau_tablesp, adp); |
732 | /* | 809 | /* |
733 | * the below initialization can't be in firmware because the | 810 | * the below initialization can't be in firmware because the |
734 | * messaging IRQ will be determined by the OS | 811 | * messaging IRQ will be determined by the OS |
735 | */ | 812 | */ |
736 | apicid = per_cpu(x86_cpu_to_apicid, cur_cpu); | 813 | apicid = blade_to_first_apicid(blade); |
737 | pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); | 814 | pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); |
738 | if ((pa & 0xff) != UV_BAU_MESSAGE) { | 815 | if ((pa & 0xff) != UV_BAU_MESSAGE) { |
739 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | 816 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, |
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu) | |||
748 | static int __init uv_bau_init(void) | 825 | static int __init uv_bau_init(void) |
749 | { | 826 | { |
750 | int blade; | 827 | int blade; |
751 | int node; | ||
752 | int nblades; | 828 | int nblades; |
753 | int last_blade; | ||
754 | int cur_cpu; | 829 | int cur_cpu; |
755 | 830 | ||
756 | if (!is_uv_system()) | 831 | if (!is_uv_system()) |
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void) | |||
763 | uv_bau_retry_limit = 1; | 838 | uv_bau_retry_limit = 1; |
764 | uv_nshift = uv_hub_info->n_val; | 839 | uv_nshift = uv_hub_info->n_val; |
765 | uv_mmask = (1UL << uv_hub_info->n_val) - 1; | 840 | uv_mmask = (1UL << uv_hub_info->n_val) - 1; |
766 | nblades = 0; | 841 | nblades = uv_num_possible_blades(); |
767 | last_blade = -1; | 842 | |
768 | cur_cpu = 0; | ||
769 | for_each_online_node(node) { | ||
770 | blade = uv_node_to_blade_id(node); | ||
771 | if (blade == last_blade) | ||
772 | continue; | ||
773 | last_blade = blade; | ||
774 | nblades++; | ||
775 | } | ||
776 | uv_bau_table_bases = (struct bau_control **) | 843 | uv_bau_table_bases = (struct bau_control **) |
777 | kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL); | 844 | kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL); |
778 | BUG_ON(!uv_bau_table_bases); | 845 | BUG_ON(!uv_bau_table_bases); |
779 | 846 | ||
780 | last_blade = -1; | 847 | uv_partition_base_pnode = 0x7fffffff; |
781 | for_each_online_node(node) { | 848 | for (blade = 0; blade < nblades; blade++) |
782 | blade = uv_node_to_blade_id(node); | 849 | if (uv_blade_nr_possible_cpus(blade) && |
783 | if (blade == last_blade) | 850 | (uv_blade_to_pnode(blade) < uv_partition_base_pnode)) |
784 | continue; | 851 | uv_partition_base_pnode = uv_blade_to_pnode(blade); |
785 | last_blade = blade; | 852 | for (blade = 0; blade < nblades; blade++) |
786 | uv_init_blade(blade, node, cur_cpu); | 853 | if (uv_blade_nr_possible_cpus(blade)) |
787 | cur_cpu += uv_blade_nr_possible_cpus(blade); | 854 | uv_init_blade(blade); |
788 | } | 855 | |
789 | alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); | 856 | alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); |
790 | uv_enable_timeouts(); | 857 | uv_enable_timeouts(); |
791 | 858 | ||
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe6361..d57de05dc430 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc; | |||
699 | * code, which is necessary to support wrapping clocksources like pm | 699 | * code, which is necessary to support wrapping clocksources like pm |
700 | * timer. | 700 | * timer. |
701 | */ | 701 | */ |
702 | static cycle_t read_tsc(void) | 702 | static cycle_t read_tsc(struct clocksource *cs) |
703 | { | 703 | { |
704 | cycle_t ret = (cycle_t)get_cycles(); | 704 | cycle_t ret = (cycle_t)get_cycles(); |
705 | 705 | ||
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/sysdev.h> | 22 | #include <linux/sysdev.h> |
23 | #include <asm/uv/bios.h> | 23 | #include <asm/uv/bios.h> |
24 | #include <asm/uv/uv.h> | ||
24 | 25 | ||
25 | struct kobject *sgi_uv_kobj; | 26 | struct kobject *sgi_uv_kobj; |
26 | 27 | ||
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void) | |||
47 | { | 48 | { |
48 | unsigned long ret; | 49 | unsigned long ret; |
49 | 50 | ||
51 | if (!is_uv_system()) | ||
52 | return -ENODEV; | ||
53 | |||
50 | if (!sgi_uv_kobj) | 54 | if (!sgi_uv_kobj) |
51 | sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj); | 55 | sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj); |
52 | if (!sgi_uv_kobj) { | 56 | if (!sgi_uv_kobj) { |
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
index 2ffb6c53326e..583f11d5c480 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/kernel/uv_time.c
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | #define RTC_NAME "sgi_rtc" | 30 | #define RTC_NAME "sgi_rtc" |
31 | 31 | ||
32 | static cycle_t uv_read_rtc(void); | 32 | static cycle_t uv_read_rtc(struct clocksource *cs); |
33 | static int uv_rtc_next_event(unsigned long, struct clock_event_device *); | 33 | static int uv_rtc_next_event(unsigned long, struct clock_event_device *); |
34 | static void uv_rtc_timer_setup(enum clock_event_mode, | 34 | static void uv_rtc_timer_setup(enum clock_event_mode, |
35 | struct clock_event_device *); | 35 | struct clock_event_device *); |
@@ -123,7 +123,7 @@ static int uv_setup_intr(int cpu, u64 expires) | |||
123 | /* Initialize comparator value */ | 123 | /* Initialize comparator value */ |
124 | uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires); | 124 | uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires); |
125 | 125 | ||
126 | return (expires < uv_read_rtc() && !uv_intr_pending(pnode)); | 126 | return (expires < uv_read_rtc(NULL) && !uv_intr_pending(pnode)); |
127 | } | 127 | } |
128 | 128 | ||
129 | /* | 129 | /* |
@@ -256,7 +256,7 @@ static int uv_rtc_unset_timer(int cpu) | |||
256 | 256 | ||
257 | spin_lock_irqsave(&head->lock, flags); | 257 | spin_lock_irqsave(&head->lock, flags); |
258 | 258 | ||
259 | if (head->next_cpu == bcpu && uv_read_rtc() >= *t) | 259 | if (head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) |
260 | rc = 1; | 260 | rc = 1; |
261 | 261 | ||
262 | *t = ULLONG_MAX; | 262 | *t = ULLONG_MAX; |
@@ -278,7 +278,7 @@ static int uv_rtc_unset_timer(int cpu) | |||
278 | /* | 278 | /* |
279 | * Read the RTC. | 279 | * Read the RTC. |
280 | */ | 280 | */ |
281 | static cycle_t uv_read_rtc(void) | 281 | static cycle_t uv_read_rtc(struct clocksource *cs) |
282 | { | 282 | { |
283 | return (cycle_t)uv_read_local_mmr(UVH_RTC); | 283 | return (cycle_t)uv_read_local_mmr(UVH_RTC); |
284 | } | 284 | } |
@@ -291,7 +291,7 @@ static int uv_rtc_next_event(unsigned long delta, | |||
291 | { | 291 | { |
292 | int ced_cpu = cpumask_first(ced->cpumask); | 292 | int ced_cpu = cpumask_first(ced->cpumask); |
293 | 293 | ||
294 | return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc()); | 294 | return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL)); |
295 | } | 295 | } |
296 | 296 | ||
297 | /* | 297 | /* |
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7bad..2b3eb82efeeb 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void) | |||
283 | /** vmi clocksource */ | 283 | /** vmi clocksource */ |
284 | static struct clocksource clocksource_vmi; | 284 | static struct clocksource clocksource_vmi; |
285 | 285 | ||
286 | static cycle_t read_real_cycles(void) | 286 | static cycle_t read_real_cycles(struct clocksource *cs) |
287 | { | 287 | { |
288 | cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); | 288 | cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); |
289 | return max(ret, clocksource_vmi.cycle_last); | 289 | return max(ret, clocksource_vmi.cycle_last); |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 0a5b04aa98f1..c5ee17e8c6d9 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf) | |||
89 | 89 | ||
90 | if (!used_math()) | 90 | if (!used_math()) |
91 | return 0; | 91 | return 0; |
92 | clear_used_math(); /* trigger finit */ | 92 | |
93 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | 93 | if (task_thread_info(tsk)->status & TS_USEDFPU) { |
94 | /* | 94 | /* |
95 | * Start with clearing the user buffer. This will present a | 95 | * Start with clearing the user buffer. This will present a |
@@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf) | |||
114 | return -1; | 114 | return -1; |
115 | } | 115 | } |
116 | 116 | ||
117 | clear_used_math(); /* trigger finit */ | ||
118 | |||
117 | if (task_thread_info(tsk)->status & TS_XSAVE) { | 119 | if (task_thread_info(tsk)->status & TS_XSAVE) { |
118 | struct _fpstate __user *fx = buf; | 120 | struct _fpstate __user *fx = buf; |
119 | struct _xstate __user *x = buf; | 121 | struct _xstate __user *x = buf; |