Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/Makefile | 6
 arch/x86/kernel/apic/apic.c | 40
 arch/x86/kernel/apic/bigsmp_32.c | 2
 arch/x86/kernel/apic/io_apic.c | 63
 arch/x86/kernel/apic/nmi.c | 4
 arch/x86/kernel/apic/numaq_32.c | 57
 arch/x86/kernel/apic/probe_64.c | 15
 arch/x86/kernel/apic/summit_32.c | 2
 arch/x86/kernel/apic/x2apic_uv_x.c | 11
 arch/x86/kernel/cpu/Makefile | 2
 arch/x86/kernel/cpu/amd.c | 2
 arch/x86/kernel/cpu/common.c | 5
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 28
 arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 44
 arch/x86/kernel/cpu/hypervisor.c | 14
 arch/x86/kernel/cpu/mcheck/mce_amd.c | 3
 arch/x86/kernel/cpu/mtrr/if.c | 12
 arch/x86/kernel/cpu/perf_event.c (renamed from arch/x86/kernel/cpu/perf_counter.c) | 601
 arch/x86/kernel/cpu/perfctr-watchdog.c | 2
 arch/x86/kernel/cpu/vmware.c | 27
 arch/x86/kernel/cpuid.c | 4
 arch/x86/kernel/dumpstack_32.c | 1
 arch/x86/kernel/dumpstack_64.c | 1
 arch/x86/kernel/e820.c | 21
 arch/x86/kernel/early_printk.c | 780
 arch/x86/kernel/efi.c | 4
 arch/x86/kernel/entry_64.S | 24
 arch/x86/kernel/head32.c | 26
 arch/x86/kernel/head64.c | 2
 arch/x86/kernel/head_32.S | 5
 arch/x86/kernel/head_64.S | 2
 arch/x86/kernel/i8253.c | 19
 arch/x86/kernel/init_task.c | 5
 arch/x86/kernel/irqinit.c | 38
 arch/x86/kernel/kvmclock.c | 11
 arch/x86/kernel/ldt.c | 4
 arch/x86/kernel/microcode_core.c | 6
 arch/x86/kernel/mpparse.c | 75
 arch/x86/kernel/mrst.c | 24
 arch/x86/kernel/msr.c | 4
 arch/x86/kernel/paravirt.c | 36
 arch/x86/kernel/pci-swiotlb.c | 5
 arch/x86/kernel/process.c | 31
 arch/x86/kernel/ptrace.c | 21
 arch/x86/kernel/rtc.c | 17
 arch/x86/kernel/setup.c | 148
 arch/x86/kernel/sfi.c | 122
 arch/x86/kernel/smpboot.c | 13
 arch/x86/kernel/syscall_table_32.S | 2
 arch/x86/kernel/time.c | 120
 arch/x86/kernel/time_32.c | 137
 arch/x86/kernel/time_64.c | 135
 arch/x86/kernel/trampoline.c | 4
 arch/x86/kernel/trampoline_32.S | 8
 arch/x86/kernel/trampoline_64.S | 5
 arch/x86/kernel/traps.c | 7
 arch/x86/kernel/tsc.c | 88
 arch/x86/kernel/visws_quirks.c | 54
 arch/x86/kernel/vmi_32.c | 12
 arch/x86/kernel/vmiclock_32.c | 2
 arch/x86/kernel/vmlinux.lds.S | 4
 arch/x86/kernel/vsyscall_64.c | 11
 arch/x86/kernel/x86_init.c | 75
 63 files changed, 1134 insertions(+), 1919 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 832cb838cb48..d8e5d0cdd678 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -31,8 +31,8 @@ GCOV_PROFILE_paravirt.o := n
 
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
-obj-y			+= time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y			+= setup.o i8259.o irqinit.o
+obj-y			+= time.o ioport.o ldt.o dumpstack.o
+obj-y			+= setup.o x86_init.o i8259.o irqinit.o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
 obj-$(CONFIG_X86_32)	+= probe_roms_32.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
@@ -56,6 +56,7 @@ obj-$(CONFIG_INTEL_TXT) += tboot.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-y				+= cpu/
 obj-y				+= acpi/
+obj-$(CONFIG_SFI)		+= sfi.o
 obj-y				+= reboot.o
 obj-$(CONFIG_MCA)		+= mca_32.o
 obj-$(CONFIG_X86_MSR)		+= msr.o
@@ -105,6 +106,7 @@ obj-$(CONFIG_SCx200) += scx200.o
 scx200-y			+= scx200_32.o
 
 obj-$(CONFIG_OLPC)		+= olpc.o
+obj-$(CONFIG_X86_MRST)		+= mrst.o
 
 microcode-y			:= microcode_core.o
 microcode-$(CONFIG_MICROCODE_INTEL)	+= microcode_intel.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 159740decc41..894aa97f0717 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -14,7 +14,7 @@
  *	Mikael Pettersson	:	PM converted to driver model.
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/kernel_stat.h>
 #include <linux/mc146818rtc.h>
 #include <linux/acpi_pmtmr.h>
@@ -35,7 +35,8 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
+#include <asm/x86_init.h>
 #include <asm/pgalloc.h>
 #include <asm/atomic.h>
 #include <asm/mpspec.h>
@@ -61,7 +62,7 @@ unsigned int boot_cpu_physical_apicid = -1U;
 /*
  * The highest APIC ID seen during enumeration.
  *
- * This determines the messaging protocol we can use: if all APIC IDs
+ * On AMD, this determines the messaging protocol we can use: if all APIC IDs
  * are in the 0 ... 7 range, then we can use logical addressing which
  * has some performance advantages (better broadcasting).
  *
@@ -978,7 +979,7 @@ void lapic_shutdown(void)
 {
 	unsigned long flags;
 
-	if (!cpu_has_apic)
+	if (!cpu_has_apic && !apic_from_smp_config())
 		return;
 
 	local_irq_save(flags);
@@ -1188,7 +1189,7 @@ void __cpuinit setup_local_APIC(void)
 		apic_write(APIC_ESR, 0);
 	}
 #endif
-	perf_counters_lapic_init();
+	perf_events_lapic_init();
 
 	preempt_disable();
 
@@ -1196,8 +1197,7 @@ void __cpuinit setup_local_APIC(void)
 	 * Double-check whether this APIC is really registered.
 	 * This is meaningless in clustered apic mode, so we skip it.
 	 */
-	if (!apic->apic_id_registered())
-		BUG();
+	BUG_ON(!apic->apic_id_registered());
 
 	/*
 	 * Intel recommends to set DFR, LDR and TPR before enabling
@@ -1709,7 +1709,7 @@ int __init APIC_init_uniprocessor(void)
 	localise_nmi_watchdog();
 #endif
 
-	setup_boot_clock();
+	x86_init.timers.setup_percpu_clockev();
 #ifdef CONFIG_X86_64
 	check_nmi_watchdog();
 #endif
@@ -1916,24 +1916,14 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		max_physical_apicid = apicid;
 
 #ifdef CONFIG_X86_32
-	/*
-	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
-	 * but we need to work other dependencies like SMP_SUSPEND etc
-	 * before this can be done without some confusion.
-	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-	 *       - Ashok Raj <ashok.raj@intel.com>
-	 */
-	if (max_physical_apicid >= 8) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
-			if (!APIC_XAPIC(version)) {
-				def_to_bigsmp = 0;
-				break;
-			}
-			/* If P4 and above fall through */
-		case X86_VENDOR_AMD:
-			def_to_bigsmp = 1;
-		}
-	}
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		if (num_processors > 8)
+			def_to_bigsmp = 1;
+		break;
+	case X86_VENDOR_AMD:
+		if (max_physical_apicid >= 8)
+			def_to_bigsmp = 1;
+	}
 #endif
 
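Note: apic_from_smp_config(), now tested by lapic_shutdown() above (and by the io_apic.c hunks below), is introduced in a header outside this diffstat. Judging purely from its call sites — it must answer "was a local APIC configured via the MP tables even though the CPUID feature bit is unusable?" — a minimal sketch of its likely definition (an assumption, not shown in this diff) is:

static inline int apic_from_smp_config(void)
{
	return smp_found_config && !disable_apic;
}
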
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 676cdac385c0..77a06413b6b2 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -112,7 +112,7 @@ static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
 	return physids_promote(0xFFL);
 }
 
-static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
+static int bigsmp_check_phys_apicid_present(int phys_apicid)
 {
 	return 1;
 }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3c8f9e75d038..dc69f28489f5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -96,6 +96,11 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 /* # of MP IRQ source entries */
 int mp_irq_entries;
 
+/* Number of legacy interrupts */
+static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+/* GSI interrupts */
+static int nr_irqs_gsi = NR_IRQS_LEGACY;
+
 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
 int mp_bus_id_to_type[MAX_MP_BUSSES];
 #endif
@@ -173,6 +178,12 @@ static struct irq_cfg irq_cfgx[NR_IRQS] = {
 	[15] = { .vector = IRQ15_VECTOR, },
 };
 
+void __init io_apic_disable_legacy(void)
+{
+	nr_legacy_irqs = 0;
+	nr_irqs_gsi = 0;
+}
+
 int __init arch_early_irq_init(void)
 {
 	struct irq_cfg *cfg;
@@ -190,7 +201,7 @@ int __init arch_early_irq_init(void)
 		desc->chip_data = &cfg[i];
 		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
 		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
-		if (i < NR_IRQS_LEGACY)
+		if (i < nr_legacy_irqs)
 			cpumask_setall(cfg[i].domain);
 	}
 
@@ -216,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
 							  GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
-		} else {
-			cpumask_clear(cfg->domain);
-			cpumask_clear(cfg->old_domain);
 		}
 	}
 
@@ -867,7 +875,7 @@ static int __init find_isa_irq_apic(int irq, int type)
  */
 static int EISA_ELCR(unsigned int irq)
 {
-	if (irq < NR_IRQS_LEGACY) {
+	if (irq < nr_legacy_irqs) {
 		unsigned int port = 0x4d0 + (irq >> 3);
 		return (inb(port) >> (irq & 7)) & 1;
 	}
@@ -1464,7 +1472,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
 	}
 
 	ioapic_register_intr(irq, desc, trigger);
-	if (irq < NR_IRQS_LEGACY)
+	if (irq < nr_legacy_irqs)
 		disable_8259A_irq(irq);
 
 	ioapic_write_entry(apic_id, pin, entry);
@@ -1831,7 +1839,7 @@ __apicdebuginit(void) print_PIC(void)
 	unsigned int v;
 	unsigned long flags;
 
-	if (apic_verbosity == APIC_QUIET)
+	if (apic_verbosity == APIC_QUIET || !nr_legacy_irqs)
 		return;
 
 	printk(KERN_DEBUG "\nprinting PIC contents\n");
@@ -1863,7 +1871,7 @@ __apicdebuginit(int) print_all_ICs(void)
 	print_PIC();
 
 	/* don't print out if apic is not there */
-	if (!cpu_has_apic || disable_apic)
+	if (!cpu_has_apic && !apic_from_smp_config())
 		return 0;
 
 	print_all_local_APICs();
@@ -1894,6 +1902,10 @@ void __init enable_IO_APIC(void)
 		spin_unlock_irqrestore(&ioapic_lock, flags);
 		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
 	}
+
+	if (!nr_legacy_irqs)
+		return;
+
 	for(apic = 0; apic < nr_ioapics; apic++) {
 		int pin;
 		/* See if any of the pins is in ExtINT mode */
@@ -1948,6 +1960,9 @@ void disable_IO_APIC(void)
 	 */
 	clear_IO_APIC();
 
+	if (!nr_legacy_irqs)
+		return;
+
 	/*
 	 * If the i8259 is routed through an IOAPIC
 	 * Put that IOAPIC in virtual wire mode
@@ -1981,7 +1996,7 @@ void disable_IO_APIC(void)
 	/*
 	 * Use virtual wire A mode when interrupt remapping is enabled.
 	 */
-	if (cpu_has_apic)
+	if (cpu_has_apic || apic_from_smp_config())
 		disconnect_bsp_APIC(!intr_remapping_enabled &&
 				ioapic_i8259.pin != -1);
 }
@@ -1994,7 +2009,7 @@ void disable_IO_APIC(void)
  * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
  */
 
-static void __init setup_ioapic_ids_from_mpc(void)
+void __init setup_ioapic_ids_from_mpc(void)
 {
 	union IO_APIC_reg_00 reg_00;
 	physid_mask_t phys_id_present_map;
@@ -2003,9 +2018,8 @@ static void __init setup_ioapic_ids_from_mpc(void)
 	unsigned char old_id;
 	unsigned long flags;
 
-	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
+	if (acpi_ioapic)
 		return;
-
 	/*
 	 * Don't check I/O APIC IDs for xAPIC systems. They have
 	 * no meaning without the serial APIC bus.
@@ -2179,7 +2193,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 	struct irq_cfg *cfg;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	if (irq < NR_IRQS_LEGACY) {
+	if (irq < nr_legacy_irqs) {
 		disable_8259A_irq(irq);
 		if (i8259A_irq_pending(irq))
 			was_pending = 1;
@@ -2657,7 +2671,7 @@ static inline void init_IO_APIC_traps(void)
 	 * so default to an old-fashioned 8259
 	 * interrupt if we can..
 	 */
-	if (irq < NR_IRQS_LEGACY)
+	if (irq < nr_legacy_irqs)
 		make_8259A_irq(irq);
 	else
 		/* Strange. Oh, well.. */
@@ -2993,7 +3007,7 @@ out:
  * the I/O APIC in all cases now. No actual device should request
  * it anyway.  --macro
  */
-#define PIC_IRQS	(1 << PIC_CASCADE_IR)
+#define PIC_IRQS	(1UL << PIC_CASCADE_IR)
 
 void __init setup_IO_APIC(void)
 {
@@ -3001,21 +3015,19 @@ void __init setup_IO_APIC(void)
 	/*
 	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
 	 */
-
-	io_apic_irqs = ~PIC_IRQS;
+	io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
 
 	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
 	/*
 	 * Set up IO-APIC IRQ routing.
 	 */
-#ifdef CONFIG_X86_32
-	if (!acpi_ioapic)
-		setup_ioapic_ids_from_mpc();
-#endif
+	x86_init.mpparse.setup_ioapic_ids();
+
 	sync_Arb_IDs();
 	setup_IO_APIC_irqs();
 	init_IO_APIC_traps();
-	check_timer();
+	if (nr_legacy_irqs)
+		check_timer();
 }
 
 /*
@@ -3116,7 +3128,6 @@ static int __init ioapic_init_sysfs(void)
 
 device_initcall(ioapic_init_sysfs);
 
-static int nr_irqs_gsi = NR_IRQS_LEGACY;
 /*
  * Dynamic irq allocate and deallocation
 */
@@ -3856,7 +3867,7 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
 	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
-	if (irq >= NR_IRQS_LEGACY) {
+	if (irq >= nr_legacy_irqs) {
 		cfg = desc->chip_data;
 		if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
 			printk(KERN_INFO "can not add pin %d for irq %d\n",
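Note: with the legacy IRQ count now a variable, a board with no i8259 PIC can opt out of all legacy handling before setup_IO_APIC() runs. A hypothetical call site (the function name here is illustrative only; the real Moorestown hook lives in mrst.c, whose diff is not shown) might be:

/* Sketch: early setup on a PIC-less SFI platform. */
static void __init picless_platform_arch_setup(void)
{
	/* Drop the 16 ISA IRQs; nr_legacy_irqs and nr_irqs_gsi become 0. */
	io_apic_disable_legacy();
}

After that call, enable_IO_APIC()/disable_IO_APIC() return before touching the i8259, check_timer() is skipped, and io_apic_irqs becomes ~0UL so every GSI is routed through the I/O APIC.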
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index cb66a22d98ad..7ff61d6a188a 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -508,14 +508,14 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
 /*
  * proc handler for /proc/sys/kernel/nmi
 */
-int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
+int proc_nmi_enabled(struct ctl_table *table, int write,
 			void __user *buffer, size_t *length, loff_t *ppos)
 {
 	int old_state;
 
 	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
 	old_state = nmi_watchdog_enabled;
-	proc_dointvec(table, write, file, buffer, length, ppos);
+	proc_dointvec(table, write, buffer, length, ppos);
 	if (!!old_state == !!nmi_watchdog_enabled)
 		return 0;
 
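Note: this hunk is part of a tree-wide sysctl cleanup that drops the unused struct file * argument from proc handlers. The handler signature implied by the two changed lines (an inference — the ctl_table change itself is outside this diff) is:

int proc_handler(struct ctl_table *table, int write,
		 void __user *buffer, size_t *lenp, loff_t *ppos);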
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index ca96e68f0d23..efa00e2b8505 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -66,7 +66,6 @@ struct mpc_trans {
 	unsigned short trans_reserved;
 };
 
-/* x86_quirks member */
 static int mpc_record;
 
 static struct mpc_trans *translation_table[MAX_MPC_ENTRY];
@@ -130,10 +129,9 @@ void __cpuinit numaq_tsc_disable(void)
 	}
 }
 
-static int __init numaq_pre_time_init(void)
+static void __init numaq_tsc_init(void)
 {
 	numaq_tsc_disable();
-	return 0;
 }
 
 static inline int generate_logical_apicid(int quad, int phys_apicid)
@@ -177,6 +175,19 @@ static void mpc_oem_pci_bus(struct mpc_bus *m)
 	quad_local_to_mp_bus_id[quad][local] = m->busid;
 }
 
+/*
+ * Called from mpparse code.
+ * mode = 0: prescan
+ * mode = 1: one mpc entry scanned
+ */
+static void numaq_mpc_record(unsigned int mode)
+{
+	if (!mode)
+		mpc_record = 0;
+	else
+		mpc_record++;
+}
+
 static void __init MP_translation_info(struct mpc_trans *m)
 {
 	printk(KERN_INFO
@@ -206,9 +217,9 @@ static int __init mpf_checksum(unsigned char *mp, int len)
 /*
  * Read/parse the MPC oem tables
 */
-static void __init
- smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize)
+static void __init smp_read_mpc_oem(struct mpc_table *mpc)
 {
+	struct mpc_oemtable *oemtable = (void *)(long)mpc->oemptr;
 	int count = sizeof(*oemtable); /* the header size */
 	unsigned char *oemptr = ((unsigned char *)oemtable) + count;
 
@@ -250,29 +261,6 @@ static void __init
 	}
 }
 
-static int __init numaq_setup_ioapic_ids(void)
-{
-	/* so can skip it */
-	return 1;
-}
-
-static struct x86_quirks numaq_x86_quirks __initdata = {
-	.arch_pre_time_init	= numaq_pre_time_init,
-	.arch_time_init		= NULL,
-	.arch_pre_intr_init	= NULL,
-	.arch_memory_setup	= NULL,
-	.arch_intr_init		= NULL,
-	.arch_trap_init		= NULL,
-	.mach_get_smp_config	= NULL,
-	.mach_find_smp_config	= NULL,
-	.mpc_record		= &mpc_record,
-	.mpc_apic_id		= mpc_apic_id,
-	.mpc_oem_bus_info	= mpc_oem_bus_info,
-	.mpc_oem_pci_bus	= mpc_oem_pci_bus,
-	.smp_read_mpc_oem	= smp_read_mpc_oem,
-	.setup_ioapic_ids	= numaq_setup_ioapic_ids,
-};
-
 static __init void early_check_numaq(void)
 {
 	/*
@@ -286,8 +274,15 @@ static __init void early_check_numaq(void)
 	if (smp_found_config)
 		early_get_smp_config();
 
-	if (found_numaq)
-		x86_quirks = &numaq_x86_quirks;
+	if (found_numaq) {
+		x86_init.mpparse.mpc_record = numaq_mpc_record;
+		x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
+		x86_init.mpparse.mpc_apic_id = mpc_apic_id;
+		x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
+		x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
+		x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
+		x86_init.timers.tsc_pre_init = numaq_tsc_init;
+	}
 }
 
 int __init get_memcfg_numaq(void)
@@ -418,7 +413,7 @@ static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
 /* Where the IO area was mapped on multiquad, always 0 otherwise */
 void *xquad_portio;
 
-static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
+static inline int numaq_check_phys_apicid_present(int phys_apicid)
 {
 	return 1;
 }
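Note: this replaces the monolithic x86_quirks table with targeted overrides of individual x86_init function pointers; unused hooks no longer need explicit NULL entries because the defaults (such as x86_init_noop) stay callable. From the assignments above, the mpparse group plausibly has this shape — a sketch inferred from usage here, not the authoritative header:

struct x86_init_mpparse {
	void (*mpc_record)(unsigned int mode);
	void (*setup_ioapic_ids)(void);
	int  (*mpc_apic_id)(struct mpc_cpu *m);
	void (*smp_read_mpc_oem)(struct mpc_table *mpc);
	void (*mpc_oem_pci_bus)(struct mpc_bus *m);
	void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
};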
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 65edc180fc82..c4cbd3080c1c 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -64,16 +64,23 @@ void __init default_setup_apic_routing(void)
 			apic = &apic_x2apic_phys;
 		else
 			apic = &apic_x2apic_cluster;
-		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 	}
 #endif
 
 	if (apic == &apic_flat) {
-		if (max_physical_apicid >= 8)
-			apic = &apic_physflat;
-		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			if (num_processors > 8)
+				apic = &apic_physflat;
+			break;
+		case X86_VENDOR_AMD:
+			if (max_physical_apicid >= 8)
+				apic = &apic_physflat;
+		}
 	}
 
+	printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+
 	if (is_vsmp_box()) {
 		/* need to update phys_pkg_id */
 		apic->phys_pkg_id = apicid_phys_pkg_id;
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index eafdfbd1ea95..645ecc4ff0be 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -272,7 +272,7 @@ static physid_mask_t summit_apicid_to_cpu_present(int apicid)
 	return physid_mask_of_physid(0);
 }
 
-static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
+static int summit_check_phys_apicid_present(int physical_apicid)
 {
 	return 1;
 }
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 601159374e87..f5f5886a6b53 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -389,6 +389,16 @@ static __init void map_gru_high(int max_pnode)
 	map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
 }
 
+static __init void map_mmr_high(int max_pnode)
+{
+	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
+	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
+
+	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
+	if (mmr.s.enable)
+		map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+}
+
 static __init void map_mmioh_high(int max_pnode)
 {
 	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
@@ -643,6 +653,7 @@ void __init uv_system_init(void)
 	}
 
 	map_gru_high(max_pnode);
+	map_mmr_high(max_pnode);
 	map_mmioh_high(max_pnode);
 
 	uv_cpu_init();
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 8dd30638fe44..68537e957a9b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
-obj-$(CONFIG_PERF_COUNTERS)		+= perf_counter.o
+obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
 
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f32fa71ccf97..c910a716a71c 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -184,7 +184,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	 * approved Athlon
 	 */
 	WARN_ONCE(1, "WARNING: This combination of AMD"
-		"processors is not suitable for SMP.\n");
+		" processors is not suitable for SMP.\n");
 	if (!test_taint(TAINT_UNSAFE_SMP))
 		add_taint(TAINT_UNSAFE_SMP);
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2055fc2b2e6b..cc25c2b4a567 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,7 +13,7 @@
 #include <linux/io.h>
 
 #include <asm/stackprotector.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
 #include <asm/mmu_context.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
@@ -34,7 +34,6 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/pat.h>
-#include <linux/smp.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
@@ -870,7 +869,7 @@ void __init identify_boot_cpu(void)
 #else
 	vgetcpu_set_mode();
 #endif
-	init_hw_perf_counters();
+	init_hw_perf_events();
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4109679863c1..7d5c3b0ea8da 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,7 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
-#include <trace/power.h>
+#include <trace/events/power.h>
 
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -72,8 +72,6 @@ static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
 static DEFINE_PER_CPU(struct aperfmperf, old_perf);
 
-DEFINE_TRACE(power_mark);
-
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
 
@@ -332,7 +330,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	unsigned int i;
 	int result = 0;
-	struct power_trace it;
 
 	dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
 
@@ -364,7 +361,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		}
 	}
 
-	trace_power_mark(&it, POWER_PSTATE, next_perf_state);
+	trace_power_frequency(POWER_PSTATE, data->freq_table[next_state].frequency);
 
 	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
@@ -526,6 +523,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 	},
 	{ }
 };
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * AL30: A Machine Check Exception (MCE) Occurring during an
+	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+	 * Both Processor Cores to Lock Up when HT is enabled*/
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		if ((c->x86 == 15) &&
+		    (c->x86_model == 6) &&
+		    (c->x86_mask == 8) && smt_capable())
+			return -ENODEV;
+	}
+	return 0;
+}
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -540,6 +552,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
+#ifdef CONFIG_SMP
+	result = acpi_cpufreq_blacklist(c);
+	if (result)
+		return result;
+#endif
+
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
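Note: the new blacklist matches on the cached CPUID signature fields — c->x86 (family), c->x86_model, and c->x86_mask (stepping) — so the affected part is family 15, model 6, stepping 8 with HyperThreading. As a standalone illustration of where those values come from (simplified; the kernel's own decoding also folds extended model bits into family-6 CPUs):

#include <stdint.h>

/* Decode CPUID leaf 1 EAX roughly the way the kernel fills
 * c->x86 / c->x86_model / c->x86_mask. */
static void decode_cpuid_signature(uint32_t eax, unsigned int *family,
				   unsigned int *model, unsigned int *stepping)
{
	*stepping = eax & 0xf;
	*model    = (eax >> 4) & 0xf;
	*family   = (eax >> 8) & 0xf;
	if (*family == 0xf) {			/* extended fields apply */
		*family += (eax >> 20) & 0xff;
		*model  |= ((eax >> 16) & 0xf) << 4;
	}
}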
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 2a50ef891000..6394aa5c7985 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -605,9 +605,10 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
 	return 0;
 }
 
-static void invalidate_entry(struct powernow_k8_data *data, unsigned int entry)
+static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
+		unsigned int entry)
 {
-	data->powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
 }
 
 static void print_basics(struct powernow_k8_data *data)
@@ -854,6 +855,10 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 		goto err_out;
 	}
 
+	/* fill in data */
+	data->numps = data->acpi_data.state_count;
+	powernow_k8_acpi_pst_values(data, 0);
+
 	if (cpu_family == CPU_HW_PSTATE)
 		ret_val = fill_powernow_table_pstate(data, powernow_table);
 	else
@@ -866,11 +871,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	powernow_table[data->acpi_data.state_count].index = 0;
 	data->powernow_table = powernow_table;
 
-	/* fill in data */
-	data->numps = data->acpi_data.state_count;
 	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
-	powernow_k8_acpi_pst_values(data, 0);
 
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
@@ -914,13 +916,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 				"bad value %d.\n", i, index);
 			printk(KERN_ERR PFX "Please report to BIOS "
 				"manufacturer\n");
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 		rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
 		if (!(hi & HW_PSTATE_VALID_MASK)) {
 			dprintk("invalid pstate %d, ignoring\n", index);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
@@ -941,7 +943,6 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		struct cpufreq_frequency_table *powernow_table)
 {
 	int i;
-	int cntlofreq = 0;
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 fid;
@@ -970,7 +971,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		/* verify frequency is OK */
 		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
 			dprintk("invalid freq %u kHz, ignoring\n", freq);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
@@ -978,38 +979,17 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		 * BIOSs are using "off" to indicate invalid */
 		if (vid == VID_OFF) {
 			dprintk("invalid vid %u, ignoring\n", vid);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
-		/* verify only 1 entry from the lo frequency table */
-		if (fid < HI_FID_TABLE_BOTTOM) {
-			if (cntlofreq) {
-				/* if both entries are the same,
-				 * ignore this one ... */
-				if ((freq != powernow_table[cntlofreq].frequency) ||
-				    (index != powernow_table[cntlofreq].index)) {
-					printk(KERN_ERR PFX
-						"Too many lo freq table "
-						"entries\n");
-					return 1;
-				}
-
-				dprintk("double low frequency table entry, "
-					"ignoring it.\n");
-				invalidate_entry(data, i);
-				continue;
-			} else
-				cntlofreq = i;
-		}
-
 		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries "
 				"%u kHz vs. %u kHz\n", freq,
 				(unsigned int)
 				(data->acpi_data.states[i].core_frequency
 				 * 1000));
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 	}
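Note: invalidate_entry() now takes the frequency table directly instead of reaching through struct powernow_k8_data, so it also works on tables not yet attached to data->powernow_table — exactly the situation created by the "fill in data" move above. Invalidated rows stay in the table with frequency = CPUFREQ_ENTRY_INVALID and are skipped by walkers. A self-contained sketch of that convention (the struct and constants mirror cpufreq's, reproduced here only for illustration):

#define CPUFREQ_ENTRY_INVALID	~0u
#define CPUFREQ_TABLE_END	~1u

struct freq_table_row {
	unsigned int index;	/* driver-private value */
	unsigned int frequency;	/* kHz, or one of the markers above */
};

static unsigned int count_valid_entries(const struct freq_table_row *t)
{
	unsigned int i, n = 0;

	for (i = 0; t[i].frequency != CPUFREQ_TABLE_END; i++)
		if (t[i].frequency != CPUFREQ_ENTRY_INVALID)
			n++;
	return n;
}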
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 93ba8eeb100a..08be922de33a 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -34,13 +34,6 @@ detect_hypervisor_vendor(struct cpuinfo_x86 *c)
 		c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
 }
 
-unsigned long get_hypervisor_tsc_freq(void)
-{
-	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
-		return vmware_get_tsc_khz();
-	return 0;
-}
-
 static inline void __cpuinit
 hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
 {
@@ -55,3 +48,10 @@ void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
 	detect_hypervisor_vendor(c);
 	hypervisor_set_feature_bits(c);
 }
+
+void __init init_hypervisor_platform(void)
+{
+	init_hypervisor(&boot_cpu_data);
+	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
+		vmware_platform_setup();
+}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 8cd5224943b5..83a3d1f4efca 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -489,8 +489,9 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	int i, err = 0;
 	struct threshold_bank *b = NULL;
 	char name[32];
+#ifdef CONFIG_SMP
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
+#endif
 
 	sprintf(name, "threshold_bank%i", bank);
 
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 08b6ea4c62b4..f04e72527604 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -126,8 +126,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 		return -EINVAL;
 
 	base = simple_strtoull(line + 5, &ptr, 0);
-	for (; isspace(*ptr); ++ptr)
-		;
+	while (isspace(*ptr))
+		ptr++;
 
 	if (strncmp(ptr, "size=", 5))
 		return -EINVAL;
@@ -135,14 +135,14 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 	size = simple_strtoull(ptr + 5, &ptr, 0);
 	if ((base & 0xfff) || (size & 0xfff))
 		return -EINVAL;
-	for (; isspace(*ptr); ++ptr)
-		;
+	while (isspace(*ptr))
+		ptr++;
 
 	if (strncmp(ptr, "type=", 5))
 		return -EINVAL;
 	ptr += 5;
-	for (; isspace(*ptr); ++ptr)
-		;
+	while (isspace(*ptr))
+		ptr++;
 
 	for (i = 0; i < MTRR_NUM_TYPES; ++i) {
 		if (strcmp(ptr, mtrr_strings[i]))
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_event.c
index 2732e2c1e4d3..a3c7adb06b78 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Performance counter x86 architecture code 2 * Performance events x86 architecture code
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
@@ -11,7 +11,7 @@
11 * For licencing details see kernel-base/COPYING 11 * For licencing details see kernel-base/COPYING
12 */ 12 */
13 13
14#include <linux/perf_counter.h> 14#include <linux/perf_event.h>
15#include <linux/capability.h> 15#include <linux/capability.h>
16#include <linux/notifier.h> 16#include <linux/notifier.h>
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
@@ -27,19 +27,19 @@
27#include <asm/stacktrace.h> 27#include <asm/stacktrace.h>
28#include <asm/nmi.h> 28#include <asm/nmi.h>
29 29
30static u64 perf_counter_mask __read_mostly; 30static u64 perf_event_mask __read_mostly;
31 31
32/* The maximal number of PEBS counters: */ 32/* The maximal number of PEBS events: */
33#define MAX_PEBS_COUNTERS 4 33#define MAX_PEBS_EVENTS 4
34 34
35/* The size of a BTS record in bytes: */ 35/* The size of a BTS record in bytes: */
36#define BTS_RECORD_SIZE 24 36#define BTS_RECORD_SIZE 24
37 37
38/* The size of a per-cpu BTS buffer in bytes: */ 38/* The size of a per-cpu BTS buffer in bytes: */
39#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024) 39#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
40 40
41/* The BTS overflow threshold in bytes from the end of the buffer: */ 41/* The BTS overflow threshold in bytes from the end of the buffer: */
42#define BTS_OVFL_TH (BTS_RECORD_SIZE * 64) 42#define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
43 43
44 44
45/* 45/*
@@ -65,11 +65,11 @@ struct debug_store {
65 u64 pebs_index; 65 u64 pebs_index;
66 u64 pebs_absolute_maximum; 66 u64 pebs_absolute_maximum;
67 u64 pebs_interrupt_threshold; 67 u64 pebs_interrupt_threshold;
68 u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; 68 u64 pebs_event_reset[MAX_PEBS_EVENTS];
69}; 69};
70 70
71struct cpu_hw_counters { 71struct cpu_hw_events {
72 struct perf_counter *counters[X86_PMC_IDX_MAX]; 72 struct perf_event *events[X86_PMC_IDX_MAX];
73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75 unsigned long interrupts; 75 unsigned long interrupts;
@@ -86,17 +86,17 @@ struct x86_pmu {
86 int (*handle_irq)(struct pt_regs *); 86 int (*handle_irq)(struct pt_regs *);
87 void (*disable_all)(void); 87 void (*disable_all)(void);
88 void (*enable_all)(void); 88 void (*enable_all)(void);
89 void (*enable)(struct hw_perf_counter *, int); 89 void (*enable)(struct hw_perf_event *, int);
90 void (*disable)(struct hw_perf_counter *, int); 90 void (*disable)(struct hw_perf_event *, int);
91 unsigned eventsel; 91 unsigned eventsel;
92 unsigned perfctr; 92 unsigned perfctr;
93 u64 (*event_map)(int); 93 u64 (*event_map)(int);
94 u64 (*raw_event)(u64); 94 u64 (*raw_event)(u64);
95 int max_events; 95 int max_events;
96 int num_counters; 96 int num_events;
97 int num_counters_fixed; 97 int num_events_fixed;
98 int counter_bits; 98 int event_bits;
99 u64 counter_mask; 99 u64 event_mask;
100 int apic; 100 int apic;
101 u64 max_period; 101 u64 max_period;
102 u64 intel_ctrl; 102 u64 intel_ctrl;
@@ -106,7 +106,7 @@ struct x86_pmu {
106 106
107static struct x86_pmu x86_pmu __read_mostly; 107static struct x86_pmu x86_pmu __read_mostly;
108 108
109static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { 109static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
110 .enabled = 1, 110 .enabled = 1,
111}; 111};
112 112
@@ -124,35 +124,35 @@ static const u64 p6_perfmon_event_map[] =
124 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, 124 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
125}; 125};
126 126
127static u64 p6_pmu_event_map(int event) 127static u64 p6_pmu_event_map(int hw_event)
128{ 128{
129 return p6_perfmon_event_map[event]; 129 return p6_perfmon_event_map[hw_event];
130} 130}
131 131
132/* 132/*
133 * Counter setting that is specified not to count anything. 133 * Event setting that is specified not to count anything.
134 * We use this to effectively disable a counter. 134 * We use this to effectively disable a counter.
135 * 135 *
136 * L2_RQSTS with 0 MESI unit mask. 136 * L2_RQSTS with 0 MESI unit mask.
137 */ 137 */
138#define P6_NOP_COUNTER 0x0000002EULL 138#define P6_NOP_EVENT 0x0000002EULL
139 139
140static u64 p6_pmu_raw_event(u64 event) 140static u64 p6_pmu_raw_event(u64 hw_event)
141{ 141{
142#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL 142#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
143#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL 143#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
144#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL 144#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
145#define P6_EVNTSEL_INV_MASK 0x00800000ULL 145#define P6_EVNTSEL_INV_MASK 0x00800000ULL
146#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL 146#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
147 147
148#define P6_EVNTSEL_MASK \ 148#define P6_EVNTSEL_MASK \
149 (P6_EVNTSEL_EVENT_MASK | \ 149 (P6_EVNTSEL_EVENT_MASK | \
150 P6_EVNTSEL_UNIT_MASK | \ 150 P6_EVNTSEL_UNIT_MASK | \
151 P6_EVNTSEL_EDGE_MASK | \ 151 P6_EVNTSEL_EDGE_MASK | \
152 P6_EVNTSEL_INV_MASK | \ 152 P6_EVNTSEL_INV_MASK | \
153 P6_EVNTSEL_COUNTER_MASK) 153 P6_EVNTSEL_REG_MASK)
154 154
155 return event & P6_EVNTSEL_MASK; 155 return hw_event & P6_EVNTSEL_MASK;
156} 156}
157 157
158 158
@@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] =
170 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, 170 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
171}; 171};
172 172
173static u64 intel_pmu_event_map(int event) 173static u64 intel_pmu_event_map(int hw_event)
174{ 174{
175 return intel_perfmon_event_map[event]; 175 return intel_perfmon_event_map[hw_event];
176} 176}
177 177
178/* 178/*
179 * Generalized hw caching related event table, filled 179 * Generalized hw caching related hw_event table, filled
180 * in on a per model basis. A value of 0 means 180 * in on a per model basis. A value of 0 means
181 * 'not supported', -1 means 'event makes no sense on 181 * 'not supported', -1 means 'hw_event makes no sense on
182 * this CPU', any other value means the raw event 182 * this CPU', any other value means the raw hw_event
183 * ID. 183 * ID.
184 */ 184 */
185 185
@@ -463,22 +463,22 @@ static const u64 atom_hw_cache_event_ids
463 }, 463 },
464}; 464};
465 465
466static u64 intel_pmu_raw_event(u64 event) 466static u64 intel_pmu_raw_event(u64 hw_event)
467{ 467{
468#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL 468#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
469#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL 469#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
470#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL 470#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
471#define CORE_EVNTSEL_INV_MASK 0x00800000ULL 471#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
472#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL 472#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
473 473
474#define CORE_EVNTSEL_MASK \ 474#define CORE_EVNTSEL_MASK \
475 (CORE_EVNTSEL_EVENT_MASK | \ 475 (CORE_EVNTSEL_EVENT_MASK | \
476 CORE_EVNTSEL_UNIT_MASK | \ 476 CORE_EVNTSEL_UNIT_MASK | \
477 CORE_EVNTSEL_EDGE_MASK | \ 477 CORE_EVNTSEL_EDGE_MASK | \
478 CORE_EVNTSEL_INV_MASK | \ 478 CORE_EVNTSEL_INV_MASK | \
479 CORE_EVNTSEL_COUNTER_MASK) 479 CORE_EVNTSEL_REG_MASK)
480 480
481 return event & CORE_EVNTSEL_MASK; 481 return hw_event & CORE_EVNTSEL_MASK;
482} 482}
483 483
484static const u64 amd_hw_cache_event_ids 484static const u64 amd_hw_cache_event_ids
@@ -585,39 +585,39 @@ static const u64 amd_perfmon_event_map[] =
585 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 585 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
586}; 586};
587 587
588static u64 amd_pmu_event_map(int event) 588static u64 amd_pmu_event_map(int hw_event)
589{ 589{
590 return amd_perfmon_event_map[event]; 590 return amd_perfmon_event_map[hw_event];
591} 591}
592 592
593static u64 amd_pmu_raw_event(u64 event) 593static u64 amd_pmu_raw_event(u64 hw_event)
594{ 594{
595#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL 595#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
596#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL 596#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
597#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL 597#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
598#define K7_EVNTSEL_INV_MASK 0x000800000ULL 598#define K7_EVNTSEL_INV_MASK 0x000800000ULL
599#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL 599#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
600 600
601#define K7_EVNTSEL_MASK \ 601#define K7_EVNTSEL_MASK \
602 (K7_EVNTSEL_EVENT_MASK | \ 602 (K7_EVNTSEL_EVENT_MASK | \
603 K7_EVNTSEL_UNIT_MASK | \ 603 K7_EVNTSEL_UNIT_MASK | \
604 K7_EVNTSEL_EDGE_MASK | \ 604 K7_EVNTSEL_EDGE_MASK | \
605 K7_EVNTSEL_INV_MASK | \ 605 K7_EVNTSEL_INV_MASK | \
606 K7_EVNTSEL_COUNTER_MASK) 606 K7_EVNTSEL_REG_MASK)
607 607
608 return event & K7_EVNTSEL_MASK; 608 return hw_event & K7_EVNTSEL_MASK;
609} 609}
610 610
611/* 611/*
612 * Propagate counter elapsed time into the generic counter. 612 * Propagate event elapsed time into the generic event.
613 * Can only be executed on the CPU where the counter is active. 613 * Can only be executed on the CPU where the event is active.
614 * Returns the delta events processed. 614 * Returns the delta events processed.
615 */ 615 */
616static u64 616static u64
617x86_perf_counter_update(struct perf_counter *counter, 617x86_perf_event_update(struct perf_event *event,
618 struct hw_perf_counter *hwc, int idx) 618 struct hw_perf_event *hwc, int idx)
619{ 619{
620 int shift = 64 - x86_pmu.counter_bits; 620 int shift = 64 - x86_pmu.event_bits;
621 u64 prev_raw_count, new_raw_count; 621 u64 prev_raw_count, new_raw_count;
622 s64 delta; 622 s64 delta;
623 623
@@ -625,15 +625,15 @@ x86_perf_counter_update(struct perf_counter *counter,
625 return 0; 625 return 0;
626 626
627 /* 627 /*
628 * Careful: an NMI might modify the previous counter value. 628 * Careful: an NMI might modify the previous event value.
629 * 629 *
630 * Our tactic to handle this is to first atomically read and 630 * Our tactic to handle this is to first atomically read and
631 * exchange a new raw count - then add that new-prev delta 631 * exchange a new raw count - then add that new-prev delta
632 * count to the generic counter atomically: 632 * count to the generic event atomically:
633 */ 633 */
634again: 634again:
635 prev_raw_count = atomic64_read(&hwc->prev_count); 635 prev_raw_count = atomic64_read(&hwc->prev_count);
636 rdmsrl(hwc->counter_base + idx, new_raw_count); 636 rdmsrl(hwc->event_base + idx, new_raw_count);
637 637
638 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 638 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
639 new_raw_count) != prev_raw_count) 639 new_raw_count) != prev_raw_count)
@@ -642,7 +642,7 @@ again:
642 /* 642 /*
643 * Now we have the new raw value and have updated the prev 643 * Now we have the new raw value and have updated the prev
644 * timestamp already. We can now calculate the elapsed delta 644 * timestamp already. We can now calculate the elapsed delta
645 * (counter-)time and add that to the generic counter. 645 * (event-)time and add that to the generic event.
646 * 646 *
647 * Careful, not all hw sign-extends above the physical width 647 * Careful, not all hw sign-extends above the physical width
648 * of the count. 648 * of the count.
@@ -650,13 +650,13 @@ again:
650 delta = (new_raw_count << shift) - (prev_raw_count << shift); 650 delta = (new_raw_count << shift) - (prev_raw_count << shift);
651 delta >>= shift; 651 delta >>= shift;
652 652
653 atomic64_add(delta, &counter->count); 653 atomic64_add(delta, &event->count);
654 atomic64_sub(delta, &hwc->period_left); 654 atomic64_sub(delta, &hwc->period_left);
655 655
656 return new_raw_count; 656 return new_raw_count;
657} 657}
658 658
659static atomic_t active_counters; 659static atomic_t active_events;
660static DEFINE_MUTEX(pmc_reserve_mutex); 660static DEFINE_MUTEX(pmc_reserve_mutex);
661 661
662static bool reserve_pmc_hardware(void) 662static bool reserve_pmc_hardware(void)
@@ -667,12 +667,12 @@ static bool reserve_pmc_hardware(void)
667 if (nmi_watchdog == NMI_LOCAL_APIC) 667 if (nmi_watchdog == NMI_LOCAL_APIC)
668 disable_lapic_nmi_watchdog(); 668 disable_lapic_nmi_watchdog();
669 669
670 for (i = 0; i < x86_pmu.num_counters; i++) { 670 for (i = 0; i < x86_pmu.num_events; i++) {
671 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) 671 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
672 goto perfctr_fail; 672 goto perfctr_fail;
673 } 673 }
674 674
675 for (i = 0; i < x86_pmu.num_counters; i++) { 675 for (i = 0; i < x86_pmu.num_events; i++) {
676 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) 676 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
677 goto eventsel_fail; 677 goto eventsel_fail;
678 } 678 }
@@ -685,7 +685,7 @@ eventsel_fail:
685 for (i--; i >= 0; i--) 685 for (i--; i >= 0; i--)
686 release_evntsel_nmi(x86_pmu.eventsel + i); 686 release_evntsel_nmi(x86_pmu.eventsel + i);
687 687
688 i = x86_pmu.num_counters; 688 i = x86_pmu.num_events;
689 689
690perfctr_fail: 690perfctr_fail:
691 for (i--; i >= 0; i--) 691 for (i--; i >= 0; i--)
@@ -703,7 +703,7 @@ static void release_pmc_hardware(void)
703#ifdef CONFIG_X86_LOCAL_APIC 703#ifdef CONFIG_X86_LOCAL_APIC
704 int i; 704 int i;
705 705
706 for (i = 0; i < x86_pmu.num_counters; i++) { 706 for (i = 0; i < x86_pmu.num_events; i++) {
707 release_perfctr_nmi(x86_pmu.perfctr + i); 707 release_perfctr_nmi(x86_pmu.perfctr + i);
708 release_evntsel_nmi(x86_pmu.eventsel + i); 708 release_evntsel_nmi(x86_pmu.eventsel + i);
709 } 709 }
@@ -720,7 +720,7 @@ static inline bool bts_available(void)
720 720
721static inline void init_debug_store_on_cpu(int cpu) 721static inline void init_debug_store_on_cpu(int cpu)
722{ 722{
723 struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; 723 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
724 724
725 if (!ds) 725 if (!ds)
726 return; 726 return;
@@ -732,7 +732,7 @@ static inline void init_debug_store_on_cpu(int cpu)
732 732
733static inline void fini_debug_store_on_cpu(int cpu) 733static inline void fini_debug_store_on_cpu(int cpu)
734{ 734{
735 if (!per_cpu(cpu_hw_counters, cpu).ds) 735 if (!per_cpu(cpu_hw_events, cpu).ds)
736 return; 736 return;
737 737
738 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); 738 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
@@ -751,12 +751,12 @@ static void release_bts_hardware(void)
751 fini_debug_store_on_cpu(cpu); 751 fini_debug_store_on_cpu(cpu);
752 752
753 for_each_possible_cpu(cpu) { 753 for_each_possible_cpu(cpu) {
754 struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; 754 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
755 755
756 if (!ds) 756 if (!ds)
757 continue; 757 continue;
758 758
759 per_cpu(cpu_hw_counters, cpu).ds = NULL; 759 per_cpu(cpu_hw_events, cpu).ds = NULL;
760 760
761 kfree((void *)(unsigned long)ds->bts_buffer_base); 761 kfree((void *)(unsigned long)ds->bts_buffer_base);
762 kfree(ds); 762 kfree(ds);
@@ -796,7 +796,7 @@ static int reserve_bts_hardware(void)
796 ds->bts_interrupt_threshold = 796 ds->bts_interrupt_threshold =
797 ds->bts_absolute_maximum - BTS_OVFL_TH; 797 ds->bts_absolute_maximum - BTS_OVFL_TH;
798 798
799 per_cpu(cpu_hw_counters, cpu).ds = ds; 799 per_cpu(cpu_hw_events, cpu).ds = ds;
800 err = 0; 800 err = 0;
801 } 801 }
802 802
@@ -812,9 +812,9 @@ static int reserve_bts_hardware(void)
812 return err; 812 return err;
813} 813}
814 814
815static void hw_perf_counter_destroy(struct perf_counter *counter) 815static void hw_perf_event_destroy(struct perf_event *event)
816{ 816{
817 if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { 817 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
818 release_pmc_hardware(); 818 release_pmc_hardware();
819 release_bts_hardware(); 819 release_bts_hardware();
820 mutex_unlock(&pmc_reserve_mutex); 820 mutex_unlock(&pmc_reserve_mutex);
@@ -827,7 +827,7 @@ static inline int x86_pmu_initialized(void)
827} 827}
828 828
829static inline int 829static inline int
830set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) 830set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
831{ 831{
832 unsigned int cache_type, cache_op, cache_result; 832 unsigned int cache_type, cache_op, cache_result;
833 u64 config, val; 833 u64 config, val;
@@ -880,7 +880,7 @@ static void intel_pmu_enable_bts(u64 config)
880 880
881static void intel_pmu_disable_bts(void) 881static void intel_pmu_disable_bts(void)
882{ 882{
883 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 883 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
884 unsigned long debugctlmsr; 884 unsigned long debugctlmsr;
885 885
886 if (!cpuc->ds) 886 if (!cpuc->ds)
@@ -898,10 +898,10 @@ static void intel_pmu_disable_bts(void)
898/* 898/*
899 * Set up the hardware configuration for a given attr_type 899 * Set up the hardware configuration for a given attr_type
900 */ 900 */
901static int __hw_perf_counter_init(struct perf_counter *counter) 901static int __hw_perf_event_init(struct perf_event *event)
902{ 902{
903 struct perf_counter_attr *attr = &counter->attr; 903 struct perf_event_attr *attr = &event->attr;
904 struct hw_perf_counter *hwc = &counter->hw; 904 struct hw_perf_event *hwc = &event->hw;
905 u64 config; 905 u64 config;
906 int err; 906 int err;
907 907
@@ -909,21 +909,23 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
909 return -ENODEV; 909 return -ENODEV;
910 910
911 err = 0; 911 err = 0;
912 if (!atomic_inc_not_zero(&active_counters)) { 912 if (!atomic_inc_not_zero(&active_events)) {
913 mutex_lock(&pmc_reserve_mutex); 913 mutex_lock(&pmc_reserve_mutex);
914 if (atomic_read(&active_counters) == 0) { 914 if (atomic_read(&active_events) == 0) {
915 if (!reserve_pmc_hardware()) 915 if (!reserve_pmc_hardware())
916 err = -EBUSY; 916 err = -EBUSY;
917 else 917 else
918 err = reserve_bts_hardware(); 918 err = reserve_bts_hardware();
919 } 919 }
920 if (!err) 920 if (!err)
921 atomic_inc(&active_counters); 921 atomic_inc(&active_events);
922 mutex_unlock(&pmc_reserve_mutex); 922 mutex_unlock(&pmc_reserve_mutex);
923 } 923 }
924 if (err) 924 if (err)
925 return err; 925 return err;
926 926
927 event->destroy = hw_perf_event_destroy;
928
927 /* 929 /*
928 * Generate PMC IRQs: 930 * Generate PMC IRQs:
929 * (keep 'enabled' bit clear for now) 931 * (keep 'enabled' bit clear for now)
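The active_events handling above is the usual two-phase refcount: a lock-free atomic_inc_not_zero() fast path, plus a mutex-guarded slow path in which the first user performs the one-time hardware reservation before publishing its reference; hw_perf_event_destroy() mirrors it with atomic_dec_and_mutex_lock(). A hedged userspace approximation with C11 atomics and pthreads (reserve_hw/release_hw are placeholders, and this put path always takes the mutex, a simplification the kernel primitive avoids):

#include <stdatomic.h>
#include <pthread.h>
#include <stdbool.h>

static atomic_int active;            /* plays the role of active_events */
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

static bool reserve_hw(void) { return true; }   /* placeholder */
static void release_hw(void) { }                /* placeholder */

static int get_ref(void)
{
        int old = atomic_load(&active);

        /* Fast path: bump the count only if it is already non-zero,
         * i.e. atomic_inc_not_zero(). */
        while (old != 0)
                if (atomic_compare_exchange_weak(&active, &old, old + 1))
                        return 0;

        /* Slow path: the first user reserves the hardware under the
         * mutex before publishing its reference. */
        pthread_mutex_lock(&reserve_mutex);
        int err = 0;
        if (atomic_load(&active) == 0 && !reserve_hw())
                err = -1;
        if (!err)
                atomic_fetch_add(&active, 1);
        pthread_mutex_unlock(&reserve_mutex);
        return err;
}

static void put_ref(void)
{
        /* Simplified atomic_dec_and_mutex_lock(): tear down the one-time
         * reservation when the last reference goes away. */
        pthread_mutex_lock(&reserve_mutex);
        if (atomic_fetch_sub(&active, 1) == 1)
                release_hw();
        pthread_mutex_unlock(&reserve_mutex);
}

int main(void)
{
        if (get_ref() == 0)
                put_ref();
        return 0;
}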
@@ -946,17 +948,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
946 /* 948 /*
947 * If we have a PMU initialized but no APIC 949 * If we have a PMU initialized but no APIC
948 * interrupts, we cannot sample hardware 950 * interrupts, we cannot sample hardware
949 * counters (user-space has to fall back and 951 * events (user-space has to fall back and
950 * sample via a hrtimer based software counter): 952 * sample via a hrtimer based software event):
951 */ 953 */
952 if (!x86_pmu.apic) 954 if (!x86_pmu.apic)
953 return -EOPNOTSUPP; 955 return -EOPNOTSUPP;
954 } 956 }
955 957
956 counter->destroy = hw_perf_counter_destroy;
957
958 /* 958 /*
959 * Raw event type provides the config in the event structure 959 * Raw hw_event type provides the config in the hw_event structure
960 */ 960 */
961 if (attr->type == PERF_TYPE_RAW) { 961 if (attr->type == PERF_TYPE_RAW) {
962 hwc->config |= x86_pmu.raw_event(attr->config); 962 hwc->config |= x86_pmu.raw_event(attr->config);
@@ -1001,7 +1001,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
1001 1001
1002static void p6_pmu_disable_all(void) 1002static void p6_pmu_disable_all(void)
1003{ 1003{
1004 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1004 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1005 u64 val; 1005 u64 val;
1006 1006
1007 if (!cpuc->enabled) 1007 if (!cpuc->enabled)
@@ -1018,7 +1018,7 @@ static void p6_pmu_disable_all(void)
1018 1018
1019static void intel_pmu_disable_all(void) 1019static void intel_pmu_disable_all(void)
1020{ 1020{
1021 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1021 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1022 1022
1023 if (!cpuc->enabled) 1023 if (!cpuc->enabled)
1024 return; 1024 return;
@@ -1034,7 +1034,7 @@ static void intel_pmu_disable_all(void)
1034 1034
1035static void amd_pmu_disable_all(void) 1035static void amd_pmu_disable_all(void)
1036{ 1036{
1037 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1037 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1038 int idx; 1038 int idx;
1039 1039
1040 if (!cpuc->enabled) 1040 if (!cpuc->enabled)
@@ -1043,12 +1043,12 @@ static void amd_pmu_disable_all(void)
1043 cpuc->enabled = 0; 1043 cpuc->enabled = 0;
1044 /* 1044 /*
1045 * ensure we write the disable before we start disabling the 1045 * ensure we write the disable before we start disabling the
1046 * counters proper, so that amd_pmu_enable_counter() does the 1046 * events proper, so that amd_pmu_enable_event() does the
1047 * right thing. 1047 * right thing.
1048 */ 1048 */
1049 barrier(); 1049 barrier();
1050 1050
1051 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1051 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1052 u64 val; 1052 u64 val;
1053 1053
1054 if (!test_bit(idx, cpuc->active_mask)) 1054 if (!test_bit(idx, cpuc->active_mask))
@@ -1070,7 +1070,7 @@ void hw_perf_disable(void)
1070 1070
1071static void p6_pmu_enable_all(void) 1071static void p6_pmu_enable_all(void)
1072{ 1072{
1073 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1073 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1074 unsigned long val; 1074 unsigned long val;
1075 1075
1076 if (cpuc->enabled) 1076 if (cpuc->enabled)
@@ -1087,7 +1087,7 @@ static void p6_pmu_enable_all(void)
1087 1087
1088static void intel_pmu_enable_all(void) 1088static void intel_pmu_enable_all(void)
1089{ 1089{
1090 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1090 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1091 1091
1092 if (cpuc->enabled) 1092 if (cpuc->enabled)
1093 return; 1093 return;
@@ -1098,19 +1098,19 @@ static void intel_pmu_enable_all(void)
1098 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 1098 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1099 1099
1100 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 1100 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1101 struct perf_counter *counter = 1101 struct perf_event *event =
1102 cpuc->counters[X86_PMC_IDX_FIXED_BTS]; 1102 cpuc->events[X86_PMC_IDX_FIXED_BTS];
1103 1103
1104 if (WARN_ON_ONCE(!counter)) 1104 if (WARN_ON_ONCE(!event))
1105 return; 1105 return;
1106 1106
1107 intel_pmu_enable_bts(counter->hw.config); 1107 intel_pmu_enable_bts(event->hw.config);
1108 } 1108 }
1109} 1109}
1110 1110
1111static void amd_pmu_enable_all(void) 1111static void amd_pmu_enable_all(void)
1112{ 1112{
1113 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1113 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1114 int idx; 1114 int idx;
1115 1115
1116 if (cpuc->enabled) 1116 if (cpuc->enabled)
@@ -1119,14 +1119,14 @@ static void amd_pmu_enable_all(void)
1119 cpuc->enabled = 1; 1119 cpuc->enabled = 1;
1120 barrier(); 1120 barrier();
1121 1121
1122 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1122 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1123 struct perf_counter *counter = cpuc->counters[idx]; 1123 struct perf_event *event = cpuc->events[idx];
1124 u64 val; 1124 u64 val;
1125 1125
1126 if (!test_bit(idx, cpuc->active_mask)) 1126 if (!test_bit(idx, cpuc->active_mask))
1127 continue; 1127 continue;
1128 1128
1129 val = counter->hw.config; 1129 val = event->hw.config;
1130 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 1130 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1131 wrmsrl(MSR_K7_EVNTSEL0 + idx, val); 1131 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1132 } 1132 }
@@ -1153,19 +1153,19 @@ static inline void intel_pmu_ack_status(u64 ack)
1153 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 1153 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1154} 1154}
1155 1155
1156static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1156static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1157{ 1157{
1158 (void)checking_wrmsrl(hwc->config_base + idx, 1158 (void)checking_wrmsrl(hwc->config_base + idx,
1159 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); 1159 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1160} 1160}
1161 1161
1162static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1162static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1163{ 1163{
1164 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); 1164 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1165} 1165}
1166 1166
1167static inline void 1167static inline void
1168intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) 1168intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1169{ 1169{
1170 int idx = __idx - X86_PMC_IDX_FIXED; 1170 int idx = __idx - X86_PMC_IDX_FIXED;
1171 u64 ctrl_val, mask; 1171 u64 ctrl_val, mask;
@@ -1178,10 +1178,10 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
1178} 1178}
1179 1179
1180static inline void 1180static inline void
1181p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1181p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1182{ 1182{
1183 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1183 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1184 u64 val = P6_NOP_COUNTER; 1184 u64 val = P6_NOP_EVENT;
1185 1185
1186 if (cpuc->enabled) 1186 if (cpuc->enabled)
1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1190,7 +1190,7 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
1190} 1190}
1191 1191
1192static inline void 1192static inline void
1193intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1193intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1194{ 1194{
1195 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1195 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1196 intel_pmu_disable_bts(); 1196 intel_pmu_disable_bts();
@@ -1202,24 +1202,24 @@ intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
1202 return; 1202 return;
1203 } 1203 }
1204 1204
1205 x86_pmu_disable_counter(hwc, idx); 1205 x86_pmu_disable_event(hwc, idx);
1206} 1206}
1207 1207
1208static inline void 1208static inline void
1209amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1209amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1210{ 1210{
1211 x86_pmu_disable_counter(hwc, idx); 1211 x86_pmu_disable_event(hwc, idx);
1212} 1212}
1213 1213
1214static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 1214static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1215 1215
1216/* 1216/*
1217 * Set the next IRQ period, based on the hwc->period_left value. 1217 * Set the next IRQ period, based on the hwc->period_left value.
1218 * To be called with the counter disabled in hw: 1218 * To be called with the event disabled in hw:
1219 */ 1219 */
1220static int 1220static int
1221x86_perf_counter_set_period(struct perf_counter *counter, 1221x86_perf_event_set_period(struct perf_event *event,
1222 struct hw_perf_counter *hwc, int idx) 1222 struct hw_perf_event *hwc, int idx)
1223{ 1223{
1224 s64 left = atomic64_read(&hwc->period_left); 1224 s64 left = atomic64_read(&hwc->period_left);
1225 s64 period = hwc->sample_period; 1225 s64 period = hwc->sample_period;
@@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
1245 ret = 1; 1245 ret = 1;
1246 } 1246 }
1247 /* 1247 /*
1248 * Quirk: certain CPUs don't like it if just 1 event is left: 1248 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1249 */ 1249 */
1250 if (unlikely(left < 2)) 1250 if (unlikely(left < 2))
1251 left = 2; 1251 left = 2;
@@ -1256,21 +1256,21 @@ x86_perf_counter_set_period(struct perf_counter *counter,
1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; 1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1257 1257
1258 /* 1258 /*
1259 * The hw counter starts counting from this counter offset, 1259 * The hw event starts counting from this event offset,
1260 * mark it to be able to extract future deltas: 1260 * mark it to be able to extract future deltas:
1261 */ 1261 */
1262 atomic64_set(&hwc->prev_count, (u64)-left); 1262 atomic64_set(&hwc->prev_count, (u64)-left);
1263 1263
1264 err = checking_wrmsrl(hwc->counter_base + idx, 1264 err = checking_wrmsrl(hwc->event_base + idx,
1265 (u64)(-left) & x86_pmu.counter_mask); 1265 (u64)(-left) & x86_pmu.event_mask);
1266 1266
1267 perf_counter_update_userpage(counter); 1267 perf_event_update_userpage(event);
1268 1268
1269 return ret; 1269 return ret;
1270} 1270}
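x86_perf_event_set_period() arms the counter by writing the two's-complement of the remaining period, masked to the counter width, so the PMC counts up and overflows (raising the interrupt) after exactly 'left' increments. The arithmetic in isolation; the 46-bit width is an arbitrary example rather than a real PMU value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int bits = 46;                        /* hypothetical counter width */
        uint64_t mask = (1ULL << bits) - 1;   /* like x86_pmu.event_mask */
        int64_t left = 100000;                /* remaining sample period */

        /* Writing -left, truncated to the counter width, makes the
         * up-counter overflow after exactly 'left' increments. */
        uint64_t programmed = (uint64_t)(-left) & mask;

        printf("write %#llx, overflow after %llu ticks\n",
               (unsigned long long)programmed,
               (unsigned long long)(mask + 1 - programmed));
        return 0;
}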
1271 1271
1272static inline void 1272static inline void
1273intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) 1273intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1274{ 1274{
1275 int idx = __idx - X86_PMC_IDX_FIXED; 1275 int idx = __idx - X86_PMC_IDX_FIXED;
1276 u64 ctrl_val, bits, mask; 1276 u64 ctrl_val, bits, mask;
@@ -1295,9 +1295,9 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
1295 err = checking_wrmsrl(hwc->config_base, ctrl_val); 1295 err = checking_wrmsrl(hwc->config_base, ctrl_val);
1296} 1296}
1297 1297
1298static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1298static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1299{ 1299{
1300 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1300 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1301 u64 val; 1301 u64 val;
1302 1302
1303 val = hwc->config; 1303 val = hwc->config;
@@ -1308,10 +1308,10 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
1308} 1308}
1309 1309
1310 1310
1311static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1311static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1312{ 1312{
1313 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1313 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1314 if (!__get_cpu_var(cpu_hw_counters).enabled) 1314 if (!__get_cpu_var(cpu_hw_events).enabled)
1315 return; 1315 return;
1316 1316
1317 intel_pmu_enable_bts(hwc->config); 1317 intel_pmu_enable_bts(hwc->config);
@@ -1323,134 +1323,134 @@ static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
1323 return; 1323 return;
1324 } 1324 }
1325 1325
1326 x86_pmu_enable_counter(hwc, idx); 1326 x86_pmu_enable_event(hwc, idx);
1327} 1327}
1328 1328
1329static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1329static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1330{ 1330{
1331 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1331 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1332 1332
1333 if (cpuc->enabled) 1333 if (cpuc->enabled)
1334 x86_pmu_enable_counter(hwc, idx); 1334 x86_pmu_enable_event(hwc, idx);
1335} 1335}
1336 1336
1337static int 1337static int
1338fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) 1338fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
1339{ 1339{
1340 unsigned int event; 1340 unsigned int hw_event;
1341 1341
1342 event = hwc->config & ARCH_PERFMON_EVENT_MASK; 1342 hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
1343 1343
1344 if (unlikely((event == 1344 if (unlikely((hw_event ==
1345 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && 1345 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
1346 (hwc->sample_period == 1))) 1346 (hwc->sample_period == 1)))
1347 return X86_PMC_IDX_FIXED_BTS; 1347 return X86_PMC_IDX_FIXED_BTS;
1348 1348
1349 if (!x86_pmu.num_counters_fixed) 1349 if (!x86_pmu.num_events_fixed)
1350 return -1; 1350 return -1;
1351 1351
1352 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) 1352 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
1353 return X86_PMC_IDX_FIXED_INSTRUCTIONS; 1353 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
1354 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) 1354 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
1355 return X86_PMC_IDX_FIXED_CPU_CYCLES; 1355 return X86_PMC_IDX_FIXED_CPU_CYCLES;
1356 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) 1356 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
1357 return X86_PMC_IDX_FIXED_BUS_CYCLES; 1357 return X86_PMC_IDX_FIXED_BUS_CYCLES;
1358 1358
1359 return -1; 1359 return -1;
1360} 1360}
1361 1361
1362/* 1362/*
1363 * Find a PMC slot for the freshly enabled / scheduled in counter: 1363 * Find a PMC slot for the freshly enabled / scheduled in event:
1364 */ 1364 */
1365static int x86_pmu_enable(struct perf_counter *counter) 1365static int x86_pmu_enable(struct perf_event *event)
1366{ 1366{
1367 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1367 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1368 struct hw_perf_counter *hwc = &counter->hw; 1368 struct hw_perf_event *hwc = &event->hw;
1369 int idx; 1369 int idx;
1370 1370
1371 idx = fixed_mode_idx(counter, hwc); 1371 idx = fixed_mode_idx(event, hwc);
1372 if (idx == X86_PMC_IDX_FIXED_BTS) { 1372 if (idx == X86_PMC_IDX_FIXED_BTS) {
1373 /* BTS is already occupied. */ 1373 /* BTS is already occupied. */
1374 if (test_and_set_bit(idx, cpuc->used_mask)) 1374 if (test_and_set_bit(idx, cpuc->used_mask))
1375 return -EAGAIN; 1375 return -EAGAIN;
1376 1376
1377 hwc->config_base = 0; 1377 hwc->config_base = 0;
1378 hwc->counter_base = 0; 1378 hwc->event_base = 0;
1379 hwc->idx = idx; 1379 hwc->idx = idx;
1380 } else if (idx >= 0) { 1380 } else if (idx >= 0) {
1381 /* 1381 /*
1382 * Try to get the fixed counter; if that is already taken 1382 * Try to get the fixed event; if that is already taken
1383 * then try to get a generic counter: 1383 * then try to get a generic event:
1384 */ 1384 */
1385 if (test_and_set_bit(idx, cpuc->used_mask)) 1385 if (test_and_set_bit(idx, cpuc->used_mask))
1386 goto try_generic; 1386 goto try_generic;
1387 1387
1388 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; 1388 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1389 /* 1389 /*
1390 * We set it so that counter_base + idx in wrmsr/rdmsr maps to 1390 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1391 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: 1391 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1392 */ 1392 */
1393 hwc->counter_base = 1393 hwc->event_base =
1394 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; 1394 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1395 hwc->idx = idx; 1395 hwc->idx = idx;
1396 } else { 1396 } else {
1397 idx = hwc->idx; 1397 idx = hwc->idx;
1398 /* Try to get the previous generic counter again */ 1398 /* Try to get the previous generic event again */
1399 if (test_and_set_bit(idx, cpuc->used_mask)) { 1399 if (test_and_set_bit(idx, cpuc->used_mask)) {
1400try_generic: 1400try_generic:
1401 idx = find_first_zero_bit(cpuc->used_mask, 1401 idx = find_first_zero_bit(cpuc->used_mask,
1402 x86_pmu.num_counters); 1402 x86_pmu.num_events);
1403 if (idx == x86_pmu.num_counters) 1403 if (idx == x86_pmu.num_events)
1404 return -EAGAIN; 1404 return -EAGAIN;
1405 1405
1406 set_bit(idx, cpuc->used_mask); 1406 set_bit(idx, cpuc->used_mask);
1407 hwc->idx = idx; 1407 hwc->idx = idx;
1408 } 1408 }
1409 hwc->config_base = x86_pmu.eventsel; 1409 hwc->config_base = x86_pmu.eventsel;
1410 hwc->counter_base = x86_pmu.perfctr; 1410 hwc->event_base = x86_pmu.perfctr;
1411 } 1411 }
1412 1412
1413 perf_counters_lapic_init(); 1413 perf_events_lapic_init();
1414 1414
1415 x86_pmu.disable(hwc, idx); 1415 x86_pmu.disable(hwc, idx);
1416 1416
1417 cpuc->counters[idx] = counter; 1417 cpuc->events[idx] = event;
1418 set_bit(idx, cpuc->active_mask); 1418 set_bit(idx, cpuc->active_mask);
1419 1419
1420 x86_perf_counter_set_period(counter, hwc, idx); 1420 x86_perf_event_set_period(event, hwc, idx);
1421 x86_pmu.enable(hwc, idx); 1421 x86_pmu.enable(hwc, idx);
1422 1422
1423 perf_counter_update_userpage(counter); 1423 perf_event_update_userpage(event);
1424 1424
1425 return 0; 1425 return 0;
1426} 1426}
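The slot allocator in x86_pmu_enable() is bitmap-based: try the preferred index (a fixed counter, or the event's previous slot) with test_and_set_bit(), then fall back to the first free generic slot via find_first_zero_bit(), returning -EAGAIN when everything is busy. A rough single-threaded imitation; the kernel's bit operations are atomic, this sketch is not:

#include <stdio.h>

#define NUM_SLOTS 4                 /* stand-in for x86_pmu.num_events */

static unsigned long used_mask;     /* plays the role of cpuc->used_mask */

/* Non-atomic test_and_set_bit() lookalike. */
static int test_and_set(int idx)
{
        int was_set = !!(used_mask & (1UL << idx));

        used_mask |= 1UL << idx;
        return was_set;
}

/* Preferred slot first, else the first free one, else failure. */
static int alloc_slot(int preferred)
{
        if (preferred >= 0 && !test_and_set(preferred))
                return preferred;

        for (int idx = 0; idx < NUM_SLOTS; idx++)
                if (!test_and_set(idx))
                        return idx;   /* find_first_zero_bit() analogue */

        return -1;                    /* -EAGAIN in the kernel */
}

int main(void)
{
        int a = alloc_slot(2), b = alloc_slot(2), c = alloc_slot(-1);

        printf("%d %d %d\n", a, b, c);   /* 2 0 1: slot 2 was taken */
        return 0;
}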
1427 1427
1428static void x86_pmu_unthrottle(struct perf_counter *counter) 1428static void x86_pmu_unthrottle(struct perf_event *event)
1429{ 1429{
1430 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1430 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1431 struct hw_perf_counter *hwc = &counter->hw; 1431 struct hw_perf_event *hwc = &event->hw;
1432 1432
1433 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || 1433 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1434 cpuc->counters[hwc->idx] != counter)) 1434 cpuc->events[hwc->idx] != event))
1435 return; 1435 return;
1436 1436
1437 x86_pmu.enable(hwc, hwc->idx); 1437 x86_pmu.enable(hwc, hwc->idx);
1438} 1438}
1439 1439
1440void perf_counter_print_debug(void) 1440void perf_event_print_debug(void)
1441{ 1441{
1442 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; 1442 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1443 struct cpu_hw_counters *cpuc; 1443 struct cpu_hw_events *cpuc;
1444 unsigned long flags; 1444 unsigned long flags;
1445 int cpu, idx; 1445 int cpu, idx;
1446 1446
1447 if (!x86_pmu.num_counters) 1447 if (!x86_pmu.num_events)
1448 return; 1448 return;
1449 1449
1450 local_irq_save(flags); 1450 local_irq_save(flags);
1451 1451
1452 cpu = smp_processor_id(); 1452 cpu = smp_processor_id();
1453 cpuc = &per_cpu(cpu_hw_counters, cpu); 1453 cpuc = &per_cpu(cpu_hw_events, cpu);
1454 1454
1455 if (x86_pmu.version >= 2) { 1455 if (x86_pmu.version >= 2) {
1456 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); 1456 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
@@ -1466,7 +1466,7 @@ void perf_counter_print_debug(void)
1466 } 1466 }
1467 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); 1467 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
1468 1468
1469 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1469 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); 1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count); 1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1472 1472
@@ -1479,7 +1479,7 @@ void perf_counter_print_debug(void)
1479 pr_info("CPU#%d: gen-PMC%d left: %016llx\n", 1479 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1480 cpu, idx, prev_left); 1480 cpu, idx, prev_left);
1481 } 1481 }
1482 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1482 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1483 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); 1483 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1484 1484
1485 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", 1485 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1488,8 +1488,7 @@ void perf_counter_print_debug(void)
1488 local_irq_restore(flags); 1488 local_irq_restore(flags);
1489} 1489}
1490 1490
1491static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, 1491static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1492 struct perf_sample_data *data)
1493{ 1492{
1494 struct debug_store *ds = cpuc->ds; 1493 struct debug_store *ds = cpuc->ds;
1495 struct bts_record { 1494 struct bts_record {
@@ -1497,11 +1496,14 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc,
1497 u64 to; 1496 u64 to;
1498 u64 flags; 1497 u64 flags;
1499 }; 1498 };
1500 struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; 1499 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1501 unsigned long orig_ip = data->regs->ip;
1502 struct bts_record *at, *top; 1500 struct bts_record *at, *top;
1501 struct perf_output_handle handle;
1502 struct perf_event_header header;
1503 struct perf_sample_data data;
1504 struct pt_regs regs;
1503 1505
1504 if (!counter) 1506 if (!event)
1505 return; 1507 return;
1506 1508
1507 if (!ds) 1509 if (!ds)
@@ -1510,26 +1512,45 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc,
1510 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; 1512 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1511 top = (struct bts_record *)(unsigned long)ds->bts_index; 1513 top = (struct bts_record *)(unsigned long)ds->bts_index;
1512 1514
1515 if (top <= at)
1516 return;
1517
1513 ds->bts_index = ds->bts_buffer_base; 1518 ds->bts_index = ds->bts_buffer_base;
1514 1519
1520
1521 data.period = event->hw.last_period;
1522 data.addr = 0;
1523 regs.ip = 0;
1524
1525 /*
1526 * Prepare a generic sample, i.e. fill in the invariant fields.
1527 * We will overwrite the from and to addresses before we output
1528 * the sample.
1529 */
1530 perf_prepare_sample(&header, &data, event, &regs);
1531
1532 if (perf_output_begin(&handle, event,
1533 header.size * (top - at), 1, 1))
1534 return;
1535
1515 for (; at < top; at++) { 1536 for (; at < top; at++) {
1516 data->regs->ip = at->from; 1537 data.ip = at->from;
1517 data->addr = at->to; 1538 data.addr = at->to;
1518 1539
1519 perf_counter_output(counter, 1, data); 1540 perf_output_sample(&handle, &header, &data, event);
1520 } 1541 }
1521 1542
1522 data->regs->ip = orig_ip; 1543 perf_output_end(&handle);
1523 data->addr = 0;
1524 1544
1525 /* There's new data available. */ 1545 /* There's new data available. */
1526 counter->pending_kill = POLL_IN; 1546 event->hw.interrupts++;
1547 event->pending_kill = POLL_IN;
1527} 1548}
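The rewritten drain path sizes its output up front, header.size times the number of pending BTS records, reserves that space with a single perf_output_begin(), and then emits every record, instead of building a complete sample per record as the old code did. A loose standalone analogue of batch-draining such a buffer; only the three-word record layout mirrors the hardware, the output side is invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct bts_record { uint64_t from, to, flags; };   /* hardware layout */

/* Drain [at, top) as one batch: size the output first, then emit. */
static size_t drain_bts(const struct bts_record *at,
                        const struct bts_record *top,
                        uint64_t *out, size_t out_cap)
{
        size_t n = top - at;            /* number of pending records */

        if (n == 0 || 2 * n > out_cap)  /* one up-front reservation */
                return 0;

        for (size_t i = 0; i < n; i++) {
                out[2 * i]     = at[i].from;  /* data.ip in the kernel */
                out[2 * i + 1] = at[i].to;    /* data.addr in the kernel */
        }
        return n;
}

int main(void)
{
        struct bts_record buf[2] = { { 0x401000, 0x402000, 0 },
                                     { 0x402000, 0x401000, 0 } };
        uint64_t out[8];

        printf("drained %zu records\n", drain_bts(buf, buf + 2, out, 8));
        return 0;
}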
1528 1549
1529static void x86_pmu_disable(struct perf_counter *counter) 1550static void x86_pmu_disable(struct perf_event *event)
1530{ 1551{
1531 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1552 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1532 struct hw_perf_counter *hwc = &counter->hw; 1553 struct hw_perf_event *hwc = &event->hw;
1533 int idx = hwc->idx; 1554 int idx = hwc->idx;
1534 1555
1535 /* 1556 /*
@@ -1541,67 +1562,63 @@ static void x86_pmu_disable(struct perf_counter *counter)
1541 1562
1542 /* 1563 /*
1543 * Make sure the cleared pointer becomes visible before we 1564 * Make sure the cleared pointer becomes visible before we
1544 * (potentially) free the counter: 1565 * (potentially) free the event:
1545 */ 1566 */
1546 barrier(); 1567 barrier();
1547 1568
1548 /* 1569 /*
1549 * Drain the remaining delta count out of a counter 1570 * Drain the remaining delta count out of an event
1550 * that we are disabling: 1571 * that we are disabling:
1551 */ 1572 */
1552 x86_perf_counter_update(counter, hwc, idx); 1573 x86_perf_event_update(event, hwc, idx);
1553 1574
1554 /* Drain the remaining BTS records. */ 1575 /* Drain the remaining BTS records. */
1555 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1576 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1556 struct perf_sample_data data; 1577 intel_pmu_drain_bts_buffer(cpuc);
1557 struct pt_regs regs;
1558 1578
1559 data.regs = &regs; 1579 cpuc->events[idx] = NULL;
1560 intel_pmu_drain_bts_buffer(cpuc, &data);
1561 }
1562 cpuc->counters[idx] = NULL;
1563 clear_bit(idx, cpuc->used_mask); 1580 clear_bit(idx, cpuc->used_mask);
1564 1581
1565 perf_counter_update_userpage(counter); 1582 perf_event_update_userpage(event);
1566} 1583}
1567 1584
1568/* 1585/*
1569 * Save and restart an expired counter. Called by NMI contexts, 1586 * Save and restart an expired event. Called by NMI contexts,
1570 * so it has to be careful about preempting normal counter ops: 1587 * so it has to be careful about preempting normal event ops:
1571 */ 1588 */
1572static int intel_pmu_save_and_restart(struct perf_counter *counter) 1589static int intel_pmu_save_and_restart(struct perf_event *event)
1573{ 1590{
1574 struct hw_perf_counter *hwc = &counter->hw; 1591 struct hw_perf_event *hwc = &event->hw;
1575 int idx = hwc->idx; 1592 int idx = hwc->idx;
1576 int ret; 1593 int ret;
1577 1594
1578 x86_perf_counter_update(counter, hwc, idx); 1595 x86_perf_event_update(event, hwc, idx);
1579 ret = x86_perf_counter_set_period(counter, hwc, idx); 1596 ret = x86_perf_event_set_period(event, hwc, idx);
1580 1597
1581 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 1598 if (event->state == PERF_EVENT_STATE_ACTIVE)
1582 intel_pmu_enable_counter(hwc, idx); 1599 intel_pmu_enable_event(hwc, idx);
1583 1600
1584 return ret; 1601 return ret;
1585} 1602}
1586 1603
1587static void intel_pmu_reset(void) 1604static void intel_pmu_reset(void)
1588{ 1605{
1589 struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; 1606 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1590 unsigned long flags; 1607 unsigned long flags;
1591 int idx; 1608 int idx;
1592 1609
1593 if (!x86_pmu.num_counters) 1610 if (!x86_pmu.num_events)
1594 return; 1611 return;
1595 1612
1596 local_irq_save(flags); 1613 local_irq_save(flags);
1597 1614
1598 printk("clearing PMU state on CPU#%d\n", smp_processor_id()); 1615 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1599 1616
1600 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1617 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1601 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); 1618 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1602 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); 1619 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1603 } 1620 }
1604 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1621 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1605 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 1622 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1606 } 1623 }
1607 if (ds) 1624 if (ds)
@@ -1613,39 +1630,38 @@ static void intel_pmu_reset(void)
1613static int p6_pmu_handle_irq(struct pt_regs *regs) 1630static int p6_pmu_handle_irq(struct pt_regs *regs)
1614{ 1631{
1615 struct perf_sample_data data; 1632 struct perf_sample_data data;
1616 struct cpu_hw_counters *cpuc; 1633 struct cpu_hw_events *cpuc;
1617 struct perf_counter *counter; 1634 struct perf_event *event;
1618 struct hw_perf_counter *hwc; 1635 struct hw_perf_event *hwc;
1619 int idx, handled = 0; 1636 int idx, handled = 0;
1620 u64 val; 1637 u64 val;
1621 1638
1622 data.regs = regs;
1623 data.addr = 0; 1639 data.addr = 0;
1624 1640
1625 cpuc = &__get_cpu_var(cpu_hw_counters); 1641 cpuc = &__get_cpu_var(cpu_hw_events);
1626 1642
1627 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1643 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1628 if (!test_bit(idx, cpuc->active_mask)) 1644 if (!test_bit(idx, cpuc->active_mask))
1629 continue; 1645 continue;
1630 1646
1631 counter = cpuc->counters[idx]; 1647 event = cpuc->events[idx];
1632 hwc = &counter->hw; 1648 hwc = &event->hw;
1633 1649
1634 val = x86_perf_counter_update(counter, hwc, idx); 1650 val = x86_perf_event_update(event, hwc, idx);
1635 if (val & (1ULL << (x86_pmu.counter_bits - 1))) 1651 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1636 continue; 1652 continue;
1637 1653
1638 /* 1654 /*
1639 * counter overflow 1655 * event overflow
1640 */ 1656 */
1641 handled = 1; 1657 handled = 1;
1642 data.period = counter->hw.last_period; 1658 data.period = event->hw.last_period;
1643 1659
1644 if (!x86_perf_counter_set_period(counter, hwc, idx)) 1660 if (!x86_perf_event_set_period(event, hwc, idx))
1645 continue; 1661 continue;
1646 1662
1647 if (perf_counter_overflow(counter, 1, &data)) 1663 if (perf_event_overflow(event, 1, &data, regs))
1648 p6_pmu_disable_counter(hwc, idx); 1664 p6_pmu_disable_event(hwc, idx);
1649 } 1665 }
1650 1666
1651 if (handled) 1667 if (handled)
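Lacking a global overflow status register, the P6 and AMD handlers poll every active counter: after x86_perf_event_update(), a still-set most-significant bit means the counter is still climbing through the 'negative' range toward zero and has not overflowed. The test in isolation (48 bits matches the AMD width declared further down):

#include <stdio.h>
#include <stdint.h>

/* MSB still set means the counter is still below zero in its own
 * two's-complement space, i.e. it has not wrapped yet. */
static int overflowed(uint64_t raw, int bits)
{
        return !(raw & (1ULL << (bits - 1)));
}

int main(void)
{
        int bits = 48;
        uint64_t before_wrap = (1ULL << 48) - 123;   /* -123, still counting */
        uint64_t after_wrap  = 42;                   /* counted past zero */

        printf("%d %d\n", overflowed(before_wrap, bits),
                          overflowed(after_wrap, bits));   /* 0 1 */
        return 0;
}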
@@ -1661,17 +1677,16 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
1661static int intel_pmu_handle_irq(struct pt_regs *regs) 1677static int intel_pmu_handle_irq(struct pt_regs *regs)
1662{ 1678{
1663 struct perf_sample_data data; 1679 struct perf_sample_data data;
1664 struct cpu_hw_counters *cpuc; 1680 struct cpu_hw_events *cpuc;
1665 int bit, loops; 1681 int bit, loops;
1666 u64 ack, status; 1682 u64 ack, status;
1667 1683
1668 data.regs = regs;
1669 data.addr = 0; 1684 data.addr = 0;
1670 1685
1671 cpuc = &__get_cpu_var(cpu_hw_counters); 1686 cpuc = &__get_cpu_var(cpu_hw_events);
1672 1687
1673 perf_disable(); 1688 perf_disable();
1674 intel_pmu_drain_bts_buffer(cpuc, &data); 1689 intel_pmu_drain_bts_buffer(cpuc);
1675 status = intel_pmu_get_status(); 1690 status = intel_pmu_get_status();
1676 if (!status) { 1691 if (!status) {
1677 perf_enable(); 1692 perf_enable();
@@ -1681,8 +1696,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1681 loops = 0; 1696 loops = 0;
1682again: 1697again:
1683 if (++loops > 100) { 1698 if (++loops > 100) {
1684 WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); 1699 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1685 perf_counter_print_debug(); 1700 perf_event_print_debug();
1686 intel_pmu_reset(); 1701 intel_pmu_reset();
1687 perf_enable(); 1702 perf_enable();
1688 return 1; 1703 return 1;
@@ -1691,19 +1706,19 @@ again:
1691 inc_irq_stat(apic_perf_irqs); 1706 inc_irq_stat(apic_perf_irqs);
1692 ack = status; 1707 ack = status;
1693 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 1708 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1694 struct perf_counter *counter = cpuc->counters[bit]; 1709 struct perf_event *event = cpuc->events[bit];
1695 1710
1696 clear_bit(bit, (unsigned long *) &status); 1711 clear_bit(bit, (unsigned long *) &status);
1697 if (!test_bit(bit, cpuc->active_mask)) 1712 if (!test_bit(bit, cpuc->active_mask))
1698 continue; 1713 continue;
1699 1714
1700 if (!intel_pmu_save_and_restart(counter)) 1715 if (!intel_pmu_save_and_restart(event))
1701 continue; 1716 continue;
1702 1717
1703 data.period = counter->hw.last_period; 1718 data.period = event->hw.last_period;
1704 1719
1705 if (perf_counter_overflow(counter, 1, &data)) 1720 if (perf_event_overflow(event, 1, &data, regs))
1706 intel_pmu_disable_counter(&counter->hw, bit); 1721 intel_pmu_disable_event(&event->hw, bit);
1707 } 1722 }
1708 1723
1709 intel_pmu_ack_status(ack); 1724 intel_pmu_ack_status(ack);
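The Intel handler works the other way: it reads GLOBAL_STATUS once, walks the set bits, and acknowledges the whole batch with one write to GLOBAL_OVF_CTRL, re-reading status in case more overflows arrived (bounded by the 100-iteration escape above). A sketch of the bit walk; the ctz builtin is a GCC/Clang-ism standing in for the kernel's for_each_bit():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Pretend MSR_CORE_PERF_GLOBAL_STATUS read back these bits. */
        uint64_t status = (1ULL << 0) | (1ULL << 2) | (1ULL << 33);
        uint64_t ack = status;          /* acknowledge exactly what we saw */

        while (status) {
                int bit = __builtin_ctzll(status);  /* lowest set bit */

                status &= status - 1;               /* clear it */
                printf("handle overflow on counter %d\n", bit);
        }
        /* ...then write 'ack' to MSR_CORE_PERF_GLOBAL_OVF_CTRL. */
        (void)ack;
        return 0;
}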
@@ -1723,39 +1738,38 @@ again:
1723static int amd_pmu_handle_irq(struct pt_regs *regs) 1738static int amd_pmu_handle_irq(struct pt_regs *regs)
1724{ 1739{
1725 struct perf_sample_data data; 1740 struct perf_sample_data data;
1726 struct cpu_hw_counters *cpuc; 1741 struct cpu_hw_events *cpuc;
1727 struct perf_counter *counter; 1742 struct perf_event *event;
1728 struct hw_perf_counter *hwc; 1743 struct hw_perf_event *hwc;
1729 int idx, handled = 0; 1744 int idx, handled = 0;
1730 u64 val; 1745 u64 val;
1731 1746
1732 data.regs = regs;
1733 data.addr = 0; 1747 data.addr = 0;
1734 1748
1735 cpuc = &__get_cpu_var(cpu_hw_counters); 1749 cpuc = &__get_cpu_var(cpu_hw_events);
1736 1750
1737 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1751 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1738 if (!test_bit(idx, cpuc->active_mask)) 1752 if (!test_bit(idx, cpuc->active_mask))
1739 continue; 1753 continue;
1740 1754
1741 counter = cpuc->counters[idx]; 1755 event = cpuc->events[idx];
1742 hwc = &counter->hw; 1756 hwc = &event->hw;
1743 1757
1744 val = x86_perf_counter_update(counter, hwc, idx); 1758 val = x86_perf_event_update(event, hwc, idx);
1745 if (val & (1ULL << (x86_pmu.counter_bits - 1))) 1759 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1746 continue; 1760 continue;
1747 1761
1748 /* 1762 /*
1749 * counter overflow 1763 * event overflow
1750 */ 1764 */
1751 handled = 1; 1765 handled = 1;
1752 data.period = counter->hw.last_period; 1766 data.period = event->hw.last_period;
1753 1767
1754 if (!x86_perf_counter_set_period(counter, hwc, idx)) 1768 if (!x86_perf_event_set_period(event, hwc, idx))
1755 continue; 1769 continue;
1756 1770
1757 if (perf_counter_overflow(counter, 1, &data)) 1771 if (perf_event_overflow(event, 1, &data, regs))
1758 amd_pmu_disable_counter(hwc, idx); 1772 amd_pmu_disable_event(hwc, idx);
1759 } 1773 }
1760 1774
1761 if (handled) 1775 if (handled)
@@ -1769,18 +1783,18 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
1769 irq_enter(); 1783 irq_enter();
1770 ack_APIC_irq(); 1784 ack_APIC_irq();
1771 inc_irq_stat(apic_pending_irqs); 1785 inc_irq_stat(apic_pending_irqs);
1772 perf_counter_do_pending(); 1786 perf_event_do_pending();
1773 irq_exit(); 1787 irq_exit();
1774} 1788}
1775 1789
1776void set_perf_counter_pending(void) 1790void set_perf_event_pending(void)
1777{ 1791{
1778#ifdef CONFIG_X86_LOCAL_APIC 1792#ifdef CONFIG_X86_LOCAL_APIC
1779 apic->send_IPI_self(LOCAL_PENDING_VECTOR); 1793 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
1780#endif 1794#endif
1781} 1795}
1782 1796
1783void perf_counters_lapic_init(void) 1797void perf_events_lapic_init(void)
1784{ 1798{
1785#ifdef CONFIG_X86_LOCAL_APIC 1799#ifdef CONFIG_X86_LOCAL_APIC
1786 if (!x86_pmu.apic || !x86_pmu_initialized()) 1800 if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1794,13 +1808,13 @@ void perf_counters_lapic_init(void)
1794} 1808}
1795 1809
1796static int __kprobes 1810static int __kprobes
1797perf_counter_nmi_handler(struct notifier_block *self, 1811perf_event_nmi_handler(struct notifier_block *self,
1798 unsigned long cmd, void *__args) 1812 unsigned long cmd, void *__args)
1799{ 1813{
1800 struct die_args *args = __args; 1814 struct die_args *args = __args;
1801 struct pt_regs *regs; 1815 struct pt_regs *regs;
1802 1816
1803 if (!atomic_read(&active_counters)) 1817 if (!atomic_read(&active_events))
1804 return NOTIFY_DONE; 1818 return NOTIFY_DONE;
1805 1819
1806 switch (cmd) { 1820 switch (cmd) {
@@ -1819,7 +1833,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
1819#endif 1833#endif
1820 /* 1834 /*
1821 * Can't rely on the handled return value to say it was our NMI; two 1835 * Can't rely on the handled return value to say it was our NMI; two
1822 * counters could trigger 'simultaneously', raising two back-to-back NMIs. 1836 * events could trigger 'simultaneously', raising two back-to-back NMIs.
1823 * 1837 *
1824 * If the first NMI handles both, the latter will be empty and daze 1838 * If the first NMI handles both, the latter will be empty and daze
1825 * the CPU. 1839 * the CPU.
@@ -1829,8 +1843,8 @@ perf_counter_nmi_handler(struct notifier_block *self,
1829 return NOTIFY_STOP; 1843 return NOTIFY_STOP;
1830} 1844}
1831 1845
1832static __read_mostly struct notifier_block perf_counter_nmi_notifier = { 1846static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1833 .notifier_call = perf_counter_nmi_handler, 1847 .notifier_call = perf_event_nmi_handler,
1834 .next = NULL, 1848 .next = NULL,
1835 .priority = 1 1849 .priority = 1
1836}; 1850};
@@ -1840,8 +1854,8 @@ static struct x86_pmu p6_pmu = {
1840 .handle_irq = p6_pmu_handle_irq, 1854 .handle_irq = p6_pmu_handle_irq,
1841 .disable_all = p6_pmu_disable_all, 1855 .disable_all = p6_pmu_disable_all,
1842 .enable_all = p6_pmu_enable_all, 1856 .enable_all = p6_pmu_enable_all,
1843 .enable = p6_pmu_enable_counter, 1857 .enable = p6_pmu_enable_event,
1844 .disable = p6_pmu_disable_counter, 1858 .disable = p6_pmu_disable_event,
1845 .eventsel = MSR_P6_EVNTSEL0, 1859 .eventsel = MSR_P6_EVNTSEL0,
1846 .perfctr = MSR_P6_PERFCTR0, 1860 .perfctr = MSR_P6_PERFCTR0,
1847 .event_map = p6_pmu_event_map, 1861 .event_map = p6_pmu_event_map,
@@ -1850,16 +1864,16 @@ static struct x86_pmu p6_pmu = {
1850 .apic = 1, 1864 .apic = 1,
1851 .max_period = (1ULL << 31) - 1, 1865 .max_period = (1ULL << 31) - 1,
1852 .version = 0, 1866 .version = 0,
1853 .num_counters = 2, 1867 .num_events = 2,
1854 /* 1868 /*
1855 * Counters have 40 bits implemented. However, they are designed such 1869 * Events have 40 bits implemented. However, they are designed such
1856 * that bits [32-39] are sign extensions of bit 31. As such the 1870 * that bits [32-39] are sign extensions of bit 31. As such the
1857 * effective width of a counter for a P6-like PMU is 32 bits only. 1871 * effective width of an event for a P6-like PMU is 32 bits only.
1858 * 1872 *
1859 * See IA-32 Intel Architecture Software developer manual Vol 3B 1873 * See IA-32 Intel Architecture Software developer manual Vol 3B
1860 */ 1874 */
1861 .counter_bits = 32, 1875 .event_bits = 32,
1862 .counter_mask = (1ULL << 32) - 1, 1876 .event_mask = (1ULL << 32) - 1,
1863}; 1877};
1864 1878
1865static struct x86_pmu intel_pmu = { 1879static struct x86_pmu intel_pmu = {
@@ -1867,8 +1881,8 @@ static struct x86_pmu intel_pmu = {
1867 .handle_irq = intel_pmu_handle_irq, 1881 .handle_irq = intel_pmu_handle_irq,
1868 .disable_all = intel_pmu_disable_all, 1882 .disable_all = intel_pmu_disable_all,
1869 .enable_all = intel_pmu_enable_all, 1883 .enable_all = intel_pmu_enable_all,
1870 .enable = intel_pmu_enable_counter, 1884 .enable = intel_pmu_enable_event,
1871 .disable = intel_pmu_disable_counter, 1885 .disable = intel_pmu_disable_event,
1872 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 1886 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1873 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 1887 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1874 .event_map = intel_pmu_event_map, 1888 .event_map = intel_pmu_event_map,
@@ -1878,7 +1892,7 @@ static struct x86_pmu intel_pmu = {
1878 /* 1892 /*
1879 * Intel PMCs cannot be accessed sanely above 32 bit width, 1893 * Intel PMCs cannot be accessed sanely above 32 bit width,
1880 * so we install an artificial 1<<31 period regardless of 1894 * so we install an artificial 1<<31 period regardless of
1881 * the generic counter period: 1895 * the generic event period:
1882 */ 1896 */
1883 .max_period = (1ULL << 31) - 1, 1897 .max_period = (1ULL << 31) - 1,
1884 .enable_bts = intel_pmu_enable_bts, 1898 .enable_bts = intel_pmu_enable_bts,
@@ -1890,16 +1904,16 @@ static struct x86_pmu amd_pmu = {
1890 .handle_irq = amd_pmu_handle_irq, 1904 .handle_irq = amd_pmu_handle_irq,
1891 .disable_all = amd_pmu_disable_all, 1905 .disable_all = amd_pmu_disable_all,
1892 .enable_all = amd_pmu_enable_all, 1906 .enable_all = amd_pmu_enable_all,
1893 .enable = amd_pmu_enable_counter, 1907 .enable = amd_pmu_enable_event,
1894 .disable = amd_pmu_disable_counter, 1908 .disable = amd_pmu_disable_event,
1895 .eventsel = MSR_K7_EVNTSEL0, 1909 .eventsel = MSR_K7_EVNTSEL0,
1896 .perfctr = MSR_K7_PERFCTR0, 1910 .perfctr = MSR_K7_PERFCTR0,
1897 .event_map = amd_pmu_event_map, 1911 .event_map = amd_pmu_event_map,
1898 .raw_event = amd_pmu_raw_event, 1912 .raw_event = amd_pmu_raw_event,
1899 .max_events = ARRAY_SIZE(amd_perfmon_event_map), 1913 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
1900 .num_counters = 4, 1914 .num_events = 4,
1901 .counter_bits = 48, 1915 .event_bits = 48,
1902 .counter_mask = (1ULL << 48) - 1, 1916 .event_mask = (1ULL << 48) - 1,
1903 .apic = 1, 1917 .apic = 1,
1904 /* use highest bit to detect overflow */ 1918 /* use highest bit to detect overflow */
1905 .max_period = (1ULL << 47) - 1, 1919 .max_period = (1ULL << 47) - 1,
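Each vendor variant is packaged as a struct x86_pmu of function pointers and constants, and the *_pmu_init() routines copy the matching instance into the writable global, after which every call site dispatches indirectly. The pattern in miniature, with invented names:

#include <stdio.h>

struct pmu_ops {
        const char *name;
        void (*enable)(int idx);
        int num_events;
};

static void intel_enable(int idx) { printf("intel: enable %d\n", idx); }
static void amd_enable(int idx)   { printf("amd: enable %d\n", idx); }

static const struct pmu_ops intel_ops = { "intel", intel_enable, 4 };
static const struct pmu_ops amd_ops   = { "amd",   amd_enable,   4 };

static struct pmu_ops pmu;               /* like the global x86_pmu */

int main(void)
{
        int vendor_is_amd = 1;           /* stand-in for the CPUID check */

        pmu = vendor_is_amd ? amd_ops : intel_ops;  /* one-time selection */
        pmu.enable(0);                   /* indirect dispatch from then on */
        return 0;
}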
@@ -1956,7 +1970,7 @@ static int intel_pmu_init(void)
1956 1970
1957 /* 1971 /*
1958 * Check whether the Architectural PerfMon supports 1972 * Check whether the Architectural PerfMon supports
1959 * Branch Misses Retired Event or not. 1973 * Branch Misses Retired hw_event or not.
1960 */ 1974 */
1961 cpuid(10, &eax.full, &ebx, &unused, &edx.full); 1975 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
1962 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) 1976 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
@@ -1968,15 +1982,15 @@ static int intel_pmu_init(void)
1968 1982
1969 x86_pmu = intel_pmu; 1983 x86_pmu = intel_pmu;
1970 x86_pmu.version = version; 1984 x86_pmu.version = version;
1971 x86_pmu.num_counters = eax.split.num_counters; 1985 x86_pmu.num_events = eax.split.num_events;
1972 x86_pmu.counter_bits = eax.split.bit_width; 1986 x86_pmu.event_bits = eax.split.bit_width;
1973 x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; 1987 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
1974 1988
1975 /* 1989 /*
1976 * Quirk: v2 perfmon does not report fixed-purpose counters, so 1990 * Quirk: v2 perfmon does not report fixed-purpose events, so
1977 * assume at least 3 counters: 1991 * assume at least 3 events:
1978 */ 1992 */
1979 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); 1993 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
1980 1994
1981 /* 1995 /*
1982 * Install the hw-cache-events table: 1996 * Install the hw-cache-events table:
@@ -2023,11 +2037,11 @@ static int amd_pmu_init(void)
2023 return 0; 2037 return 0;
2024} 2038}
2025 2039
2026void __init init_hw_perf_counters(void) 2040void __init init_hw_perf_events(void)
2027{ 2041{
2028 int err; 2042 int err;
2029 2043
2030 pr_info("Performance Counters: "); 2044 pr_info("Performance Events: ");
2031 2045
2032 switch (boot_cpu_data.x86_vendor) { 2046 switch (boot_cpu_data.x86_vendor) {
2033 case X86_VENDOR_INTEL: 2047 case X86_VENDOR_INTEL:
@@ -2040,45 +2054,45 @@ void __init init_hw_perf_counters(void)
2040 return; 2054 return;
2041 } 2055 }
2042 if (err != 0) { 2056 if (err != 0) {
2043 pr_cont("no PMU driver, software counters only.\n"); 2057 pr_cont("no PMU driver, software events only.\n");
2044 return; 2058 return;
2045 } 2059 }
2046 2060
2047 pr_cont("%s PMU driver.\n", x86_pmu.name); 2061 pr_cont("%s PMU driver.\n", x86_pmu.name);
2048 2062
2049 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { 2063 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2050 WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", 2064 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2051 x86_pmu.num_counters, X86_PMC_MAX_GENERIC); 2065 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2052 x86_pmu.num_counters = X86_PMC_MAX_GENERIC; 2066 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2053 } 2067 }
2054 perf_counter_mask = (1 << x86_pmu.num_counters) - 1; 2068 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2055 perf_max_counters = x86_pmu.num_counters; 2069 perf_max_events = x86_pmu.num_events;
2056 2070
2057 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { 2071 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2058 WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", 2072 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2059 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); 2073 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2060 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; 2074 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2061 } 2075 }
2062 2076
2063 perf_counter_mask |= 2077 perf_event_mask |=
2064 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; 2078 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2065 x86_pmu.intel_ctrl = perf_counter_mask; 2079 x86_pmu.intel_ctrl = perf_event_mask;
2066 2080
2067 perf_counters_lapic_init(); 2081 perf_events_lapic_init();
2068 register_die_notifier(&perf_counter_nmi_notifier); 2082 register_die_notifier(&perf_event_nmi_notifier);
2069 2083
2070 pr_info("... version: %d\n", x86_pmu.version); 2084 pr_info("... version: %d\n", x86_pmu.version);
2071 pr_info("... bit width: %d\n", x86_pmu.counter_bits); 2085 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2072 pr_info("... generic counters: %d\n", x86_pmu.num_counters); 2086 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2073 pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); 2087 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2074 pr_info("... max period: %016Lx\n", x86_pmu.max_period); 2088 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2075 pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed); 2089 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2076 pr_info("... counter mask: %016Lx\n", perf_counter_mask); 2090 pr_info("... event mask: %016Lx\n", perf_event_mask);
2077} 2091}
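init_hw_perf_events() builds perf_event_mask with one bit per usable counter: the generic PMCs occupy the low bits and the fixed-purpose bits are shifted up to X86_PMC_IDX_FIXED, and the same value is reused as intel_ctrl. Worked through numerically for a plausible Core-class configuration:

#include <stdio.h>
#include <stdint.h>

#define IDX_FIXED 32                    /* X86_PMC_IDX_FIXED */

int main(void)
{
        int num_events = 4, num_fixed = 3;   /* e.g. a Core2-class PMU */

        uint64_t mask = (1ULL << num_events) - 1;          /* bits 0-3 */
        mask |= ((1ULL << num_fixed) - 1) << IDX_FIXED;    /* bits 32-34 */

        /* Prints 000000070000000f, the layout also reused as intel_ctrl. */
        printf("event mask: %016llx\n", (unsigned long long)mask);
        return 0;
}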
2078 2092
2079static inline void x86_pmu_read(struct perf_counter *counter) 2093static inline void x86_pmu_read(struct perf_event *event)
2080{ 2094{
2081 x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); 2095 x86_perf_event_update(event, &event->hw, event->hw.idx);
2082} 2096}
2083 2097
2084static const struct pmu pmu = { 2098static const struct pmu pmu = {
@@ -2088,13 +2102,16 @@ static const struct pmu pmu = {
2088 .unthrottle = x86_pmu_unthrottle, 2102 .unthrottle = x86_pmu_unthrottle,
2089}; 2103};
2090 2104
2091const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 2105const struct pmu *hw_perf_event_init(struct perf_event *event)
2092{ 2106{
2093 int err; 2107 int err;
2094 2108
2095 err = __hw_perf_counter_init(counter); 2109 err = __hw_perf_event_init(event);
2096 if (err) 2110 if (err) {
2111 if (event->destroy)
2112 event->destroy(event);
2097 return ERR_PTR(err); 2113 return ERR_PTR(err);
2114 }
2098 2115
2099 return &pmu; 2116 return &pmu;
2100} 2117}
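Moving the event->destroy assignment ahead of the later failure points, and having hw_perf_event_init() invoke it on error, is the constructor idiom of publishing the undo hook as soon as there is something to undo (here, the active_events reference). Sketched generically with hypothetical names:

#include <stdio.h>

struct event {
        void (*destroy)(struct event *);
        int refs_taken;
};

static void my_destroy(struct event *e) { e->refs_taken = 0; }

/* Publish the undo hook as soon as there is something to undo, so the
 * caller can run one generic error path for every later failure. */
static int my_init(struct event *e, int fail_late)
{
        e->refs_taken = 1;              /* like the active_events reference */
        e->destroy = my_destroy;

        if (fail_late)
                return -1;              /* caller runs e->destroy() */
        return 0;
}

int main(void)
{
        struct event e = { 0 };

        if (my_init(&e, 1) && e.destroy)
                e.destroy(&e);          /* mirrors hw_perf_event_init() */
        printf("refs_taken = %d\n", e.refs_taken);   /* 0 */
        return 0;
}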
@@ -2275,7 +2292,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2275 return entry; 2292 return entry;
2276} 2293}
2277 2294
2278void hw_perf_counter_setup_online(int cpu) 2295void hw_perf_event_setup_online(int cpu)
2279{ 2296{
2280 init_debug_store_on_cpu(cpu); 2297 init_debug_store_on_cpu(cpu);
2281} 2298}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 392bea43b890..fab786f60ed6 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -20,7 +20,7 @@
20#include <linux/kprobes.h> 20#include <linux/kprobes.h>
21 21
22#include <asm/apic.h> 22#include <asm/apic.h>
23#include <asm/perf_counter.h> 23#include <asm/perf_event.h>
24 24
25struct nmi_watchdog_ctlblk { 25struct nmi_watchdog_ctlblk {
26 unsigned int cccr_msr; 26 unsigned int cccr_msr;
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index bc24f514ec93..1cbed97b59cf 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,6 +24,7 @@
24#include <linux/dmi.h> 24#include <linux/dmi.h>
25#include <asm/div64.h> 25#include <asm/div64.h>
26#include <asm/vmware.h> 26#include <asm/vmware.h>
27#include <asm/x86_init.h>
27 28
28#define CPUID_VMWARE_INFO_LEAF 0x40000000 29#define CPUID_VMWARE_INFO_LEAF 0x40000000
29#define VMWARE_HYPERVISOR_MAGIC 0x564D5868 30#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
@@ -47,21 +48,35 @@ static inline int __vmware_platform(void)
47 return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC; 48 return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
48} 49}
49 50
50static unsigned long __vmware_get_tsc_khz(void) 51static unsigned long vmware_get_tsc_khz(void)
51{ 52{
52 uint64_t tsc_hz; 53 uint64_t tsc_hz;
53 uint32_t eax, ebx, ecx, edx; 54 uint32_t eax, ebx, ecx, edx;
54 55
55 VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); 56 VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
56 57
57 if (ebx == UINT_MAX)
58 return 0;
59 tsc_hz = eax | (((uint64_t)ebx) << 32); 58 tsc_hz = eax | (((uint64_t)ebx) << 32);
60 do_div(tsc_hz, 1000); 59 do_div(tsc_hz, 1000);
61 BUG_ON(tsc_hz >> 32); 60 BUG_ON(tsc_hz >> 32);
61 printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
62 (unsigned long) tsc_hz / 1000,
63 (unsigned long) tsc_hz % 1000);
62 return tsc_hz; 64 return tsc_hz;
63} 65}
64 66
67void __init vmware_platform_setup(void)
68{
69 uint32_t eax, ebx, ecx, edx;
70
71 VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
72
73 if (ebx != UINT_MAX)
74 x86_platform.calibrate_tsc = vmware_get_tsc_khz;
75 else
76 printk(KERN_WARNING
77 "Failed to get TSC freq from the hypervisor\n");
78}
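Rather than exporting vmware_get_tsc_khz() and BUG_ON()-checking its callers, setup now probes GETHZ once and overrides the x86_platform.calibrate_tsc hook only when the hypervisor actually reports a rate. The eax/ebx registers carry the low and high halves of the rate in Hz, divided down to kHz; the conversion in isolation, with invented register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Pretend VMWARE_PORT(GETHZ, ...) returned these values. */
        uint32_t eax = 2992969000u;   /* low 32 bits of the TSC rate, Hz */
        uint32_t ebx = 0;             /* high 32 bits (UINT_MAX = no info) */

        uint64_t tsc_hz  = eax | ((uint64_t)ebx << 32);
        uint64_t tsc_khz = tsc_hz / 1000;              /* do_div() analogue */

        /* Prints 2992.969 MHz, matching the kernel's message format. */
        printf("TSC freq read from hypervisor : %lu.%03lu MHz\n",
               (unsigned long)(tsc_khz / 1000),
               (unsigned long)(tsc_khz % 1000));
        return 0;
}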
79
65/* 80/*
66 * While checking the dmi string information, just checking the product 81 * While checking the dmi string information, just checking the product
67 * serial key should be enough, as this will always have a VMware 82 * serial key should be enough, as this will always have a VMware
@@ -87,12 +102,6 @@ int vmware_platform(void)
87 return 0; 102 return 0;
88} 103}
89 104
90unsigned long vmware_get_tsc_khz(void)
91{
92 BUG_ON(!vmware_platform());
93 return __vmware_get_tsc_khz();
94}
95
96/* 105/*
97 * VMware hypervisor takes care of exporting a reliable TSC to the guest. 106 * VMware hypervisor takes care of exporting a reliable TSC to the guest.
98 * Still, due to timing differences when running on virtual cpus, the TSC can 107 * Still, due to timing differences when running on virtual cpus, the TSC can
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index b07af8861244..6a52d4b36a30 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -182,7 +182,7 @@ static struct notifier_block __refdata cpuid_class_cpu_notifier =
182 .notifier_call = cpuid_class_cpu_callback, 182 .notifier_call = cpuid_class_cpu_callback,
183}; 183};
184 184
185static char *cpuid_nodename(struct device *dev) 185static char *cpuid_devnode(struct device *dev, mode_t *mode)
186{ 186{
187 return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt)); 187 return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
188} 188}
@@ -203,7 +203,7 @@ static int __init cpuid_init(void)
203 err = PTR_ERR(cpuid_class); 203 err = PTR_ERR(cpuid_class);
204 goto out_chrdev; 204 goto out_chrdev;
205 } 205 }
206 cpuid_class->nodename = cpuid_nodename; 206 cpuid_class->devnode = cpuid_devnode;
207 for_each_online_cpu(i) { 207 for_each_online_cpu(i) {
208 err = cpuid_device_create(i); 208 err = cpuid_device_create(i);
209 if (err != 0) 209 if (err != 0)
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index bca5fba91c9e..f7dd2a7c3bf4 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -5,7 +5,6 @@
5#include <linux/kallsyms.h> 5#include <linux/kallsyms.h>
6#include <linux/kprobes.h> 6#include <linux/kprobes.h>
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <linux/utsname.h>
9#include <linux/hardirq.h> 8#include <linux/hardirq.h>
10#include <linux/kdebug.h> 9#include <linux/kdebug.h>
11#include <linux/module.h> 10#include <linux/module.h>
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 54b0a3276766..a071e6be177e 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -5,7 +5,6 @@
5#include <linux/kallsyms.h> 5#include <linux/kallsyms.h>
6#include <linux/kprobes.h> 6#include <linux/kprobes.h>
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <linux/utsname.h>
9#include <linux/hardirq.h> 8#include <linux/hardirq.h>
10#include <linux/kdebug.h> 9#include <linux/kdebug.h>
11#include <linux/module.h> 10#include <linux/module.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 147005a1cc3c..85419bb7d4ab 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1331,7 +1331,7 @@ void __init e820_reserve_resources(void)
1331 struct resource *res; 1331 struct resource *res;
1332 u64 end; 1332 u64 end;
1333 1333
1334 res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map); 1334 res = alloc_bootmem(sizeof(struct resource) * e820.nr_map);
1335 e820_res = res; 1335 e820_res = res;
1336 for (i = 0; i < e820.nr_map; i++) { 1336 for (i = 0; i < e820.nr_map; i++) {
1337 end = e820.map[i].addr + e820.map[i].size - 1; 1337 end = e820.map[i].addr + e820.map[i].size - 1;
@@ -1455,28 +1455,11 @@ char *__init default_machine_specific_memory_setup(void)
1455 return who; 1455 return who;
1456} 1456}
1457 1457
1458char *__init __attribute__((weak)) machine_specific_memory_setup(void)
1459{
1460 if (x86_quirks->arch_memory_setup) {
1461 char *who = x86_quirks->arch_memory_setup();
1462
1463 if (who)
1464 return who;
1465 }
1466 return default_machine_specific_memory_setup();
1467}
1468
1469/* Overridden in paravirt.c if CONFIG_PARAVIRT */
1470char * __init __attribute__((weak)) memory_setup(void)
1471{
1472 return machine_specific_memory_setup();
1473}
1474
1475void __init setup_memory_map(void) 1458void __init setup_memory_map(void)
1476{ 1459{
1477 char *who; 1460 char *who;
1478 1461
1479 who = memory_setup(); 1462 who = x86_init.resources.memory_setup();
1480 memcpy(&e820_saved, &e820, sizeof(struct e820map)); 1463 memcpy(&e820_saved, &e820, sizeof(struct e820map));
1481 printk(KERN_INFO "BIOS-provided physical RAM map:\n"); 1464 printk(KERN_INFO "BIOS-provided physical RAM map:\n");
1482 e820_print_map(who); 1465 e820_print_map(who);
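
With the weak machine_specific_memory_setup() gone, the e820 map is obtained through x86_init.resources.memory_setup, which defaults to default_machine_specific_memory_setup(). A platform quirk now overrides the hook instead of providing a strong symbol; a minimal sketch, assuming a hypothetical board that must reserve a flash window the BIOS omits:

	static char * __init myboard_memory_setup(void)
	{
		char *who = default_machine_specific_memory_setup();

		/* hypothetical quirk: reserve a 2 MB flash window */
		e820_add_region(0xffe00000, 0x200000, E820_RESERVED);
		return who;
	}

	/* in the board's early setup code: */
	x86_init.resources.memory_setup = myboard_memory_setup;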
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 335f049d110f..2acfd3fdc0cc 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -160,721 +160,6 @@ static struct console early_serial_console = {
160 .index = -1, 160 .index = -1,
161}; 161};
162 162
163#ifdef CONFIG_EARLY_PRINTK_DBGP
164
165static struct ehci_caps __iomem *ehci_caps;
166static struct ehci_regs __iomem *ehci_regs;
167static struct ehci_dbg_port __iomem *ehci_debug;
168static unsigned int dbgp_endpoint_out;
169
170struct ehci_dev {
171 u32 bus;
172 u32 slot;
173 u32 func;
174};
175
176static struct ehci_dev ehci_dev;
177
178#define USB_DEBUG_DEVNUM 127
179
180#define DBGP_DATA_TOGGLE 0x8800
181
182static inline u32 dbgp_pid_update(u32 x, u32 tok)
183{
184 return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
185}
186
187static inline u32 dbgp_len_update(u32 x, u32 len)
188{
189 return (x & ~0x0f) | (len & 0x0f);
190}
191
192/*
193 * USB Packet IDs (PIDs)
194 */
195
196/* token */
197#define USB_PID_OUT 0xe1
198#define USB_PID_IN 0x69
199#define USB_PID_SOF 0xa5
200#define USB_PID_SETUP 0x2d
201/* handshake */
202#define USB_PID_ACK 0xd2
203#define USB_PID_NAK 0x5a
204#define USB_PID_STALL 0x1e
205#define USB_PID_NYET 0x96
206/* data */
207#define USB_PID_DATA0 0xc3
208#define USB_PID_DATA1 0x4b
209#define USB_PID_DATA2 0x87
210#define USB_PID_MDATA 0x0f
211/* Special */
212#define USB_PID_PREAMBLE 0x3c
213#define USB_PID_ERR 0x3c
214#define USB_PID_SPLIT 0x78
215#define USB_PID_PING 0xb4
216#define USB_PID_UNDEF_0 0xf0
217
218#define USB_PID_DATA_TOGGLE 0x88
219#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE)
220
221#define PCI_CAP_ID_EHCI_DEBUG 0xa
222
223#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
224#define HUB_SHORT_RESET_TIME 10
225#define HUB_LONG_RESET_TIME 200
226#define HUB_RESET_TIMEOUT 500
227
228#define DBGP_MAX_PACKET 8
229
230static int dbgp_wait_until_complete(void)
231{
232 u32 ctrl;
233 int loop = 0x100000;
234
235 do {
236 ctrl = readl(&ehci_debug->control);
237 /* Stop when the transaction is finished */
238 if (ctrl & DBGP_DONE)
239 break;
240 } while (--loop > 0);
241
242 if (!loop)
243 return -1;
244
245 /*
246 * Now that we have observed the completed transaction,
247 * clear the done bit.
248 */
249 writel(ctrl | DBGP_DONE, &ehci_debug->control);
250 return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
251}
252
253static void __init dbgp_mdelay(int ms)
254{
255 int i;
256
257 while (ms--) {
258 for (i = 0; i < 1000; i++)
259 outb(0x1, 0x80);
260 }
261}
262
263static void dbgp_breath(void)
264{
265 /* Sleep to give the debug port a chance to breathe */
266}
267
268static int dbgp_wait_until_done(unsigned ctrl)
269{
270 u32 pids, lpid;
271 int ret;
272 int loop = 3;
273
274retry:
275 writel(ctrl | DBGP_GO, &ehci_debug->control);
276 ret = dbgp_wait_until_complete();
277 pids = readl(&ehci_debug->pids);
278 lpid = DBGP_PID_GET(pids);
279
280 if (ret < 0)
281 return ret;
282
283 /*
284 * If the port is getting full or it has dropped data
285	 * start pacing ourselves; not necessary, but it's friendly.
286 */
287 if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET))
288 dbgp_breath();
289
290	/* If we get a NAK, reissue the transmission */
291 if (lpid == USB_PID_NAK) {
292 if (--loop > 0)
293 goto retry;
294 }
295
296 return ret;
297}
298
299static void dbgp_set_data(const void *buf, int size)
300{
301 const unsigned char *bytes = buf;
302 u32 lo, hi;
303 int i;
304
305 lo = hi = 0;
306 for (i = 0; i < 4 && i < size; i++)
307 lo |= bytes[i] << (8*i);
308 for (; i < 8 && i < size; i++)
309 hi |= bytes[i] << (8*(i - 4));
310 writel(lo, &ehci_debug->data03);
311 writel(hi, &ehci_debug->data47);
312}
313
314static void __init dbgp_get_data(void *buf, int size)
315{
316 unsigned char *bytes = buf;
317 u32 lo, hi;
318 int i;
319
320 lo = readl(&ehci_debug->data03);
321 hi = readl(&ehci_debug->data47);
322 for (i = 0; i < 4 && i < size; i++)
323 bytes[i] = (lo >> (8*i)) & 0xff;
324 for (; i < 8 && i < size; i++)
325 bytes[i] = (hi >> (8*(i - 4))) & 0xff;
326}
327
328static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
329 const char *bytes, int size)
330{
331 u32 pids, addr, ctrl;
332 int ret;
333
334 if (size > DBGP_MAX_PACKET)
335 return -1;
336
337 addr = DBGP_EPADDR(devnum, endpoint);
338
339 pids = readl(&ehci_debug->pids);
340 pids = dbgp_pid_update(pids, USB_PID_OUT);
341
342 ctrl = readl(&ehci_debug->control);
343 ctrl = dbgp_len_update(ctrl, size);
344 ctrl |= DBGP_OUT;
345 ctrl |= DBGP_GO;
346
347 dbgp_set_data(bytes, size);
348 writel(addr, &ehci_debug->address);
349 writel(pids, &ehci_debug->pids);
350
351 ret = dbgp_wait_until_done(ctrl);
352 if (ret < 0)
353 return ret;
354
355 return ret;
356}
357
358static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
359 int size)
360{
361 u32 pids, addr, ctrl;
362 int ret;
363
364 if (size > DBGP_MAX_PACKET)
365 return -1;
366
367 addr = DBGP_EPADDR(devnum, endpoint);
368
369 pids = readl(&ehci_debug->pids);
370 pids = dbgp_pid_update(pids, USB_PID_IN);
371
372 ctrl = readl(&ehci_debug->control);
373 ctrl = dbgp_len_update(ctrl, size);
374 ctrl &= ~DBGP_OUT;
375 ctrl |= DBGP_GO;
376
377 writel(addr, &ehci_debug->address);
378 writel(pids, &ehci_debug->pids);
379 ret = dbgp_wait_until_done(ctrl);
380 if (ret < 0)
381 return ret;
382
383 if (size > ret)
384 size = ret;
385 dbgp_get_data(data, size);
386 return ret;
387}
388
389static int __init dbgp_control_msg(unsigned devnum, int requesttype,
390 int request, int value, int index, void *data, int size)
391{
392 u32 pids, addr, ctrl;
393 struct usb_ctrlrequest req;
394 int read;
395 int ret;
396
397 read = (requesttype & USB_DIR_IN) != 0;
398 if (size > (read ? DBGP_MAX_PACKET:0))
399 return -1;
400
401 /* Compute the control message */
402 req.bRequestType = requesttype;
403 req.bRequest = request;
404 req.wValue = cpu_to_le16(value);
405 req.wIndex = cpu_to_le16(index);
406 req.wLength = cpu_to_le16(size);
407
408 pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP);
409 addr = DBGP_EPADDR(devnum, 0);
410
411 ctrl = readl(&ehci_debug->control);
412 ctrl = dbgp_len_update(ctrl, sizeof(req));
413 ctrl |= DBGP_OUT;
414 ctrl |= DBGP_GO;
415
416 /* Send the setup message */
417 dbgp_set_data(&req, sizeof(req));
418 writel(addr, &ehci_debug->address);
419 writel(pids, &ehci_debug->pids);
420 ret = dbgp_wait_until_done(ctrl);
421 if (ret < 0)
422 return ret;
423
424 /* Read the result */
425 return dbgp_bulk_read(devnum, 0, data, size);
426}
427
428
429/* Find a PCI capability */
430static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
431{
432 u8 pos;
433 int bytes;
434
435 if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
436 PCI_STATUS_CAP_LIST))
437 return 0;
438
439 pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
440 for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
441 u8 id;
442
443 pos &= ~3;
444 id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
445 if (id == 0xff)
446 break;
447 if (id == cap)
448 return pos;
449
450 pos = read_pci_config_byte(num, slot, func,
451 pos+PCI_CAP_LIST_NEXT);
452 }
453 return 0;
454}
455
456static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func)
457{
458 u32 class;
459
460 class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
461 if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI)
462 return 0;
463
464 return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG);
465}
466
467static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
468{
469 u32 bus, slot, func;
470
471 for (bus = 0; bus < 256; bus++) {
472 for (slot = 0; slot < 32; slot++) {
473 for (func = 0; func < 8; func++) {
474 unsigned cap;
475
476 cap = __find_dbgp(bus, slot, func);
477
478 if (!cap)
479 continue;
480 if (ehci_num-- != 0)
481 continue;
482 *rbus = bus;
483 *rslot = slot;
484 *rfunc = func;
485 return cap;
486 }
487 }
488 }
489 return 0;
490}
491
492static int __init ehci_reset_port(int port)
493{
494 u32 portsc;
495 u32 delay_time, delay;
496 int loop;
497
498 /* Reset the usb debug port */
499 portsc = readl(&ehci_regs->port_status[port - 1]);
500 portsc &= ~PORT_PE;
501 portsc |= PORT_RESET;
502 writel(portsc, &ehci_regs->port_status[port - 1]);
503
504 delay = HUB_ROOT_RESET_TIME;
505 for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT;
506 delay_time += delay) {
507 dbgp_mdelay(delay);
508
509 portsc = readl(&ehci_regs->port_status[port - 1]);
510 if (portsc & PORT_RESET) {
511 /* force reset to complete */
512 loop = 2;
513 writel(portsc & ~(PORT_RWC_BITS | PORT_RESET),
514 &ehci_regs->port_status[port - 1]);
515 do {
516 portsc = readl(&ehci_regs->port_status[port-1]);
517 } while ((portsc & PORT_RESET) && (--loop > 0));
518 }
519
520 /* Device went away? */
521 if (!(portsc & PORT_CONNECT))
522 return -ENOTCONN;
523
524		/* bomb out completely if something weird happened */
525 if ((portsc & PORT_CSC))
526 return -EINVAL;
527
528 /* If we've finished resetting, then break out of the loop */
529 if (!(portsc & PORT_RESET) && (portsc & PORT_PE))
530 return 0;
531 }
532 return -EBUSY;
533}
534
535static int __init ehci_wait_for_port(int port)
536{
537 u32 status;
538 int ret, reps;
539
540 for (reps = 0; reps < 3; reps++) {
541 dbgp_mdelay(100);
542 status = readl(&ehci_regs->status);
543 if (status & STS_PCD) {
544 ret = ehci_reset_port(port);
545 if (ret == 0)
546 return 0;
547 }
548 }
549 return -ENOTCONN;
550}
551
552#ifdef DBGP_DEBUG
553# define dbgp_printk early_printk
554#else
555static inline void dbgp_printk(const char *fmt, ...) { }
556#endif
557
558typedef void (*set_debug_port_t)(int port);
559
560static void __init default_set_debug_port(int port)
561{
562}
563
564static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
565
566static void __init nvidia_set_debug_port(int port)
567{
568 u32 dword;
569 dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
570 0x74);
571 dword &= ~(0x0f<<12);
572 dword |= ((port & 0x0f)<<12);
573 write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74,
574 dword);
575 dbgp_printk("set debug port to %d\n", port);
576}
577
578static void __init detect_set_debug_port(void)
579{
580 u32 vendorid;
581
582 vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
583 0x00);
584
585 if ((vendorid & 0xffff) == 0x10de) {
586 dbgp_printk("using nvidia set_debug_port\n");
587 set_debug_port = nvidia_set_debug_port;
588 }
589}
590
591static int __init ehci_setup(void)
592{
593 struct usb_debug_descriptor dbgp_desc;
594 u32 cmd, ctrl, status, portsc, hcs_params;
595 u32 debug_port, new_debug_port = 0, n_ports;
596 u32 devnum;
597 int ret, i;
598 int loop;
599 int port_map_tried;
600 int playtimes = 3;
601
602try_next_time:
603 port_map_tried = 0;
604
605try_next_port:
606
607 hcs_params = readl(&ehci_caps->hcs_params);
608 debug_port = HCS_DEBUG_PORT(hcs_params);
609 n_ports = HCS_N_PORTS(hcs_params);
610
611 dbgp_printk("debug_port: %d\n", debug_port);
612 dbgp_printk("n_ports: %d\n", n_ports);
613
614 for (i = 1; i <= n_ports; i++) {
615 portsc = readl(&ehci_regs->port_status[i-1]);
616 dbgp_printk("portstatus%d: %08x\n", i, portsc);
617 }
618
619 if (port_map_tried && (new_debug_port != debug_port)) {
620 if (--playtimes) {
621 set_debug_port(new_debug_port);
622 goto try_next_time;
623 }
624 return -1;
625 }
626
627 loop = 10;
628 /* Reset the EHCI controller */
629 cmd = readl(&ehci_regs->command);
630 cmd |= CMD_RESET;
631 writel(cmd, &ehci_regs->command);
632 do {
633 cmd = readl(&ehci_regs->command);
634 } while ((cmd & CMD_RESET) && (--loop > 0));
635
636 if (!loop) {
637 dbgp_printk("can not reset ehci\n");
638 return -1;
639 }
640 dbgp_printk("ehci reset done\n");
641
642 /* Claim ownership, but do not enable yet */
643 ctrl = readl(&ehci_debug->control);
644 ctrl |= DBGP_OWNER;
645 ctrl &= ~(DBGP_ENABLED | DBGP_INUSE);
646 writel(ctrl, &ehci_debug->control);
647
648 /* Start the ehci running */
649 cmd = readl(&ehci_regs->command);
650 cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET);
651 cmd |= CMD_RUN;
652 writel(cmd, &ehci_regs->command);
653
654 /* Ensure everything is routed to the EHCI */
655 writel(FLAG_CF, &ehci_regs->configured_flag);
656
657 /* Wait until the controller is no longer halted */
658 loop = 10;
659 do {
660 status = readl(&ehci_regs->status);
661 } while ((status & STS_HALT) && (--loop > 0));
662
663 if (!loop) {
664 dbgp_printk("ehci can be started\n");
665 return -1;
666 }
667 dbgp_printk("ehci started\n");
668
669 /* Wait for a device to show up in the debug port */
670 ret = ehci_wait_for_port(debug_port);
671 if (ret < 0) {
672 dbgp_printk("No device found in debug port\n");
673 goto next_debug_port;
674 }
675 dbgp_printk("ehci wait for port done\n");
676
677 /* Enable the debug port */
678 ctrl = readl(&ehci_debug->control);
679 ctrl |= DBGP_CLAIM;
680 writel(ctrl, &ehci_debug->control);
681 ctrl = readl(&ehci_debug->control);
682 if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) {
683 dbgp_printk("No device in debug port\n");
684 writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control);
685 goto err;
686 }
687 dbgp_printk("debug ported enabled\n");
688
689 /* Completely transfer the debug device to the debug controller */
690 portsc = readl(&ehci_regs->port_status[debug_port - 1]);
691 portsc &= ~PORT_PE;
692 writel(portsc, &ehci_regs->port_status[debug_port - 1]);
693
694 dbgp_mdelay(100);
695
696 /* Find the debug device and make it device number 127 */
697 for (devnum = 0; devnum <= 127; devnum++) {
698 ret = dbgp_control_msg(devnum,
699 USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
700 USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0,
701 &dbgp_desc, sizeof(dbgp_desc));
702 if (ret > 0)
703 break;
704 }
705 if (devnum > 127) {
706 dbgp_printk("Could not find attached debug device\n");
707 goto err;
708 }
709 if (ret < 0) {
710 dbgp_printk("Attached device is not a debug device\n");
711 goto err;
712 }
713 dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
714
715 /* Move the device to 127 if it isn't already there */
716 if (devnum != USB_DEBUG_DEVNUM) {
717 ret = dbgp_control_msg(devnum,
718 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
719 USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0);
720 if (ret < 0) {
721 dbgp_printk("Could not move attached device to %d\n",
722 USB_DEBUG_DEVNUM);
723 goto err;
724 }
725 devnum = USB_DEBUG_DEVNUM;
726 dbgp_printk("debug device renamed to 127\n");
727 }
728
729 /* Enable the debug interface */
730 ret = dbgp_control_msg(USB_DEBUG_DEVNUM,
731 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
732 USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0);
733 if (ret < 0) {
734 dbgp_printk(" Could not enable the debug device\n");
735 goto err;
736 }
737 dbgp_printk("debug interface enabled\n");
738
739 /* Perform a small write to get the even/odd data state in sync
740 */
741 ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1);
742 if (ret < 0) {
743 dbgp_printk("dbgp_bulk_write failed: %d\n", ret);
744 goto err;
745 }
746 dbgp_printk("small write doned\n");
747
748 return 0;
749err:
750 /* Things didn't work so remove my claim */
751 ctrl = readl(&ehci_debug->control);
752 ctrl &= ~(DBGP_CLAIM | DBGP_OUT);
753 writel(ctrl, &ehci_debug->control);
754 return -1;
755
756next_debug_port:
757 port_map_tried |= (1<<(debug_port - 1));
758 new_debug_port = ((debug_port-1+1)%n_ports) + 1;
759 if (port_map_tried != ((1<<n_ports) - 1)) {
760 set_debug_port(new_debug_port);
761 goto try_next_port;
762 }
763 if (--playtimes) {
764 set_debug_port(new_debug_port);
765 goto try_next_time;
766 }
767
768 return -1;
769}
770
771static int __init early_dbgp_init(char *s)
772{
773 u32 debug_port, bar, offset;
774 u32 bus, slot, func, cap;
775 void __iomem *ehci_bar;
776 u32 dbgp_num;
777 u32 bar_val;
778 char *e;
779 int ret;
780 u8 byte;
781
782 if (!early_pci_allowed())
783 return -1;
784
785 dbgp_num = 0;
786 if (*s)
787 dbgp_num = simple_strtoul(s, &e, 10);
788 dbgp_printk("dbgp_num: %d\n", dbgp_num);
789
790 cap = find_dbgp(dbgp_num, &bus, &slot, &func);
791 if (!cap)
792 return -1;
793
794 dbgp_printk("Found EHCI debug port on %02x:%02x.%1x\n", bus, slot,
795 func);
796
797 debug_port = read_pci_config(bus, slot, func, cap);
798 bar = (debug_port >> 29) & 0x7;
799 bar = (bar * 4) + 0xc;
800 offset = (debug_port >> 16) & 0xfff;
801 dbgp_printk("bar: %02x offset: %03x\n", bar, offset);
802 if (bar != PCI_BASE_ADDRESS_0) {
803 dbgp_printk("only debug ports on bar 1 handled.\n");
804
805 return -1;
806 }
807
808 bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
809 dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset);
810 if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) {
811 dbgp_printk("only simple 32bit mmio bars supported\n");
812
813 return -1;
814 }
815
816 /* double check if the mem space is enabled */
817 byte = read_pci_config_byte(bus, slot, func, 0x04);
818 if (!(byte & 0x2)) {
819 byte |= 0x02;
820 write_pci_config_byte(bus, slot, func, 0x04, byte);
821 dbgp_printk("mmio for ehci enabled\n");
822 }
823
824 /*
825 * FIXME I don't have the bar size so just guess PAGE_SIZE is more
826 * than enough. 1K is the biggest I have seen.
827 */
828 set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK);
829 ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE);
830 ehci_bar += bar_val & ~PAGE_MASK;
831 dbgp_printk("ehci_bar: %p\n", ehci_bar);
832
833 ehci_caps = ehci_bar;
834 ehci_regs = ehci_bar + HC_LENGTH(readl(&ehci_caps->hc_capbase));
835 ehci_debug = ehci_bar + offset;
836 ehci_dev.bus = bus;
837 ehci_dev.slot = slot;
838 ehci_dev.func = func;
839
840 detect_set_debug_port();
841
842 ret = ehci_setup();
843 if (ret < 0) {
844 dbgp_printk("ehci_setup failed\n");
845 ehci_debug = NULL;
846
847 return -1;
848 }
849
850 return 0;
851}
852
853static void early_dbgp_write(struct console *con, const char *str, u32 n)
854{
855 int chunk, ret;
856
857 if (!ehci_debug)
858 return;
859 while (n > 0) {
860 chunk = n;
861 if (chunk > DBGP_MAX_PACKET)
862 chunk = DBGP_MAX_PACKET;
863 ret = dbgp_bulk_write(USB_DEBUG_DEVNUM,
864 dbgp_endpoint_out, str, chunk);
865 str += chunk;
866 n -= chunk;
867 }
868}
869
870static struct console early_dbgp_console = {
871 .name = "earlydbg",
872 .write = early_dbgp_write,
873 .flags = CON_PRINTBUFFER,
874 .index = -1,
875};
876#endif
877
878/* Direct interface for emergencies */ 163/* Direct interface for emergencies */
879static struct console *early_console = &early_vga_console; 164static struct console *early_console = &early_vga_console;
880static int __initdata early_console_initialized; 165static int __initdata early_console_initialized;
@@ -891,10 +176,19 @@ asmlinkage void early_printk(const char *fmt, ...)
891 va_end(ap); 176 va_end(ap);
892} 177}
893 178
179static inline void early_console_register(struct console *con, int keep_early)
180{
181 early_console = con;
182 if (keep_early)
183 early_console->flags &= ~CON_BOOT;
184 else
185 early_console->flags |= CON_BOOT;
186 register_console(early_console);
187}
894 188
895static int __init setup_early_printk(char *buf) 189static int __init setup_early_printk(char *buf)
896{ 190{
897 int keep_early; 191 int keep;
898 192
899 if (!buf) 193 if (!buf)
900 return 0; 194 return 0;
@@ -903,42 +197,34 @@ static int __init setup_early_printk(char *buf)
903 return 0; 197 return 0;
904 early_console_initialized = 1; 198 early_console_initialized = 1;
905 199
906 keep_early = (strstr(buf, "keep") != NULL); 200 keep = (strstr(buf, "keep") != NULL);
907 201
908 if (!strncmp(buf, "serial", 6)) { 202 while (*buf != '\0') {
909 early_serial_init(buf + 6); 203 if (!strncmp(buf, "serial", 6)) {
910 early_console = &early_serial_console; 204 early_serial_init(buf + 6);
911 } else if (!strncmp(buf, "ttyS", 4)) { 205 early_console_register(&early_serial_console, keep);
912 early_serial_init(buf); 206 }
913 early_console = &early_serial_console; 207 if (!strncmp(buf, "ttyS", 4)) {
914 } else if (!strncmp(buf, "vga", 3) 208 early_serial_init(buf + 4);
915 && boot_params.screen_info.orig_video_isVGA == 1) { 209 early_console_register(&early_serial_console, keep);
916 max_xpos = boot_params.screen_info.orig_video_cols; 210 }
917 max_ypos = boot_params.screen_info.orig_video_lines; 211 if (!strncmp(buf, "vga", 3) &&
918 current_ypos = boot_params.screen_info.orig_y; 212 boot_params.screen_info.orig_video_isVGA == 1) {
919 early_console = &early_vga_console; 213 max_xpos = boot_params.screen_info.orig_video_cols;
214 max_ypos = boot_params.screen_info.orig_video_lines;
215 current_ypos = boot_params.screen_info.orig_y;
216 early_console_register(&early_vga_console, keep);
217 }
920#ifdef CONFIG_EARLY_PRINTK_DBGP 218#ifdef CONFIG_EARLY_PRINTK_DBGP
921 } else if (!strncmp(buf, "dbgp", 4)) { 219 if (!strncmp(buf, "dbgp", 4) && !early_dbgp_init(buf + 4))
922 if (early_dbgp_init(buf+4) < 0) 220 early_console_register(&early_dbgp_console, keep);
923 return 0;
924 early_console = &early_dbgp_console;
925 /*
926 * usb subsys will reset ehci controller, so don't keep
927 * that early console
928 */
929 keep_early = 0;
930#endif 221#endif
931#ifdef CONFIG_HVC_XEN 222#ifdef CONFIG_HVC_XEN
932 } else if (!strncmp(buf, "xen", 3)) { 223 if (!strncmp(buf, "xen", 3))
933 early_console = &xenboot_console; 224 early_console_register(&xenboot_console, keep);
934#endif 225#endif
226 buf++;
935 } 227 }
936
937 if (keep_early)
938 early_console->flags &= ~CON_BOOT;
939 else
940 early_console->flags |= CON_BOOT;
941 register_console(early_console);
942 return 0; 228 return 0;
943} 229}
944 230
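
Because the rewritten parser walks the whole option string instead of matching a single prefix, one earlyprintk= argument can now register several consoles, and "keep" anywhere in the string keeps them past boot by clearing CON_BOOT. An illustrative command line (device and baud rate are examples, not taken from the patch):

	earlyprintk=vga,ttyS0,115200,keep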
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index fe26ba3e3451..ad5bd988fb79 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -42,6 +42,7 @@
42#include <asm/time.h> 42#include <asm/time.h>
43#include <asm/cacheflush.h> 43#include <asm/cacheflush.h>
44#include <asm/tlbflush.h> 44#include <asm/tlbflush.h>
45#include <asm/x86_init.h>
45 46
46#define EFI_DEBUG 1 47#define EFI_DEBUG 1
47#define PFX "EFI: " 48#define PFX "EFI: "
@@ -453,6 +454,9 @@ void __init efi_init(void)
453 if (add_efi_memmap) 454 if (add_efi_memmap)
454 do_add_efi_memmap(); 455 do_add_efi_memmap();
455 456
457 x86_platform.get_wallclock = efi_get_time;
458 x86_platform.set_wallclock = efi_set_rtc_mmss;
459
456 /* Setup for EFI runtime service */ 460 /* Setup for EFI runtime service */
457 reboot_type = BOOT_EFI; 461 reboot_type = BOOT_EFI;
458 462
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index d59fe323807e..b5c061f8f358 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -536,20 +536,13 @@ sysret_signal:
536 bt $TIF_SYSCALL_AUDIT,%edx 536 bt $TIF_SYSCALL_AUDIT,%edx
537 jc sysret_audit 537 jc sysret_audit
538#endif 538#endif
539 /* edx: work flags (arg3) */ 539 /*
540 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 540 * We have a signal, or exit tracing or single-step.
541 xorl %esi,%esi # oldset -> arg2 541 * These all wind up with the iret return path anyway,
542 SAVE_REST 542 * so just join that path right now.
543 FIXUP_TOP_OF_STACK %r11 543 */
544 call do_notify_resume 544 FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
545 RESTORE_TOP_OF_STACK %r11 545 jmp int_check_syscall_exit_work
546 RESTORE_REST
547 movl $_TIF_WORK_MASK,%edi
548 /* Use IRET because user could have changed frame. This
549 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
550 DISABLE_INTERRUPTS(CLBR_NONE)
551 TRACE_IRQS_OFF
552 jmp int_with_check
553 546
554badsys: 547badsys:
555 movq $-ENOSYS,RAX-ARGOFFSET(%rsp) 548 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
@@ -654,6 +647,7 @@ int_careful:
654int_very_careful: 647int_very_careful:
655 TRACE_IRQS_ON 648 TRACE_IRQS_ON
656 ENABLE_INTERRUPTS(CLBR_NONE) 649 ENABLE_INTERRUPTS(CLBR_NONE)
650int_check_syscall_exit_work:
657 SAVE_REST 651 SAVE_REST
658 /* Check for syscall exit trace */ 652 /* Check for syscall exit trace */
659 testl $_TIF_WORK_SYSCALL_EXIT,%edx 653 testl $_TIF_WORK_SYSCALL_EXIT,%edx
@@ -1021,7 +1015,7 @@ apicinterrupt ERROR_APIC_VECTOR \
1021apicinterrupt SPURIOUS_APIC_VECTOR \ 1015apicinterrupt SPURIOUS_APIC_VECTOR \
1022 spurious_interrupt smp_spurious_interrupt 1016 spurious_interrupt smp_spurious_interrupt
1023 1017
1024#ifdef CONFIG_PERF_COUNTERS 1018#ifdef CONFIG_PERF_EVENTS
1025apicinterrupt LOCAL_PENDING_VECTOR \ 1019apicinterrupt LOCAL_PENDING_VECTOR \
1026 perf_pending_interrupt smp_perf_pending_interrupt 1020 perf_pending_interrupt smp_perf_pending_interrupt
1027#endif 1021#endif
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 3f8579f8d42c..4f8e2507e8f3 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -11,8 +11,21 @@
11#include <asm/setup.h> 11#include <asm/setup.h>
12#include <asm/sections.h> 12#include <asm/sections.h>
13#include <asm/e820.h> 13#include <asm/e820.h>
14#include <asm/bios_ebda.h> 14#include <asm/page.h>
15#include <asm/trampoline.h> 15#include <asm/trampoline.h>
16#include <asm/apic.h>
17#include <asm/io_apic.h>
18#include <asm/bios_ebda.h>
19
20static void __init i386_default_early_setup(void)
21{
22	/* Initialize 32-bit specific setup functions */

23 x86_init.resources.probe_roms = probe_roms;
24 x86_init.resources.reserve_resources = i386_reserve_resources;
25 x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
26
27 reserve_ebda_region();
28}
16 29
17void __init i386_start_kernel(void) 30void __init i386_start_kernel(void)
18{ 31{
@@ -29,7 +42,16 @@ void __init i386_start_kernel(void)
29 reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); 42 reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
30 } 43 }
31#endif 44#endif
32 reserve_ebda_region(); 45
46 /* Call the subarch specific early setup function */
47 switch (boot_params.hdr.hardware_subarch) {
48 case X86_SUBARCH_MRST:
49 x86_mrst_early_setup();
50 break;
51 default:
52 i386_default_early_setup();
53 break;
54 }
33 55
34 /* 56 /*
35 * At this point everything still needed from the boot loader 57 * At this point everything still needed from the boot loader
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 70eaa852c732..0b06cd778fd9 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -23,8 +23,8 @@
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <asm/kdebug.h> 24#include <asm/kdebug.h>
25#include <asm/e820.h> 25#include <asm/e820.h>
26#include <asm/bios_ebda.h>
27#include <asm/trampoline.h> 26#include <asm/trampoline.h>
27#include <asm/bios_ebda.h>
28 28
29static void __init zap_identity_mappings(void) 29static void __init zap_identity_mappings(void)
30{ 30{
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 7ffec6b3b331..218aad7ee76e 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -157,6 +157,7 @@ subarch_entries:
157 .long default_entry /* normal x86/PC */ 157 .long default_entry /* normal x86/PC */
158 .long lguest_entry /* lguest hypervisor */ 158 .long lguest_entry /* lguest hypervisor */
159 .long xen_entry /* Xen hypervisor */ 159 .long xen_entry /* Xen hypervisor */
160 .long default_entry /* Moorestown MID */
160num_subarch_entries = (. - subarch_entries) / 4 161num_subarch_entries = (. - subarch_entries) / 4
161.previous 162.previous
162#endif /* CONFIG_PARAVIRT */ 163#endif /* CONFIG_PARAVIRT */
@@ -607,7 +608,7 @@ ENTRY(initial_code)
607/* 608/*
608 * BSS section 609 * BSS section
609 */ 610 */
610.section ".bss.page_aligned","wa" 611__PAGE_ALIGNED_BSS
611 .align PAGE_SIZE_asm 612 .align PAGE_SIZE_asm
612#ifdef CONFIG_X86_PAE 613#ifdef CONFIG_X86_PAE
613swapper_pg_pmd: 614swapper_pg_pmd:
@@ -625,7 +626,7 @@ ENTRY(empty_zero_page)
625 * This starts the data section. 626 * This starts the data section.
626 */ 627 */
627#ifdef CONFIG_X86_PAE 628#ifdef CONFIG_X86_PAE
628.section ".data.page_aligned","wa" 629__PAGE_ALIGNED_DATA
629 /* Page-aligned for the benefit of paravirt? */ 630 /* Page-aligned for the benefit of paravirt? */
630 .align PAGE_SIZE_asm 631 .align PAGE_SIZE_asm
631ENTRY(swapper_pg_dir) 632ENTRY(swapper_pg_dir)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index fa54f78e2a05..d0bc0a13a437 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -418,7 +418,7 @@ ENTRY(phys_base)
418ENTRY(idt_table) 418ENTRY(idt_table)
419 .skip IDT_ENTRIES * 16 419 .skip IDT_ENTRIES * 16
420 420
421 .section .bss.page_aligned, "aw", @nobits 421 __PAGE_ALIGNED_BSS
422 .align PAGE_SIZE 422 .align PAGE_SIZE
423ENTRY(empty_zero_page) 423ENTRY(empty_zero_page)
424 .skip PAGE_SIZE 424 .skip PAGE_SIZE
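
__PAGE_ALIGNED_BSS and __PAGE_ALIGNED_DATA replace the open-coded .section directives in both head_32.S and head_64.S. They are expected to expand to roughly the following (the exact section names live in include/linux/linkage.h and may differ):

	#define __PAGE_ALIGNED_DATA	.section ".data.page_aligned", "aw"
	#define __PAGE_ALIGNED_BSS	.section ".bss.page_aligned", "aw", @nobits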
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 5cf36c053ac4..23c167925a5c 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -19,12 +19,6 @@
19DEFINE_SPINLOCK(i8253_lock); 19DEFINE_SPINLOCK(i8253_lock);
20EXPORT_SYMBOL(i8253_lock); 20EXPORT_SYMBOL(i8253_lock);
21 21
22#ifdef CONFIG_X86_32
23static void pit_disable_clocksource(void);
24#else
25static inline void pit_disable_clocksource(void) { }
26#endif
27
28/* 22/*
29 * HPET replaces the PIT, when enabled. So we need to know, which of 23 * HPET replaces the PIT, when enabled. So we need to know, which of
30 * the two timers is used 24 * the two timers is used
@@ -57,12 +51,10 @@ static void init_pit_timer(enum clock_event_mode mode,
57 outb_pit(0, PIT_CH0); 51 outb_pit(0, PIT_CH0);
58 outb_pit(0, PIT_CH0); 52 outb_pit(0, PIT_CH0);
59 } 53 }
60 pit_disable_clocksource();
61 break; 54 break;
62 55
63 case CLOCK_EVT_MODE_ONESHOT: 56 case CLOCK_EVT_MODE_ONESHOT:
64 /* One shot setup */ 57 /* One shot setup */
65 pit_disable_clocksource();
66 outb_pit(0x38, PIT_MODE); 58 outb_pit(0x38, PIT_MODE);
67 break; 59 break;
68 60
@@ -200,17 +192,6 @@ static struct clocksource pit_cs = {
200 .shift = 20, 192 .shift = 20,
201}; 193};
202 194
203static void pit_disable_clocksource(void)
204{
205 /*
206 * Use mult to check whether it is registered or not
207 */
208 if (pit_cs.mult) {
209 clocksource_unregister(&pit_cs);
210 pit_cs.mult = 0;
211 }
212}
213
214static int __init init_pit_clocksource(void) 195static int __init init_pit_clocksource(void)
215{ 196{
216 /* 197 /*
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 270ff83efc11..3a54dcb9cd0e 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
20 * way process stacks are handled. This is done by having a special 20 * way process stacks are handled. This is done by having a special
21 * "init_task" linker map entry.. 21 * "init_task" linker map entry..
22 */ 22 */
23union thread_union init_thread_union 23union thread_union init_thread_union __init_task_data =
24 __attribute__((__section__(".data.init_task"))) = 24 { INIT_THREAD_INFO(init_task) };
25 { INIT_THREAD_INFO(init_task) };
26 25
27/* 26/*
28 * Initial task structure. 27 * Initial task structure.
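
__init_task_data hides the section attribute that the open-coded version spelled out; it is presumably defined along the lines of:

	#define __init_task_data \
		__attribute__((__section__(".data.init_task")))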
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index ccf8ab54f31a..40f30773fb29 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -116,7 +116,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
116 return 0; 116 return 0;
117} 117}
118 118
119static void __init init_ISA_irqs(void) 119void __init init_ISA_irqs(void)
120{ 120{
121 int i; 121 int i;
122 122
@@ -140,8 +140,10 @@ static void __init init_ISA_irqs(void)
140 } 140 }
141} 141}
142 142
143/* Overridden in paravirt.c */ 143void __init init_IRQ(void)
144void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); 144{
145 x86_init.irqs.intr_init();
146}
145 147
146static void __init smp_intr_init(void) 148static void __init smp_intr_init(void)
147{ 149{
@@ -206,39 +208,19 @@ static void __init apic_intr_init(void)
206 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 208 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
207 209
208 /* Performance monitoring interrupts: */ 210 /* Performance monitoring interrupts: */
209# ifdef CONFIG_PERF_COUNTERS 211# ifdef CONFIG_PERF_EVENTS
210 alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); 212 alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
211# endif 213# endif
212 214
213#endif 215#endif
214} 216}
215 217
216/**
217 * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
218 *
219 * Description:
220 * Perform any necessary interrupt initialisation prior to setting up
221 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
222 * interrupts should be initialised here if the machine emulates a PC
223 * in any way.
224 **/
225static void __init x86_quirk_pre_intr_init(void)
226{
227#ifdef CONFIG_X86_32
228 if (x86_quirks->arch_pre_intr_init) {
229 if (x86_quirks->arch_pre_intr_init())
230 return;
231 }
232#endif
233 init_ISA_irqs();
234}
235
236void __init native_init_IRQ(void) 218void __init native_init_IRQ(void)
237{ 219{
238 int i; 220 int i;
239 221
240 /* Execute any quirks before the call gates are initialised: */ 222 /* Execute any quirks before the call gates are initialised: */
241 x86_quirk_pre_intr_init(); 223 x86_init.irqs.pre_vector_init();
242 224
243 apic_intr_init(); 225 apic_intr_init();
244 226
@@ -258,12 +240,6 @@ void __init native_init_IRQ(void)
258 240
259#ifdef CONFIG_X86_32 241#ifdef CONFIG_X86_32
260 /* 242 /*
261 * Call quirks after call gates are initialised (usually add in
262 * the architecture specific gates):
263 */
264 x86_quirk_intr_init();
265
266 /*
267 * External FPU? Set up irq13 if so, for 243 * External FPU? Set up irq13 if so, for
268 * original braindamaged IBM FERR coupling. 244 * original braindamaged IBM FERR coupling.
269 */ 245 */
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index e5efcdcca31b..feaeb0d3aa4f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -22,6 +22,8 @@
22#include <asm/msr.h> 22#include <asm/msr.h>
23#include <asm/apic.h> 23#include <asm/apic.h>
24#include <linux/percpu.h> 24#include <linux/percpu.h>
25
26#include <asm/x86_init.h>
25#include <asm/reboot.h> 27#include <asm/reboot.h>
26 28
27#define KVM_SCALE 22 29#define KVM_SCALE 22
@@ -182,12 +184,13 @@ void __init kvmclock_init(void)
182 if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) { 184 if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
183 if (kvm_register_clock("boot clock")) 185 if (kvm_register_clock("boot clock"))
184 return; 186 return;
185 pv_time_ops.get_wallclock = kvm_get_wallclock;
186 pv_time_ops.set_wallclock = kvm_set_wallclock;
187 pv_time_ops.sched_clock = kvm_clock_read; 187 pv_time_ops.sched_clock = kvm_clock_read;
188 pv_time_ops.get_tsc_khz = kvm_get_tsc_khz; 188 x86_platform.calibrate_tsc = kvm_get_tsc_khz;
189 x86_platform.get_wallclock = kvm_get_wallclock;
190 x86_platform.set_wallclock = kvm_set_wallclock;
189#ifdef CONFIG_X86_LOCAL_APIC 191#ifdef CONFIG_X86_LOCAL_APIC
190 pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock; 192 x86_cpuinit.setup_percpu_clockev =
193 kvm_setup_secondary_clock;
191#endif 194#endif
192#ifdef CONFIG_SMP 195#ifdef CONFIG_SMP
193 smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; 196 smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
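
The kvmclock conversion illustrates the split: sched_clock stays in pv_time_ops, wallclock access and TSC calibration move to x86_platform, and the secondary-CPU clock event setup moves to x86_cpuinit. The relevant slice of the ops structures looks roughly like this (a sketch of asm/x86_init.h; field order is illustrative):

	struct x86_platform_ops {
		unsigned long (*calibrate_tsc)(void);
		unsigned long (*get_wallclock)(void);
		int (*set_wallclock)(unsigned long nowtime);
	};

	struct x86_cpuinit_ops {
		void (*setup_percpu_clockev)(void);
	};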
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 71f1d99a635d..ec6ef60cbd17 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
67#ifdef CONFIG_SMP 67#ifdef CONFIG_SMP
68 preempt_disable(); 68 preempt_disable();
69 load_LDT(pc); 69 load_LDT(pc);
70 if (!cpus_equal(current->mm->cpu_vm_mask, 70 if (!cpumask_equal(mm_cpumask(current->mm),
71 cpumask_of_cpu(smp_processor_id()))) 71 cpumask_of(smp_processor_id())))
72 smp_call_function(flush_ldt, current->mm, 1); 72 smp_call_function(flush_ldt, current->mm, 1);
73 preempt_enable(); 73 preempt_enable();
74#else 74#else
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 9371448290ac..378e9a8f1bf8 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -210,8 +210,8 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
210{ 210{
211 ssize_t ret = -EINVAL; 211 ssize_t ret = -EINVAL;
212 212
213 if ((len >> PAGE_SHIFT) > num_physpages) { 213 if ((len >> PAGE_SHIFT) > totalram_pages) {
214 pr_err("microcode: too much data (max %ld pages)\n", num_physpages); 214 pr_err("microcode: too much data (max %ld pages)\n", totalram_pages);
215 return ret; 215 return ret;
216 } 216 }
217 217
@@ -236,7 +236,7 @@ static const struct file_operations microcode_fops = {
236static struct miscdevice microcode_dev = { 236static struct miscdevice microcode_dev = {
237 .minor = MICROCODE_MINOR, 237 .minor = MICROCODE_MINOR,
238 .name = "microcode", 238 .name = "microcode",
239 .devnode = "cpu/microcode", 239 .nodename = "cpu/microcode",
240 .fops = &microcode_fops, 240 .fops = &microcode_fops,
241}; 241};
242 242
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index fcd513bf2846..5be95ef4ffec 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -45,6 +45,11 @@ static int __init mpf_checksum(unsigned char *mp, int len)
45 return sum & 0xFF; 45 return sum & 0xFF;
46} 46}
47 47
48int __init default_mpc_apic_id(struct mpc_cpu *m)
49{
50 return m->apicid;
51}
52
48static void __init MP_processor_info(struct mpc_cpu *m) 53static void __init MP_processor_info(struct mpc_cpu *m)
49{ 54{
50 int apicid; 55 int apicid;
@@ -55,10 +60,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
55 return; 60 return;
56 } 61 }
57 62
58 if (x86_quirks->mpc_apic_id) 63 apicid = x86_init.mpparse.mpc_apic_id(m);
59 apicid = x86_quirks->mpc_apic_id(m);
60 else
61 apicid = m->apicid;
62 64
63 if (m->cpuflag & CPU_BOOTPROCESSOR) { 65 if (m->cpuflag & CPU_BOOTPROCESSOR) {
64 bootup_cpu = " (Bootup-CPU)"; 66 bootup_cpu = " (Bootup-CPU)";
@@ -70,16 +72,18 @@ static void __init MP_processor_info(struct mpc_cpu *m)
70} 72}
71 73
72#ifdef CONFIG_X86_IO_APIC 74#ifdef CONFIG_X86_IO_APIC
73static void __init MP_bus_info(struct mpc_bus *m) 75void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
74{ 76{
75 char str[7];
76 memcpy(str, m->bustype, 6); 77 memcpy(str, m->bustype, 6);
77 str[6] = 0; 78 str[6] = 0;
79 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
80}
78 81
79 if (x86_quirks->mpc_oem_bus_info) 82static void __init MP_bus_info(struct mpc_bus *m)
80 x86_quirks->mpc_oem_bus_info(m, str); 83{
81 else 84 char str[7];
82 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str); 85
86 x86_init.mpparse.mpc_oem_bus_info(m, str);
83 87
84#if MAX_MP_BUSSES < 256 88#if MAX_MP_BUSSES < 256
85 if (m->busid >= MAX_MP_BUSSES) { 89 if (m->busid >= MAX_MP_BUSSES) {
@@ -96,8 +100,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
96 mp_bus_id_to_type[m->busid] = MP_BUS_ISA; 100 mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
97#endif 101#endif
98 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { 102 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
99 if (x86_quirks->mpc_oem_pci_bus) 103 if (x86_init.mpparse.mpc_oem_pci_bus)
100 x86_quirks->mpc_oem_pci_bus(m); 104 x86_init.mpparse.mpc_oem_pci_bus(m);
101 105
102 clear_bit(m->busid, mp_bus_not_pci); 106 clear_bit(m->busid, mp_bus_not_pci);
103#if defined(CONFIG_EISA) || defined(CONFIG_MCA) 107#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
@@ -291,6 +295,8 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
291 1, mpc, mpc->length, 1); 295 1, mpc, mpc->length, 1);
292} 296}
293 297
298void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
299
294static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) 300static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
295{ 301{
296 char str[16]; 302 char str[16];
@@ -312,16 +318,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
312 if (early) 318 if (early)
313 return 1; 319 return 1;
314 320
315 if (mpc->oemptr && x86_quirks->smp_read_mpc_oem) { 321 if (mpc->oemptr)
316 struct mpc_oemtable *oem_table = (void *)(long)mpc->oemptr; 322 x86_init.mpparse.smp_read_mpc_oem(mpc);
317 x86_quirks->smp_read_mpc_oem(oem_table, mpc->oemsize);
318 }
319 323
320 /* 324 /*
321 * Now process the configuration blocks. 325 * Now process the configuration blocks.
322 */ 326 */
323 if (x86_quirks->mpc_record) 327 x86_init.mpparse.mpc_record(0);
324 *x86_quirks->mpc_record = 0;
325 328
326 while (count < mpc->length) { 329 while (count < mpc->length) {
327 switch (*mpt) { 330 switch (*mpt) {
@@ -353,8 +356,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
353 count = mpc->length; 356 count = mpc->length;
354 break; 357 break;
355 } 358 }
356 if (x86_quirks->mpc_record) 359 x86_init.mpparse.mpc_record(1);
357 (*x86_quirks->mpc_record)++;
358 } 360 }
359 361
360#ifdef CONFIG_X86_BIGSMP 362#ifdef CONFIG_X86_BIGSMP
@@ -608,7 +610,7 @@ static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
608/* 610/*
609 * Scan the memory blocks for an SMP configuration block. 611 * Scan the memory blocks for an SMP configuration block.
610 */ 612 */
611static void __init __get_smp_config(unsigned int early) 613void __init default_get_smp_config(unsigned int early)
612{ 614{
613 struct mpf_intel *mpf = mpf_found; 615 struct mpf_intel *mpf = mpf_found;
614 616
@@ -625,11 +627,6 @@ static void __init __get_smp_config(unsigned int early)
625 if (acpi_lapic && acpi_ioapic) 627 if (acpi_lapic && acpi_ioapic)
626 return; 628 return;
627 629
628 if (x86_quirks->mach_get_smp_config) {
629 if (x86_quirks->mach_get_smp_config(early))
630 return;
631 }
632
633 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", 630 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
634 mpf->specification); 631 mpf->specification);
635#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) 632#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
@@ -670,16 +667,6 @@ static void __init __get_smp_config(unsigned int early)
670 */ 667 */
671} 668}
672 669
673void __init early_get_smp_config(void)
674{
675 __get_smp_config(1);
676}
677
678void __init get_smp_config(void)
679{
680 __get_smp_config(0);
681}
682
683static void __init smp_reserve_bootmem(struct mpf_intel *mpf) 670static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
684{ 671{
685 unsigned long size = get_mpc_size(mpf->physptr); 672 unsigned long size = get_mpc_size(mpf->physptr);
@@ -745,14 +732,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
745 return 0; 732 return 0;
746} 733}
747 734
748static void __init __find_smp_config(unsigned int reserve) 735void __init default_find_smp_config(unsigned int reserve)
749{ 736{
750 unsigned int address; 737 unsigned int address;
751 738
752 if (x86_quirks->mach_find_smp_config) {
753 if (x86_quirks->mach_find_smp_config(reserve))
754 return;
755 }
756 /* 739 /*
757 * FIXME: Linux assumes you have 640K of base ram.. 740 * FIXME: Linux assumes you have 640K of base ram..
758 * this continues the error... 741 * this continues the error...
@@ -787,16 +770,6 @@ static void __init __find_smp_config(unsigned int reserve)
787 smp_scan_config(address, 0x400, reserve); 770 smp_scan_config(address, 0x400, reserve);
788} 771}
789 772
790void __init early_find_smp_config(void)
791{
792 __find_smp_config(0);
793}
794
795void __init find_smp_config(void)
796{
797 __find_smp_config(1);
798}
799
800#ifdef CONFIG_X86_IO_APIC 773#ifdef CONFIG_X86_IO_APIC
801static u8 __initdata irq_used[MAX_IRQ_SOURCES]; 774static u8 __initdata irq_used[MAX_IRQ_SOURCES];
802 775
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
new file mode 100644
index 000000000000..3b7078abc871
--- /dev/null
+++ b/arch/x86/kernel/mrst.c
@@ -0,0 +1,24 @@
1/*
2 * mrst.c: Intel Moorestown platform specific setup code
3 *
4 * (C) Copyright 2008 Intel Corporation
5 * Author: Jacob Pan (jacob.jun.pan@intel.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12#include <linux/init.h>
13
14#include <asm/setup.h>
15
16/*
17 * Moorestown specific x86_init function overrides and early setup
18 * calls.
19 */
20void __init x86_mrst_early_setup(void)
21{
22 x86_init.resources.probe_roms = x86_init_noop;
23 x86_init.resources.reserve_resources = x86_init_noop;
24}
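
Hooks a platform wants to disable are pointed at shared stubs rather than guarded by if() checks at each call site; x86_init_noop is presumably just the trivial

	void __init x86_init_noop(void) { }

so the Moorestown path skips ROM probing and resource reservation while every caller keeps an unconditional indirect call.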
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 7dd950094178..6a3cefc7dda1 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -241,7 +241,7 @@ static struct notifier_block __refdata msr_class_cpu_notifier = {
241 .notifier_call = msr_class_cpu_callback, 241 .notifier_call = msr_class_cpu_callback,
242}; 242};
243 243
244static char *msr_nodename(struct device *dev) 244static char *msr_devnode(struct device *dev, mode_t *mode)
245{ 245{
246 return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt)); 246 return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
247} 247}
@@ -262,7 +262,7 @@ static int __init msr_init(void)
262 err = PTR_ERR(msr_class); 262 err = PTR_ERR(msr_class);
263 goto out_chrdev; 263 goto out_chrdev;
264 } 264 }
265 msr_class->nodename = msr_nodename; 265 msr_class->devnode = msr_devnode;
266 for_each_online_cpu(i) { 266 for_each_online_cpu(i) {
267 err = msr_device_create(i); 267 err = msr_device_create(i);
268 if (err != 0) 268 if (err != 0)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f5b0b4a01fb2..1b1739d16310 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -54,17 +54,12 @@ u64 _paravirt_ident_64(u64 x)
54 return x; 54 return x;
55} 55}
56 56
57static void __init default_banner(void) 57void __init default_banner(void)
58{ 58{
59 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 59 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
60 pv_info.name); 60 pv_info.name);
61} 61}
62 62
63char *memory_setup(void)
64{
65 return pv_init_ops.memory_setup();
66}
67
68/* Simple instruction patching code. */ 63/* Simple instruction patching code. */
69#define DEF_NATIVE(ops, name, code) \ 64#define DEF_NATIVE(ops, name, code) \
70 extern const char start_##ops##_##name[], end_##ops##_##name[]; \ 65 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
@@ -188,11 +183,6 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
188 return insn_len; 183 return insn_len;
189} 184}
190 185
191void init_IRQ(void)
192{
193 pv_irq_ops.init_IRQ();
194}
195
196static void native_flush_tlb(void) 186static void native_flush_tlb(void)
197{ 187{
198 __native_flush_tlb(); 188 __native_flush_tlb();
@@ -218,13 +208,6 @@ extern void native_irq_enable_sysexit(void);
218extern void native_usergs_sysret32(void); 208extern void native_usergs_sysret32(void);
219extern void native_usergs_sysret64(void); 209extern void native_usergs_sysret64(void);
220 210
221static int __init print_banner(void)
222{
223 pv_init_ops.banner();
224 return 0;
225}
226core_initcall(print_banner);
227
228static struct resource reserve_ioports = { 211static struct resource reserve_ioports = {
229 .start = 0, 212 .start = 0,
230 .end = IO_SPACE_LIMIT, 213 .end = IO_SPACE_LIMIT,
@@ -320,21 +303,13 @@ struct pv_info pv_info = {
320 303
321struct pv_init_ops pv_init_ops = { 304struct pv_init_ops pv_init_ops = {
322 .patch = native_patch, 305 .patch = native_patch,
323 .banner = default_banner,
324 .arch_setup = paravirt_nop,
325 .memory_setup = machine_specific_memory_setup,
326}; 306};
327 307
328struct pv_time_ops pv_time_ops = { 308struct pv_time_ops pv_time_ops = {
329 .time_init = hpet_time_init,
330 .get_wallclock = native_get_wallclock,
331 .set_wallclock = native_set_wallclock,
332 .sched_clock = native_sched_clock, 309 .sched_clock = native_sched_clock,
333 .get_tsc_khz = native_calibrate_tsc,
334}; 310};
335 311
336struct pv_irq_ops pv_irq_ops = { 312struct pv_irq_ops pv_irq_ops = {
337 .init_IRQ = native_init_IRQ,
338 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), 313 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
339 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), 314 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
340 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), 315 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
@@ -409,8 +384,6 @@ struct pv_cpu_ops pv_cpu_ops = {
409 384
410struct pv_apic_ops pv_apic_ops = { 385struct pv_apic_ops pv_apic_ops = {
411#ifdef CONFIG_X86_LOCAL_APIC 386#ifdef CONFIG_X86_LOCAL_APIC
412 .setup_boot_clock = setup_boot_APIC_clock,
413 .setup_secondary_clock = setup_secondary_APIC_clock,
414 .startup_ipi_hook = paravirt_nop, 387 .startup_ipi_hook = paravirt_nop,
415#endif 388#endif
416}; 389};
@@ -424,13 +397,6 @@ struct pv_apic_ops pv_apic_ops = {
424#endif 397#endif
425 398
426struct pv_mmu_ops pv_mmu_ops = { 399struct pv_mmu_ops pv_mmu_ops = {
427#ifndef CONFIG_X86_64
428 .pagetable_setup_start = native_pagetable_setup_start,
429 .pagetable_setup_done = native_pagetable_setup_done,
430#else
431 .pagetable_setup_start = paravirt_nop,
432 .pagetable_setup_done = paravirt_nop,
433#endif
434 400
435 .read_cr2 = native_read_cr2, 401 .read_cr2 = native_read_cr2,
436 .write_cr2 = native_write_cr2, 402 .write_cr2 = native_write_cr2,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index e8a35016115f..aaa6b7839f1e 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -46,9 +46,8 @@ void __init pci_swiotlb_init(void)
46{ 46{
47 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 47 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
48#ifdef CONFIG_X86_64 48#ifdef CONFIG_X86_64
49 if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) || 49 if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
50 iommu_pass_through) 50 swiotlb = 1;
51 swiotlb = 1;
52#endif 51#endif
53 if (swiotlb_force) 52 if (swiotlb_force)
54 swiotlb = 1; 53 swiotlb = 1;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 071166a4ba83..5284cd2b5776 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -9,7 +9,7 @@
9#include <linux/pm.h> 9#include <linux/pm.h>
10#include <linux/clockchips.h> 10#include <linux/clockchips.h>
11#include <linux/random.h> 11#include <linux/random.h>
12#include <trace/power.h> 12#include <trace/events/power.h>
13#include <asm/system.h> 13#include <asm/system.h>
14#include <asm/apic.h> 14#include <asm/apic.h>
15#include <asm/syscalls.h> 15#include <asm/syscalls.h>
@@ -25,9 +25,6 @@ EXPORT_SYMBOL(idle_nomwait);
25 25
26struct kmem_cache *task_xstate_cachep; 26struct kmem_cache *task_xstate_cachep;
27 27
28DEFINE_TRACE(power_start);
29DEFINE_TRACE(power_end);
30
31int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 28int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
32{ 29{
33 *dst = *src; 30 *dst = *src;
@@ -299,9 +296,7 @@ static inline int hlt_use_halt(void)
299void default_idle(void) 296void default_idle(void)
300{ 297{
301 if (hlt_use_halt()) { 298 if (hlt_use_halt()) {
302 struct power_trace it; 299 trace_power_start(POWER_CSTATE, 1);
303
304 trace_power_start(&it, POWER_CSTATE, 1);
305 current_thread_info()->status &= ~TS_POLLING; 300 current_thread_info()->status &= ~TS_POLLING;
306 /* 301 /*
307 * TS_POLLING-cleared state must be visible before we 302 * TS_POLLING-cleared state must be visible before we
@@ -314,7 +309,6 @@ void default_idle(void)
314 else 309 else
315 local_irq_enable(); 310 local_irq_enable();
316 current_thread_info()->status |= TS_POLLING; 311 current_thread_info()->status |= TS_POLLING;
317 trace_power_end(&it);
318 } else { 312 } else {
319 local_irq_enable(); 313 local_irq_enable();
320 /* loop is done by the caller */ 314 /* loop is done by the caller */
@@ -372,9 +366,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
372 */ 366 */
373void mwait_idle_with_hints(unsigned long ax, unsigned long cx) 367void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
374{ 368{
375 struct power_trace it; 369 trace_power_start(POWER_CSTATE, (ax>>4)+1);
376
377 trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
378 if (!need_resched()) { 370 if (!need_resched()) {
379 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) 371 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
380 clflush((void *)&current_thread_info()->flags); 372 clflush((void *)&current_thread_info()->flags);
@@ -384,15 +376,13 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
384 if (!need_resched()) 376 if (!need_resched())
385 __mwait(ax, cx); 377 __mwait(ax, cx);
386 } 378 }
387 trace_power_end(&it);
388} 379}
389 380
390/* Default MONITOR/MWAIT with no hints, used for default C1 state */ 381/* Default MONITOR/MWAIT with no hints, used for default C1 state */
391static void mwait_idle(void) 382static void mwait_idle(void)
392{ 383{
393 struct power_trace it;
394 if (!need_resched()) { 384 if (!need_resched()) {
395 trace_power_start(&it, POWER_CSTATE, 1); 385 trace_power_start(POWER_CSTATE, 1);
396 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) 386 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
397 clflush((void *)&current_thread_info()->flags); 387 clflush((void *)&current_thread_info()->flags);
398 388
@@ -402,7 +392,6 @@ static void mwait_idle(void)
402 __sti_mwait(0, 0); 392 __sti_mwait(0, 0);
403 else 393 else
404 local_irq_enable(); 394 local_irq_enable();
405 trace_power_end(&it);
406 } else 395 } else
407 local_irq_enable(); 396 local_irq_enable();
408} 397}
@@ -414,13 +403,11 @@ static void mwait_idle(void)
414 */ 403 */
415static void poll_idle(void) 404static void poll_idle(void)
416{ 405{
417 struct power_trace it; 406 trace_power_start(POWER_CSTATE, 0);
418
419 trace_power_start(&it, POWER_CSTATE, 0);
420 local_irq_enable(); 407 local_irq_enable();
421 while (!need_resched()) 408 while (!need_resched())
422 cpu_relax(); 409 cpu_relax();
423 trace_power_end(&it); 410 trace_power_end(0);
424} 411}
425 412
426/* 413/*
@@ -568,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
568void __init init_c1e_mask(void) 555void __init init_c1e_mask(void)
569{ 556{
570 /* If we're using c1e_idle, we need to allocate c1e_mask. */ 557 /* If we're using c1e_idle, we need to allocate c1e_mask. */
571 if (pm_idle == c1e_idle) { 558 if (pm_idle == c1e_idle)
572 alloc_cpumask_var(&c1e_mask, GFP_KERNEL); 559 zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
573 cpumask_clear(c1e_mask);
574 }
575} 560}
576 561
577static int __init idle_setup(char *str) 562static int __init idle_setup(char *str)
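
The idle-path conversion drops the on-stack struct power_trace cookie in favor of plain tracepoint calls; the before/after call shape, condensed from the hunks above:

	/* old */
	struct power_trace it;
	trace_power_start(&it, POWER_CSTATE, 1);
	/* ... idle ... */
	trace_power_end(&it);

	/* new */
	trace_power_start(POWER_CSTATE, 1);
	/* ... idle ... */
	trace_power_end(0);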
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 8d7d5c9c1be3..7b058a2dc66a 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -325,16 +325,6 @@ static int putreg(struct task_struct *child,
325 return set_flags(child, value); 325 return set_flags(child, value);
326 326
327#ifdef CONFIG_X86_64 327#ifdef CONFIG_X86_64
328 /*
329 * Orig_ax is really just a flag with small positive and
330 * negative values, so make sure to always sign-extend it
331 * from 32 bits so that it works correctly regardless of
332 * whether we come from a 32-bit environment or not.
333 */
334 case offsetof(struct user_regs_struct, orig_ax):
335 value = (long) (s32) value;
336 break;
337
338 case offsetof(struct user_regs_struct,fs_base): 328 case offsetof(struct user_regs_struct,fs_base):
339 if (value >= TASK_SIZE_OF(child)) 329 if (value >= TASK_SIZE_OF(child))
340 return -EIO; 330 return -EIO;
@@ -1126,10 +1116,15 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value)
1126 1116
1127 case offsetof(struct user32, regs.orig_eax): 1117 case offsetof(struct user32, regs.orig_eax):
1128 /* 1118 /*
1129 * Sign-extend the value so that orig_eax = -1 1119 * A 32-bit debugger setting orig_eax means to restore
1130 * causes (long)orig_ax < 0 tests to fire correctly. 1120 * the state of the task restarting a 32-bit syscall.
1121 * Make sure we interpret the -ERESTART* codes correctly
1122 * in case the task is not actually still sitting at the
1123 * exit from a 32-bit syscall with TS_COMPAT still set.
1131 */ 1124 */
1132 regs->orig_ax = (long) (s32) value; 1125 regs->orig_ax = value;
1126 if (syscall_get_nr(child, regs) >= 0)
1127 task_thread_info(child)->status |= TS_COMPAT;
1133 break; 1128 break;
1134 1129
1135 case offsetof(struct user32, regs.eflags): 1130 case offsetof(struct user32, regs.eflags):
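
Both the removed putreg() case and the rewritten putreg32() comment hinge on how a 32-bit orig_eax value lands in the 64-bit orig_ax field. A standalone sketch of the sign-extension arithmetic, as plain userspace C with illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t value = (uint32_t)-1;		/* debugger writes orig_eax = -1 */
		long raw       = value;			/* 4294967295: (long)orig_ax < 0 is false */
		long extended  = (long)(int32_t)value;	/* -1: -ERESTART* tests fire correctly */

		printf("raw=%ld extended=%ld\n", raw, extended);
		return 0;
	}

The new code stores the raw value and instead sets TS_COMPAT when a syscall is pending, so the -ERESTART* interpretation is driven by explicit compat state rather than by sign-extension.
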
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 5d465b207e72..1cfbbfc3ae26 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -8,6 +8,7 @@
8#include <linux/pnp.h> 8#include <linux/pnp.h>
9 9
10#include <asm/vsyscall.h> 10#include <asm/vsyscall.h>
11#include <asm/x86_init.h>
11#include <asm/time.h> 12#include <asm/time.h>
12 13
13#ifdef CONFIG_X86_32 14#ifdef CONFIG_X86_32
@@ -165,33 +166,29 @@ void rtc_cmos_write(unsigned char val, unsigned char addr)
165} 166}
166EXPORT_SYMBOL(rtc_cmos_write); 167EXPORT_SYMBOL(rtc_cmos_write);
167 168
168static int set_rtc_mmss(unsigned long nowtime) 169int update_persistent_clock(struct timespec now)
169{ 170{
170 unsigned long flags; 171 unsigned long flags;
171 int retval; 172 int retval;
172 173
173 spin_lock_irqsave(&rtc_lock, flags); 174 spin_lock_irqsave(&rtc_lock, flags);
174 retval = set_wallclock(nowtime); 175 retval = x86_platform.set_wallclock(now.tv_sec);
175 spin_unlock_irqrestore(&rtc_lock, flags); 176 spin_unlock_irqrestore(&rtc_lock, flags);
176 177
177 return retval; 178 return retval;
178} 179}
179 180
180/* not static: needed by APM */ 181/* not static: needed by APM */
181unsigned long read_persistent_clock(void) 182void read_persistent_clock(struct timespec *ts)
182{ 183{
183 unsigned long retval, flags; 184 unsigned long retval, flags;
184 185
185 spin_lock_irqsave(&rtc_lock, flags); 186 spin_lock_irqsave(&rtc_lock, flags);
186 retval = get_wallclock(); 187 retval = x86_platform.get_wallclock();
187 spin_unlock_irqrestore(&rtc_lock, flags); 188 spin_unlock_irqrestore(&rtc_lock, flags);
188 189
189 return retval; 190 ts->tv_sec = retval;
190} 191 ts->tv_nsec = 0;
191
192int update_persistent_clock(struct timespec now)
193{
194 return set_rtc_mmss(now.tv_sec);
195} 192}
196 193
197unsigned long long native_read_tsc(void) 194unsigned long long native_read_tsc(void)
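
read_persistent_clock() now fills a caller-supplied struct timespec instead of returning bare seconds, and both wallclock paths route through the x86_platform ops. A sketch of the new contract, using a hypothetical wallclock source in place of the CMOS read:

	#include <linux/time.h>

	/* hypothetical wallclock source; real platforms read CMOS, EFI, etc. */
	static unsigned long demo_get_wallclock(void)
	{
		return 1234567890UL;		/* seconds since the epoch */
	}

	void read_persistent_clock(struct timespec *ts)
	{
		ts->tv_sec  = demo_get_wallclock();
		ts->tv_nsec = 0;		/* RTC resolution is one second */
	}
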
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 19f15c4076fb..e09f0e2c14b5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -27,6 +27,7 @@
27#include <linux/screen_info.h> 27#include <linux/screen_info.h>
28#include <linux/ioport.h> 28#include <linux/ioport.h>
29#include <linux/acpi.h> 29#include <linux/acpi.h>
30#include <linux/sfi.h>
30#include <linux/apm_bios.h> 31#include <linux/apm_bios.h>
31#include <linux/initrd.h> 32#include <linux/initrd.h>
32#include <linux/bootmem.h> 33#include <linux/bootmem.h>
@@ -109,10 +110,6 @@
109#include <asm/numa_64.h> 110#include <asm/numa_64.h>
110#endif 111#endif
111 112
112#ifndef ARCH_SETUP
113#define ARCH_SETUP
114#endif
115
116/* 113/*
117 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. 114 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
118 * The direct mapping extends to max_pfn_mapped, so that we can directly access 115 * The direct mapping extends to max_pfn_mapped, so that we can directly access
@@ -134,9 +131,9 @@ int default_cpu_present_to_apicid(int mps_cpu)
134 return __default_cpu_present_to_apicid(mps_cpu); 131 return __default_cpu_present_to_apicid(mps_cpu);
135} 132}
136 133
137int default_check_phys_apicid_present(int boot_cpu_physical_apicid) 134int default_check_phys_apicid_present(int phys_apicid)
138{ 135{
139 return __default_check_phys_apicid_present(boot_cpu_physical_apicid); 136 return __default_check_phys_apicid_present(phys_apicid);
140} 137}
141#endif 138#endif
142 139
@@ -172,13 +169,6 @@ static struct resource bss_resource = {
172 169
173 170
174#ifdef CONFIG_X86_32 171#ifdef CONFIG_X86_32
175static struct resource video_ram_resource = {
176 .name = "Video RAM area",
177 .start = 0xa0000,
178 .end = 0xbffff,
179 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
180};
181
182/* cpu data as detected by the assembly code in head.S */ 172/* cpu data as detected by the assembly code in head.S */
183struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; 173struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
184/* common cpu data for all cpus */ 174/* common cpu data for all cpus */
@@ -606,7 +596,7 @@ static struct resource standard_io_resources[] = {
606 .flags = IORESOURCE_BUSY | IORESOURCE_IO } 596 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
607}; 597};
608 598
609static void __init reserve_standard_io_resources(void) 599void __init reserve_standard_io_resources(void)
610{ 600{
611 int i; 601 int i;
612 602
@@ -638,10 +628,6 @@ static int __init setup_elfcorehdr(char *arg)
638early_param("elfcorehdr", setup_elfcorehdr); 628early_param("elfcorehdr", setup_elfcorehdr);
639#endif 629#endif
640 630
641static struct x86_quirks default_x86_quirks __initdata;
642
643struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
644
645#ifdef CONFIG_X86_RESERVE_LOW_64K 631#ifdef CONFIG_X86_RESERVE_LOW_64K
646static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) 632static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
647{ 633{
@@ -712,21 +698,6 @@ void __init setup_arch(char **cmdline_p)
712 printk(KERN_INFO "Command line: %s\n", boot_command_line); 698 printk(KERN_INFO "Command line: %s\n", boot_command_line);
713#endif 699#endif
714 700
715 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
716 *cmdline_p = command_line;
717
718#ifdef CONFIG_X86_64
719 /*
720 * Must call this twice: Once just to detect whether hardware doesn't
721 * support NX (so that the early EHCI debug console setup can safely
722 * call set_fixmap(), and then again after parsing early parameters to
723 * honor the respective command line option.
724 */
725 check_efer();
726#endif
727
728 parse_early_param();
729
730 /* VMI may relocate the fixmap; do this before touching ioremap area */ 701 /* VMI may relocate the fixmap; do this before touching ioremap area */
731 vmi_init(); 702 vmi_init();
732 703
@@ -773,7 +744,7 @@ void __init setup_arch(char **cmdline_p)
773 } 744 }
774#endif 745#endif
775 746
776 ARCH_SETUP 747 x86_init.oem.arch_setup();
777 748
778 setup_memory_map(); 749 setup_memory_map();
779 parse_setup_data(); 750 parse_setup_data();
@@ -809,6 +780,21 @@ void __init setup_arch(char **cmdline_p)
809#endif 780#endif
810#endif 781#endif
811 782
783 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
784 *cmdline_p = command_line;
785
786#ifdef CONFIG_X86_64
787 /*
788 * Must call this twice: Once just to detect whether hardware doesn't
789 * support NX (so that the early EHCI debug console setup can safely
790 * call set_fixmap(), and then again after parsing early parameters to
791 * honor the respective command line option.
792 */
793 check_efer();
794#endif
795
796 parse_early_param();
797
812#ifdef CONFIG_X86_64 798#ifdef CONFIG_X86_64
813 check_efer(); 799 check_efer();
814#endif 800#endif
@@ -844,11 +830,9 @@ void __init setup_arch(char **cmdline_p)
844 * VMware detection requires dmi to be available, so this 830 * VMware detection requires dmi to be available, so this
845 * needs to be done after dmi_scan_machine, for the BP. 831 * needs to be done after dmi_scan_machine, for the BP.
846 */ 832 */
847 init_hypervisor(&boot_cpu_data); 833 init_hypervisor_platform();
848 834
849#ifdef CONFIG_X86_32 835 x86_init.resources.probe_roms();
850 probe_roms();
851#endif
852 836
853 /* after parse_early_param, so could debug it */ 837 /* after parse_early_param, so could debug it */
854 insert_resource(&iomem_resource, &code_resource); 838 insert_resource(&iomem_resource, &code_resource);
@@ -983,10 +967,9 @@ void __init setup_arch(char **cmdline_p)
983 kvmclock_init(); 967 kvmclock_init();
984#endif 968#endif
985 969
986 paravirt_pagetable_setup_start(swapper_pg_dir); 970 x86_init.paging.pagetable_setup_start(swapper_pg_dir);
987 paging_init(); 971 paging_init();
988 paravirt_pagetable_setup_done(swapper_pg_dir); 972 x86_init.paging.pagetable_setup_done(swapper_pg_dir);
989 paravirt_post_allocator_init();
990 973
991 tboot_probe(); 974 tboot_probe();
992 975
@@ -1003,13 +986,13 @@ void __init setup_arch(char **cmdline_p)
1003 */ 986 */
1004 acpi_boot_init(); 987 acpi_boot_init();
1005 988
1006#if defined(CONFIG_X86_MPPARSE) || defined(CONFIG_X86_VISWS) 989 sfi_init();
990
1007 /* 991 /*
1008 * get boot-time SMP configuration: 992 * get boot-time SMP configuration:
1009 */ 993 */
1010 if (smp_found_config) 994 if (smp_found_config)
1011 get_smp_config(); 995 get_smp_config();
1012#endif
1013 996
1014 prefill_possible_map(); 997 prefill_possible_map();
1015 998
@@ -1028,10 +1011,7 @@ void __init setup_arch(char **cmdline_p)
1028 e820_reserve_resources(); 1011 e820_reserve_resources();
1029 e820_mark_nosave_regions(max_low_pfn); 1012 e820_mark_nosave_regions(max_low_pfn);
1030 1013
1031#ifdef CONFIG_X86_32 1014 x86_init.resources.reserve_resources();
1032 request_resource(&iomem_resource, &video_ram_resource);
1033#endif
1034 reserve_standard_io_resources();
1035 1015
1036 e820_setup_gap(); 1016 e820_setup_gap();
1037 1017
@@ -1043,78 +1023,22 @@ void __init setup_arch(char **cmdline_p)
1043 conswitchp = &dummy_con; 1023 conswitchp = &dummy_con;
1044#endif 1024#endif
1045#endif 1025#endif
1026 x86_init.oem.banner();
1046} 1027}
1047 1028
1048#ifdef CONFIG_X86_32 1029#ifdef CONFIG_X86_32
1049 1030
1050/** 1031static struct resource video_ram_resource = {
1051 * x86_quirk_intr_init - post gate setup interrupt initialisation 1032 .name = "Video RAM area",
1052 * 1033 .start = 0xa0000,
1053 * Description: 1034 .end = 0xbffff,
1054 * Fill in any interrupts that may have been left out by the general 1035 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
1055 * init_IRQ() routine. interrupts having to do with the machine rather
1056 * than the devices on the I/O bus (like APIC interrupts in intel MP
1057 * systems) are started here.
1058 **/
1059void __init x86_quirk_intr_init(void)
1060{
1061 if (x86_quirks->arch_intr_init) {
1062 if (x86_quirks->arch_intr_init())
1063 return;
1064 }
1065}
1066
1067/**
1068 * x86_quirk_trap_init - initialise system specific traps
1069 *
1070 * Description:
1071 * Called as the final act of trap_init(). Used in VISWS to initialise
1072 * the various board specific APIC traps.
1073 **/
1074void __init x86_quirk_trap_init(void)
1075{
1076 if (x86_quirks->arch_trap_init) {
1077 if (x86_quirks->arch_trap_init())
1078 return;
1079 }
1080}
1081
1082static struct irqaction irq0 = {
1083 .handler = timer_interrupt,
1084 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
1085 .name = "timer"
1086}; 1036};
1087 1037
1088/** 1038void __init i386_reserve_resources(void)
1089 * x86_quirk_pre_time_init - do any specific initialisations before.
1090 *
1091 **/
1092void __init x86_quirk_pre_time_init(void)
1093{ 1039{
1094 if (x86_quirks->arch_pre_time_init) 1040 request_resource(&iomem_resource, &video_ram_resource);
1095 x86_quirks->arch_pre_time_init(); 1041 reserve_standard_io_resources();
1096} 1042}
1097 1043
1098/**
1099 * x86_quirk_time_init - do any specific initialisations for the system timer.
1100 *
1101 * Description:
1102 * Must plug the system timer interrupt source at HZ into the IRQ listed
1103 * in irq_vectors.h:TIMER_IRQ
1104 **/
1105void __init x86_quirk_time_init(void)
1106{
1107 if (x86_quirks->arch_time_init) {
1108 /*
1109 * A nonzero return code does not mean failure, it means
1110 * that the architecture quirk does not want any
1111 * generic (timer) setup to be performed after this:
1112 */
1113 if (x86_quirks->arch_time_init())
1114 return;
1115 }
1116
1117 irq0.mask = cpumask_of_cpu(0);
1118 setup_irq(0, &irq0);
1119}
1120#endif /* CONFIG_X86_32 */ 1044#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/kernel/sfi.c
new file mode 100644
index 000000000000..34e099382651
--- /dev/null
+++ b/arch/x86/kernel/sfi.c
@@ -0,0 +1,122 @@
1/*
2 * sfi.c - x86 architecture SFI support.
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */
20
21#define KMSG_COMPONENT "SFI"
22#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
23
24#include <linux/acpi.h>
25#include <linux/init.h>
26#include <linux/sfi.h>
27#include <linux/io.h>
28
29#include <asm/io_apic.h>
30#include <asm/mpspec.h>
31#include <asm/setup.h>
32#include <asm/apic.h>
33
34#ifdef CONFIG_X86_LOCAL_APIC
35static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
36
37void __init mp_sfi_register_lapic_address(unsigned long address)
38{
39 mp_lapic_addr = address;
40
41 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
42 if (boot_cpu_physical_apicid == -1U)
43 boot_cpu_physical_apicid = read_apic_id();
44
45 pr_info("Boot CPU = %d\n", boot_cpu_physical_apicid);
46}
47
48/* All CPUs enumerated by SFI must be present and enabled */
49void __cpuinit mp_sfi_register_lapic(u8 id)
50{
51 if (MAX_APICS - id <= 0) {
52 pr_warning("Processor #%d invalid (max %d)\n",
53 id, MAX_APICS);
54 return;
55 }
56
57 pr_info("registering lapic[%d]\n", id);
58
59 generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR)));
60}
61
62static int __init sfi_parse_cpus(struct sfi_table_header *table)
63{
64 struct sfi_table_simple *sb;
65 struct sfi_cpu_table_entry *pentry;
66 int i;
67 int cpu_num;
68
69 sb = (struct sfi_table_simple *)table;
70 cpu_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
71 pentry = (struct sfi_cpu_table_entry *)sb->pentry;
72
73 for (i = 0; i < cpu_num; i++) {
74 mp_sfi_register_lapic(pentry->apic_id);
75 pentry++;
76 }
77
78 smp_found_config = 1;
79 return 0;
80}
81#endif /* CONFIG_X86_LOCAL_APIC */
82
83#ifdef CONFIG_X86_IO_APIC
84static u32 gsi_base;
85
86static int __init sfi_parse_ioapic(struct sfi_table_header *table)
87{
88 struct sfi_table_simple *sb;
89 struct sfi_apic_table_entry *pentry;
90 int i, num;
91
92 sb = (struct sfi_table_simple *)table;
93 num = SFI_GET_NUM_ENTRIES(sb, struct sfi_apic_table_entry);
94 pentry = (struct sfi_apic_table_entry *)sb->pentry;
95
96 for (i = 0; i < num; i++) {
97 mp_register_ioapic(i, pentry->phys_addr, gsi_base);
98 gsi_base += io_apic_get_redir_entries(i);
99 pentry++;
100 }
101
102 WARN(pic_mode, KERN_WARNING
103 "SFI: pic_mod shouldn't be 1 when IOAPIC table is present\n");
104 pic_mode = 0;
105 return 0;
106}
107#endif /* CONFIG_X86_IO_APIC */
108
109/*
110 * sfi_platform_init(): register lapics & io-apics
111 */
112int __init sfi_platform_init(void)
113{
114#ifdef CONFIG_X86_LOCAL_APIC
115 mp_sfi_register_lapic_address(sfi_lapic_addr);
116 sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_cpus);
117#endif
118#ifdef CONFIG_X86_IO_APIC
119 sfi_table_parse(SFI_SIG_APIC, NULL, NULL, sfi_parse_ioapic);
120#endif
121 return 0;
122}
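
sfi_platform_init() above shows the general SFI pattern: each table is located by a four-character signature and handed to a parser that walks fixed-size entries. A sketch of a custom handler for a hypothetical "XLAT" table of u32 entries (the signature and entry type are made up for illustration):

	static int __init sfi_parse_xlat(struct sfi_table_header *table)
	{
		struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
		int i, num = SFI_GET_NUM_ENTRIES(sb, u32);
		u32 *entry = (u32 *)sb->pentry;

		for (i = 0; i < num; i++, entry++)
			pr_info("XLAT entry %d: %#x\n", i, *entry);

		return 0;
	}

	/* registered the same way as the CPU and APIC tables above: */
	/* sfi_table_parse("XLAT", NULL, NULL, sfi_parse_xlat); */
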
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a25eeec00080..565ebc65920e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -324,7 +324,7 @@ notrace static void __cpuinit start_secondary(void *unused)
324 /* enable local interrupts */ 324 /* enable local interrupts */
325 local_irq_enable(); 325 local_irq_enable();
326 326
327 setup_secondary_clock(); 327 x86_cpuinit.setup_percpu_clockev();
328 328
329 wmb(); 329 wmb();
330 cpu_idle(); 330 cpu_idle();
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1059#endif 1059#endif
1060 current_thread_info()->cpu = 0; /* needed? */ 1060 current_thread_info()->cpu = 0; /* needed? */
1061 for_each_possible_cpu(i) { 1061 for_each_possible_cpu(i) {
1062 alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); 1062 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1063 alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); 1063 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1064 alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); 1064 zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
1065 cpumask_clear(per_cpu(cpu_core_map, i));
1066 cpumask_clear(per_cpu(cpu_sibling_map, i));
1067 cpumask_clear(cpu_data(i).llc_shared_map);
1068 } 1065 }
1069 set_cpu_sibling_map(0); 1066 set_cpu_sibling_map(0);
1070 1067
@@ -1114,7 +1111,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1114 1111
1115 printk(KERN_INFO "CPU%d: ", 0); 1112 printk(KERN_INFO "CPU%d: ", 0);
1116 print_cpu_info(&cpu_data(0)); 1113 print_cpu_info(&cpu_data(0));
1117 setup_boot_clock(); 1114 x86_init.timers.setup_percpu_clockev();
1118 1115
1119 if (is_uv_system()) 1116 if (is_uv_system())
1120 uv_system_init(); 1117 uv_system_init();
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d51321ddafda..0157cd26d7cc 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -335,4 +335,4 @@ ENTRY(sys_call_table)
335 .long sys_preadv 335 .long sys_preadv
336 .long sys_pwritev 336 .long sys_pwritev
337 .long sys_rt_tgsigqueueinfo /* 335 */ 337 .long sys_rt_tgsigqueueinfo /* 335 */
338 .long sys_perf_counter_open 338 .long sys_perf_event_open
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
new file mode 100644
index 000000000000..dcb00d278512
--- /dev/null
+++ b/arch/x86/kernel/time.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright (c) 1991,1992,1995 Linus Torvalds
3 * Copyright (c) 1994 Alan Modra
4 * Copyright (c) 1995 Markus Kuhn
5 * Copyright (c) 1996 Ingo Molnar
6 * Copyright (c) 1998 Andrea Arcangeli
7 * Copyright (c) 2002,2006 Vojtech Pavlik
8 * Copyright (c) 2003 Andi Kleen
9 *
10 */
11
12#include <linux/clockchips.h>
13#include <linux/interrupt.h>
14#include <linux/time.h>
15#include <linux/mca.h>
16
17#include <asm/vsyscall.h>
18#include <asm/x86_init.h>
19#include <asm/i8259.h>
20#include <asm/i8253.h>
21#include <asm/timer.h>
22#include <asm/hpet.h>
23#include <asm/time.h>
24
25#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
26int timer_ack;
27#endif
28
29#ifdef CONFIG_X86_64
30volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
31#endif
32
33unsigned long profile_pc(struct pt_regs *regs)
34{
35 unsigned long pc = instruction_pointer(regs);
36
37 if (!user_mode_vm(regs) && in_lock_functions(pc)) {
38#ifdef CONFIG_FRAME_POINTER
39 return *(unsigned long *)(regs->bp + sizeof(long));
40#else
41 unsigned long *sp = (unsigned long *)regs->sp;
42 /*
43 * Return address is either directly at stack pointer
44 * or above a saved flags. Eflags has bits 22-31 zero,
45 * kernel addresses don't.
46 */
47 if (sp[0] >> 22)
48 return sp[0];
49 if (sp[1] >> 22)
50 return sp[1];
51#endif
52 }
53 return pc;
54}
55EXPORT_SYMBOL(profile_pc);
56
57/*
58 * Default timer interrupt handler for PIT/HPET
59 */
60static irqreturn_t timer_interrupt(int irq, void *dev_id)
61{
62 /* Keep nmi watchdog up to date */
63 inc_irq_stat(irq0_irqs);
64
65 /* Optimized out for !IO_APIC and x86_64 */
66 if (timer_ack) {
67 /*
68 * Subtle, when I/O APICs are used we have to ack timer IRQ
69 * manually to deassert NMI lines for the watchdog if run
70 * on an 82489DX-based system.
71 */
72 spin_lock(&i8259A_lock);
73 outb(0x0c, PIC_MASTER_OCW3);
74 /* Ack the IRQ; AEOI will end it automatically. */
75 inb(PIC_MASTER_POLL);
76 spin_unlock(&i8259A_lock);
77 }
78
79 global_clock_event->event_handler(global_clock_event);
80
81 /* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */
82 if (MCA_bus)
83 outb_p(inb_p(0x61) | 0x80, 0x61);
84
85 return IRQ_HANDLED;
86}
87
88static struct irqaction irq0 = {
89 .handler = timer_interrupt,
90 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
91 .name = "timer"
92};
93
94void __init setup_default_timer_irq(void)
95{
96 setup_irq(0, &irq0);
97}
98
99/* Default timer init function */
100void __init hpet_time_init(void)
101{
102 if (!hpet_enable())
103 setup_pit_timer();
104 setup_default_timer_irq();
105}
106
107static __init void x86_late_time_init(void)
108{
109 x86_init.timers.timer_init();
110 tsc_init();
111}
112
113/*
114 * Defer timer setup and TSC init to x86_late_time_init(),
115 * which runs late enough in boot for ioremap to work.
116 */
117void __init time_init(void)
118{
119 late_time_init = x86_late_time_init;
120}
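
time_init() in the new unified time.c no longer touches hardware; it only parks x86_late_time_init in the generic late_time_init hook, which start_kernel() invokes once ioremap is usable. The control flow, reduced to a runnable userspace sketch:

	#include <stdio.h>

	static void (*late_time_init)(void);	/* generic kernel hook, set early */

	static void x86_late_time_init(void)
	{
		puts("timer_init() + tsc_init(): hardware is touched here");
	}

	static void time_init(void)
	{
		late_time_init = x86_late_time_init;	/* record, don't run */
	}

	int main(void)
	{
		time_init();			/* early boot: just registers the hook */
		/* ... paging and ioremap come up in between ... */
		if (late_time_init)
			late_time_init();	/* late boot: actual timer setup */
		return 0;
	}
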
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
deleted file mode 100644
index 5c5d87f0b2e1..000000000000
--- a/arch/x86/kernel/time_32.c
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
3 *
4 * This file contains the PC-specific time handling details:
5 * reading the RTC at bootup, etc..
6 * 1994-07-02 Alan Modra
7 * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
8 * 1995-03-26 Markus Kuhn
9 * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
10 * precision CMOS clock update
11 * 1996-05-03 Ingo Molnar
12 * fixed time warps in do_[slow|fast]_gettimeoffset()
13 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
14 * "A Kernel Model for Precision Timekeeping" by Dave Mills
15 * 1998-09-05 (Various)
16 * More robust do_fast_gettimeoffset() algorithm implemented
17 * (works with APM, Cyrix 6x86MX and Centaur C6),
18 * monotonic gettimeofday() with fast_get_timeoffset(),
19 * drift-proof precision TSC calibration on boot
20 * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
21 * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
22 * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
23 * 1998-12-16 Andrea Arcangeli
24 * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
25 * because was not accounting lost_ticks.
26 * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
27 * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
28 * serialize accesses to xtime/lost_ticks).
29 */
30
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/time.h>
34#include <linux/mca.h>
35
36#include <asm/setup.h>
37#include <asm/hpet.h>
38#include <asm/time.h>
39#include <asm/timer.h>
40
41#include <asm/do_timer.h>
42
43int timer_ack;
44
45unsigned long profile_pc(struct pt_regs *regs)
46{
47 unsigned long pc = instruction_pointer(regs);
48
49#ifdef CONFIG_SMP
50 if (!user_mode_vm(regs) && in_lock_functions(pc)) {
51#ifdef CONFIG_FRAME_POINTER
52 return *(unsigned long *)(regs->bp + sizeof(long));
53#else
54 unsigned long *sp = (unsigned long *)&regs->sp;
55
56 /* Return address is either directly at stack pointer
57 or above a saved flags. Eflags has bits 22-31 zero,
58 kernel addresses don't. */
59 if (sp[0] >> 22)
60 return sp[0];
61 if (sp[1] >> 22)
62 return sp[1];
63#endif
64 }
65#endif
66 return pc;
67}
68EXPORT_SYMBOL(profile_pc);
69
70/*
71 * This is the same as the above, except we _also_ save the current
72 * Time Stamp Counter value at the time of the timer interrupt, so that
73 * we later on can estimate the time of day more exactly.
74 */
75irqreturn_t timer_interrupt(int irq, void *dev_id)
76{
77 /* Keep nmi watchdog up to date */
78 inc_irq_stat(irq0_irqs);
79
80#ifdef CONFIG_X86_IO_APIC
81 if (timer_ack) {
82 /*
83 * Subtle, when I/O APICs are used we have to ack timer IRQ
84 * manually to deassert NMI lines for the watchdog if run
85 * on an 82489DX-based system.
86 */
87 spin_lock(&i8259A_lock);
88 outb(0x0c, PIC_MASTER_OCW3);
89 /* Ack the IRQ; AEOI will end it automatically. */
90 inb(PIC_MASTER_POLL);
91 spin_unlock(&i8259A_lock);
92 }
93#endif
94
95 do_timer_interrupt_hook();
96
97#ifdef CONFIG_MCA
98 if (MCA_bus) {
99 /* The PS/2 uses level-triggered interrupts. You can't
100 turn them off, nor would you want to (any attempt to
101 enable edge-triggered interrupts usually gets intercepted by a
102 special hardware circuit). Hence we have to acknowledge
103 the timer interrupt. Through some incredibly stupid
104 design idea, the reset for IRQ 0 is done by setting the
105 high bit of the PPI port B (0x61). Note that some PS/2s,
106 notably the 55SX, work fine if this is removed. */
107
108 u8 irq_v = inb_p(0x61); /* read the current state */
109 outb_p(irq_v | 0x80, 0x61); /* reset the IRQ */
110 }
111#endif
112
113 return IRQ_HANDLED;
114}
115
116/* Duplicate of time_init() below, with hpet_enable part added */
117void __init hpet_time_init(void)
118{
119 if (!hpet_enable())
120 setup_pit_timer();
121 x86_quirk_time_init();
122}
123
124/*
125 * This is called directly from init code; we must delay timer setup in the
126 * HPET case as we can't make the decision to turn on HPET this early in the
127 * boot process.
128 *
129 * The chosen time_init function will usually be hpet_time_init, above, but
130 * in the case of virtual hardware, an alternative function may be substituted.
131 */
132void __init time_init(void)
133{
134 x86_quirk_pre_time_init();
135 tsc_init();
136 late_time_init = choose_time_init();
137}
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
deleted file mode 100644
index 5ba343e61844..000000000000
--- a/arch/x86/kernel/time_64.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * "High Precision Event Timer" based timekeeping.
3 *
4 * Copyright (c) 1991,1992,1995 Linus Torvalds
5 * Copyright (c) 1994 Alan Modra
6 * Copyright (c) 1995 Markus Kuhn
7 * Copyright (c) 1996 Ingo Molnar
8 * Copyright (c) 1998 Andrea Arcangeli
9 * Copyright (c) 2002,2006 Vojtech Pavlik
10 * Copyright (c) 2003 Andi Kleen
11 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
12 */
13
14#include <linux/clockchips.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/module.h>
18#include <linux/time.h>
19#include <linux/mca.h>
20#include <linux/nmi.h>
21
22#include <asm/i8253.h>
23#include <asm/hpet.h>
24#include <asm/vgtod.h>
25#include <asm/time.h>
26#include <asm/timer.h>
27
28volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
29
30unsigned long profile_pc(struct pt_regs *regs)
31{
32 unsigned long pc = instruction_pointer(regs);
33
34 /* Assume the lock function has either no stack frame or a copy
35 of flags from PUSHF
36 Eflags always has bits 22 and up cleared unlike kernel addresses. */
37 if (!user_mode_vm(regs) && in_lock_functions(pc)) {
38#ifdef CONFIG_FRAME_POINTER
39 return *(unsigned long *)(regs->bp + sizeof(long));
40#else
41 unsigned long *sp = (unsigned long *)regs->sp;
42 if (sp[0] >> 22)
43 return sp[0];
44 if (sp[1] >> 22)
45 return sp[1];
46#endif
47 }
48 return pc;
49}
50EXPORT_SYMBOL(profile_pc);
51
52static irqreturn_t timer_interrupt(int irq, void *dev_id)
53{
54 inc_irq_stat(irq0_irqs);
55
56 global_clock_event->event_handler(global_clock_event);
57
58#ifdef CONFIG_MCA
59 if (MCA_bus) {
60 u8 irq_v = inb_p(0x61); /* read the current state */
61 outb_p(irq_v|0x80, 0x61); /* reset the IRQ */
62 }
63#endif
64
65 return IRQ_HANDLED;
66}
67
68/* calibrate_cpu is used on systems with fixed rate TSCs to determine
69 * processor frequency */
70#define TICK_COUNT 100000000
71unsigned long __init calibrate_cpu(void)
72{
73 int tsc_start, tsc_now;
74 int i, no_ctr_free;
75 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
76 unsigned long flags;
77
78 for (i = 0; i < 4; i++)
79 if (avail_to_resrv_perfctr_nmi_bit(i))
80 break;
81 no_ctr_free = (i == 4);
82 if (no_ctr_free) {
83 WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
84 "cpu_khz value may be incorrect.\n");
85 i = 3;
86 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
87 wrmsrl(MSR_K7_EVNTSEL3, 0);
88 rdmsrl(MSR_K7_PERFCTR3, pmc3);
89 } else {
90 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
91 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
92 }
93 local_irq_save(flags);
94 /* start measuring cycles, incrementing from 0 */
95 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
96 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
97 rdtscl(tsc_start);
98 do {
99 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
100 tsc_now = get_cycles();
101 } while ((tsc_now - tsc_start) < TICK_COUNT);
102
103 local_irq_restore(flags);
104 if (no_ctr_free) {
105 wrmsrl(MSR_K7_EVNTSEL3, 0);
106 wrmsrl(MSR_K7_PERFCTR3, pmc3);
107 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
108 } else {
109 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
110 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
111 }
112
113 return pmc_now * tsc_khz / (tsc_now - tsc_start);
114}
115
116static struct irqaction irq0 = {
117 .handler = timer_interrupt,
118 .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
119 .name = "timer"
120};
121
122void __init hpet_time_init(void)
123{
124 if (!hpet_enable())
125 setup_pit_timer();
126
127 setup_irq(0, &irq0);
128}
129
130void __init time_init(void)
131{
132 tsc_init();
133
134 late_time_init = choose_time_init();
135}
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index 808031a5ba19..699f7eeb896a 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -4,7 +4,7 @@
4#include <asm/e820.h> 4#include <asm/e820.h>
5 5
6/* ready for x86_64 and x86 */ 6/* ready for x86_64 and x86 */
7unsigned char *trampoline_base = __va(TRAMPOLINE_BASE); 7unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
8 8
9void __init reserve_trampoline_memory(void) 9void __init reserve_trampoline_memory(void)
10{ 10{
@@ -26,7 +26,7 @@ void __init reserve_trampoline_memory(void)
26 * bootstrap into the page concerned. The caller 26 * bootstrap into the page concerned. The caller
27 * has made sure it's suitably aligned. 27 * has made sure it's suitably aligned.
28 */ 28 */
29unsigned long setup_trampoline(void) 29unsigned long __cpuinit setup_trampoline(void)
30{ 30{
31 memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); 31 memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
32 return virt_to_phys(trampoline_base); 32 return virt_to_phys(trampoline_base);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index 66d874e5404c..8508237e8e43 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -28,16 +28,12 @@
28 */ 28 */
29 29
30#include <linux/linkage.h> 30#include <linux/linkage.h>
31#include <linux/init.h>
31#include <asm/segment.h> 32#include <asm/segment.h>
32#include <asm/page_types.h> 33#include <asm/page_types.h>
33 34
34/* We can free up trampoline after bootup if cpu hotplug is not supported. */ 35/* We can free up trampoline after bootup if cpu hotplug is not supported. */
35#ifndef CONFIG_HOTPLUG_CPU 36__CPUINITRODATA
36.section ".cpuinit.data","aw",@progbits
37#else
38.section .rodata,"a",@progbits
39#endif
40
41.code16 37.code16
42 38
43ENTRY(trampoline_data) 39ENTRY(trampoline_data)
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index cddfb8d386b9..596d54c660a5 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -25,14 +25,15 @@
25 */ 25 */
26 26
27#include <linux/linkage.h> 27#include <linux/linkage.h>
28#include <linux/init.h>
28#include <asm/pgtable_types.h> 29#include <asm/pgtable_types.h>
29#include <asm/page_types.h> 30#include <asm/page_types.h>
30#include <asm/msr.h> 31#include <asm/msr.h>
31#include <asm/segment.h> 32#include <asm/segment.h>
32#include <asm/processor-flags.h> 33#include <asm/processor-flags.h>
33 34
34.section .rodata, "a", @progbits 35/* We can free up the trampoline after bootup if cpu hotplug is not supported. */
35 36__CPUINITRODATA
36.code16 37.code16
37 38
38ENTRY(trampoline_data) 39ENTRY(trampoline_data)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 83264922a878..a665c71352b8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -14,7 +14,6 @@
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/kprobes.h> 15#include <linux/kprobes.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/utsname.h>
18#include <linux/kdebug.h> 17#include <linux/kdebug.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20#include <linux/module.h> 19#include <linux/module.h>
@@ -59,12 +58,12 @@
59#include <asm/mach_traps.h> 58#include <asm/mach_traps.h>
60 59
61#ifdef CONFIG_X86_64 60#ifdef CONFIG_X86_64
61#include <asm/x86_init.h>
62#include <asm/pgalloc.h> 62#include <asm/pgalloc.h>
63#include <asm/proto.h> 63#include <asm/proto.h>
64#else 64#else
65#include <asm/processor-flags.h> 65#include <asm/processor-flags.h>
66#include <asm/setup.h> 66#include <asm/setup.h>
67#include <asm/traps.h>
68 67
69asmlinkage int system_call(void); 68asmlinkage int system_call(void);
70 69
@@ -972,7 +971,5 @@ void __init trap_init(void)
972 */ 971 */
973 cpu_init(); 972 cpu_init();
974 973
975#ifdef CONFIG_X86_32 974 x86_init.irqs.trap_init();
976 x86_quirk_trap_init();
977#endif
978} 975}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 71f4368b357e..cd982f48e23e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -17,6 +17,8 @@
17#include <asm/time.h> 17#include <asm/time.h>
18#include <asm/delay.h> 18#include <asm/delay.h>
19#include <asm/hypervisor.h> 19#include <asm/hypervisor.h>
20#include <asm/nmi.h>
21#include <asm/x86_init.h>
20 22
21unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ 23unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
22EXPORT_SYMBOL(cpu_khz); 24EXPORT_SYMBOL(cpu_khz);
@@ -400,15 +402,9 @@ unsigned long native_calibrate_tsc(void)
400{ 402{
401 u64 tsc1, tsc2, delta, ref1, ref2; 403 u64 tsc1, tsc2, delta, ref1, ref2;
402 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; 404 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
403 unsigned long flags, latch, ms, fast_calibrate, hv_tsc_khz; 405 unsigned long flags, latch, ms, fast_calibrate;
404 int hpet = is_hpet_enabled(), i, loopmin; 406 int hpet = is_hpet_enabled(), i, loopmin;
405 407
406 hv_tsc_khz = get_hypervisor_tsc_freq();
407 if (hv_tsc_khz) {
408 printk(KERN_INFO "TSC: Frequency read from the hypervisor\n");
409 return hv_tsc_khz;
410 }
411
412 local_irq_save(flags); 408 local_irq_save(flags);
413 fast_calibrate = quick_pit_calibrate(); 409 fast_calibrate = quick_pit_calibrate();
414 local_irq_restore(flags); 410 local_irq_restore(flags);
@@ -566,7 +562,7 @@ int recalibrate_cpu_khz(void)
566 unsigned long cpu_khz_old = cpu_khz; 562 unsigned long cpu_khz_old = cpu_khz;
567 563
568 if (cpu_has_tsc) { 564 if (cpu_has_tsc) {
569 tsc_khz = calibrate_tsc(); 565 tsc_khz = x86_platform.calibrate_tsc();
570 cpu_khz = tsc_khz; 566 cpu_khz = tsc_khz;
571 cpu_data(0).loops_per_jiffy = 567 cpu_data(0).loops_per_jiffy =
572 cpufreq_scale(cpu_data(0).loops_per_jiffy, 568 cpufreq_scale(cpu_data(0).loops_per_jiffy,
@@ -670,7 +666,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
670 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || 666 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
671 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || 667 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
672 (val == CPUFREQ_RESUMECHANGE)) { 668 (val == CPUFREQ_RESUMECHANGE)) {
673 *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); 669 *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
674 670
675 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); 671 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
676 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 672 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
@@ -744,10 +740,16 @@ static cycle_t __vsyscall_fn vread_tsc(void)
744} 740}
745#endif 741#endif
746 742
743static void resume_tsc(void)
744{
745 clocksource_tsc.cycle_last = 0;
746}
747
747static struct clocksource clocksource_tsc = { 748static struct clocksource clocksource_tsc = {
748 .name = "tsc", 749 .name = "tsc",
749 .rating = 300, 750 .rating = 300,
750 .read = read_tsc, 751 .read = read_tsc,
752 .resume = resume_tsc,
751 .mask = CLOCKSOURCE_MASK(64), 753 .mask = CLOCKSOURCE_MASK(64),
752 .shift = 22, 754 .shift = 22,
753 .flags = CLOCK_SOURCE_IS_CONTINUOUS | 755 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
@@ -761,12 +763,14 @@ void mark_tsc_unstable(char *reason)
761{ 763{
762 if (!tsc_unstable) { 764 if (!tsc_unstable) {
763 tsc_unstable = 1; 765 tsc_unstable = 1;
764 printk("Marking TSC unstable due to %s\n", reason); 766 printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
765 /* Change only the rating, when not registered */ 767 /* Change only the rating, when not registered */
766 if (clocksource_tsc.mult) 768 if (clocksource_tsc.mult)
767 clocksource_change_rating(&clocksource_tsc, 0); 769 clocksource_mark_unstable(&clocksource_tsc);
768 else 770 else {
771 clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
769 clocksource_tsc.rating = 0; 772 clocksource_tsc.rating = 0;
773 }
770 } 774 }
771} 775}
772 776
@@ -852,15 +856,71 @@ static void __init init_tsc_clocksource(void)
852 clocksource_register(&clocksource_tsc); 856 clocksource_register(&clocksource_tsc);
853} 857}
854 858
859#ifdef CONFIG_X86_64
860/*
861 * calibrate_cpu is used on systems with fixed rate TSCs to determine
862 * processor frequency
863 */
864#define TICK_COUNT 100000000
865static unsigned long __init calibrate_cpu(void)
866{
867 int tsc_start, tsc_now;
868 int i, no_ctr_free;
869 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
870 unsigned long flags;
871
872 for (i = 0; i < 4; i++)
873 if (avail_to_resrv_perfctr_nmi_bit(i))
874 break;
875 no_ctr_free = (i == 4);
876 if (no_ctr_free) {
877 WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
878 "cpu_khz value may be incorrect.\n");
879 i = 3;
880 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
881 wrmsrl(MSR_K7_EVNTSEL3, 0);
882 rdmsrl(MSR_K7_PERFCTR3, pmc3);
883 } else {
884 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
885 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
886 }
887 local_irq_save(flags);
888 /* start measuring cycles, incrementing from 0 */
889 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
890 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
891 rdtscl(tsc_start);
892 do {
893 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
894 tsc_now = get_cycles();
895 } while ((tsc_now - tsc_start) < TICK_COUNT);
896
897 local_irq_restore(flags);
898 if (no_ctr_free) {
899 wrmsrl(MSR_K7_EVNTSEL3, 0);
900 wrmsrl(MSR_K7_PERFCTR3, pmc3);
901 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
902 } else {
903 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
904 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
905 }
906
907 return pmc_now * tsc_khz / (tsc_now - tsc_start);
908}
909#else
910static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
911#endif
912
855void __init tsc_init(void) 913void __init tsc_init(void)
856{ 914{
857 u64 lpj; 915 u64 lpj;
858 int cpu; 916 int cpu;
859 917
918 x86_init.timers.tsc_pre_init();
919
860 if (!cpu_has_tsc) 920 if (!cpu_has_tsc)
861 return; 921 return;
862 922
863 tsc_khz = calibrate_tsc(); 923 tsc_khz = x86_platform.calibrate_tsc();
864 cpu_khz = tsc_khz; 924 cpu_khz = tsc_khz;
865 925
866 if (!tsc_khz) { 926 if (!tsc_khz) {
@@ -868,11 +928,9 @@ void __init tsc_init(void)
868 return; 928 return;
869 } 929 }
870 930
871#ifdef CONFIG_X86_64
872 if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && 931 if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
873 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) 932 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
874 cpu_khz = calibrate_cpu(); 933 cpu_khz = calibrate_cpu();
875#endif
876 934
877 printk("Detected %lu.%03lu MHz processor.\n", 935 printk("Detected %lu.%03lu MHz processor.\n",
878 (unsigned long)cpu_khz / 1000, 936 (unsigned long)cpu_khz / 1000,
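
calibrate_cpu(), now private to tsc.c, derives the core frequency by racing a performance counter (AMD event 0x76, cycles not halted) against the TSC. Worked numbers for the closing formula, chosen purely for illustration: if the PMC advanced by 250,000,000 cycles while the TSC moved 100,000,000 ticks and tsc_khz is 1000, then

	cpu_khz = pmc_now * tsc_khz / (tsc_now - tsc_start)
	        = 250000000 * 1000 / 100000000
	        = 2500

i.e. the core clock runs at 2.5x the TSC reference rate.
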
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 31ffc24eec4d..f068553a1b17 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -30,6 +30,7 @@
30#include <asm/setup.h> 30#include <asm/setup.h>
31#include <asm/apic.h> 31#include <asm/apic.h>
32#include <asm/e820.h> 32#include <asm/e820.h>
33#include <asm/time.h>
33#include <asm/io.h> 34#include <asm/io.h>
34 35
35#include <linux/kernel_stat.h> 36#include <linux/kernel_stat.h>
@@ -53,7 +54,7 @@ int is_visws_box(void)
53 return visws_board_type >= 0; 54 return visws_board_type >= 0;
54} 55}
55 56
56static int __init visws_time_init(void) 57static void __init visws_time_init(void)
57{ 58{
58 printk(KERN_INFO "Starting Cobalt Timer system clock\n"); 59 printk(KERN_INFO "Starting Cobalt Timer system clock\n");
59 60
@@ -66,21 +67,13 @@ static int __init visws_time_init(void)
66 /* Enable (unmask) the timer interrupt */ 67 /* Enable (unmask) the timer interrupt */
67 co_cpu_write(CO_CPU_CTRL, co_cpu_read(CO_CPU_CTRL) & ~CO_CTRL_TIMEMASK); 68 co_cpu_write(CO_CPU_CTRL, co_cpu_read(CO_CPU_CTRL) & ~CO_CTRL_TIMEMASK);
68 69
69 /* 70 setup_default_timer_irq();
70 * Zero return means the generic timer setup code will set up
71 * the standard vector:
72 */
73 return 0;
74} 71}
75 72
76static int __init visws_pre_intr_init(void) 73/* Replaces the default init_ISA_irqs in the generic setup */
74static void __init visws_pre_intr_init(void)
77{ 75{
78 init_VISWS_APIC_irqs(); 76 init_VISWS_APIC_irqs();
79
80 /*
81 * We dont want ISA irqs to be set up by the generic code:
82 */
83 return 1;
84} 77}
85 78
86/* Quirk for machine specific memory setup. */ 79/* Quirk for machine specific memory setup. */
@@ -156,12 +149,8 @@ static void visws_machine_power_off(void)
156 outl(PIIX_SPECIAL_STOP, 0xCFC); 149 outl(PIIX_SPECIAL_STOP, 0xCFC);
157} 150}
158 151
159static int __init visws_get_smp_config(unsigned int early) 152static void __init visws_get_smp_config(unsigned int early)
160{ 153{
161 /*
162 * Prevent MP-table parsing by the generic code:
163 */
164 return 1;
165} 154}
166 155
167/* 156/*
@@ -208,7 +197,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
208 apic_version[m->apicid] = ver; 197 apic_version[m->apicid] = ver;
209} 198}
210 199
211static int __init visws_find_smp_config(unsigned int reserve) 200static void __init visws_find_smp_config(unsigned int reserve)
212{ 201{
213 struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS); 202 struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS);
214 unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS)); 203 unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS));
@@ -230,21 +219,9 @@ static int __init visws_find_smp_config(unsigned int reserve)
230 MP_processor_info(mp++); 219 MP_processor_info(mp++);
231 220
232 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; 221 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
233
234 return 1;
235} 222}
236 223
237static int visws_trap_init(void); 224static void visws_trap_init(void);
238
239static struct x86_quirks visws_x86_quirks __initdata = {
240 .arch_time_init = visws_time_init,
241 .arch_pre_intr_init = visws_pre_intr_init,
242 .arch_memory_setup = visws_memory_setup,
243 .arch_intr_init = NULL,
244 .arch_trap_init = visws_trap_init,
245 .mach_get_smp_config = visws_get_smp_config,
246 .mach_find_smp_config = visws_find_smp_config,
247};
248 225
249void __init visws_early_detect(void) 226void __init visws_early_detect(void)
250{ 227{
@@ -257,11 +234,14 @@ void __init visws_early_detect(void)
257 return; 234 return;
258 235
259 /* 236 /*
260 * Install special quirks for timer, interrupt and memory setup: 237 * Override the default platform setup functions
261 * Fall back to generic behavior for traps:
262 * Override generic MP-table parsing:
263 */ 238 */
264 x86_quirks = &visws_x86_quirks; 239 x86_init.resources.memory_setup = visws_memory_setup;
240 x86_init.mpparse.get_smp_config = visws_get_smp_config;
241 x86_init.mpparse.find_smp_config = visws_find_smp_config;
242 x86_init.irqs.pre_vector_init = visws_pre_intr_init;
243 x86_init.irqs.trap_init = visws_trap_init;
244 x86_init.timers.timer_init = visws_time_init;
265 245
266 /* 246 /*
267 * Install reboot quirks: 247 * Install reboot quirks:
@@ -400,12 +380,10 @@ static __init void cobalt_init(void)
400 co_apic_read(CO_APIC_ID)); 380 co_apic_read(CO_APIC_ID));
401} 381}
402 382
403static int __init visws_trap_init(void) 383static void __init visws_trap_init(void)
404{ 384{
405 lithium_init(); 385 lithium_init();
406 cobalt_init(); 386 cobalt_init();
407
408 return 1;
409} 387}
410 388
411/* 389/*
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 95a7289e4b0c..31e6f6cfe53e 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -817,15 +817,15 @@ static inline int __init activate_vmi(void)
817 vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); 817 vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
818 vmi_timer_ops.cancel_alarm = 818 vmi_timer_ops.cancel_alarm =
819 vmi_get_function(VMI_CALL_CancelAlarm); 819 vmi_get_function(VMI_CALL_CancelAlarm);
820 pv_time_ops.time_init = vmi_time_init; 820 x86_init.timers.timer_init = vmi_time_init;
821 pv_time_ops.get_wallclock = vmi_get_wallclock;
822 pv_time_ops.set_wallclock = vmi_set_wallclock;
823#ifdef CONFIG_X86_LOCAL_APIC 821#ifdef CONFIG_X86_LOCAL_APIC
824 pv_apic_ops.setup_boot_clock = vmi_time_bsp_init; 822 x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
825 pv_apic_ops.setup_secondary_clock = vmi_time_ap_init; 823 x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
826#endif 824#endif
827 pv_time_ops.sched_clock = vmi_sched_clock; 825 pv_time_ops.sched_clock = vmi_sched_clock;
828 pv_time_ops.get_tsc_khz = vmi_tsc_khz; 826 x86_platform.calibrate_tsc = vmi_tsc_khz;
827 x86_platform.get_wallclock = vmi_get_wallclock;
828 x86_platform.set_wallclock = vmi_set_wallclock;
829 829
830 /* We have true wallclock functions; disable CMOS clock sync */ 830 /* We have true wallclock functions; disable CMOS clock sync */
831 no_sync_cmos_clock = 1; 831 no_sync_cmos_clock = 1;
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 2b3eb82efeeb..611b9e2360d3 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -68,7 +68,7 @@ unsigned long long vmi_sched_clock(void)
68 return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE)); 68 return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
69} 69}
70 70
71/* paravirt_ops.get_tsc_khz = vmi_tsc_khz */ 71/* x86_platform.calibrate_tsc = vmi_tsc_khz */
72unsigned long vmi_tsc_khz(void) 72unsigned long vmi_tsc_khz(void)
73{ 73{
74 unsigned long long khz; 74 unsigned long long khz;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0ccb57d5ee35..a46acccec38a 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -45,9 +45,9 @@ PHDRS {
45 text PT_LOAD FLAGS(5); /* R_E */ 45 text PT_LOAD FLAGS(5); /* R_E */
46 data PT_LOAD FLAGS(7); /* RWE */ 46 data PT_LOAD FLAGS(7); /* RWE */
47#ifdef CONFIG_X86_64 47#ifdef CONFIG_X86_64
48 user PT_LOAD FLAGS(7); /* RWE */ 48 user PT_LOAD FLAGS(5); /* R_E */
49#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
50 percpu PT_LOAD FLAGS(7); /* RWE */ 50 percpu PT_LOAD FLAGS(6); /* RW_ */
51#endif 51#endif
52 init PT_LOAD FLAGS(7); /* RWE */ 52 init PT_LOAD FLAGS(7); /* RWE */
53#endif 53#endif
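
The FLAGS() values in the PHDRS change are ELF p_flags bitmasks: PF_X = 1, PF_W = 2, PF_R = 4. So the user and percpu segments drop from 7 (RWE) to 5 (R_E) and 6 (RW_) respectively. A tiny check, compilable against <elf.h>:

	#include <elf.h>
	#include <stdio.h>

	int main(void)
	{
		printf("user   (5): %s\n", (5 & PF_W) ? "writable" : "read/exec only");
		printf("percpu (6): %s\n", (6 & PF_X) ? "executable" : "no-exec");
		return 0;
	}
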
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 25ee06a80aad..8cb4974ff599 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -87,6 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
87 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; 87 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
88 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; 88 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
89 vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; 89 vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
90 vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
90 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); 91 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
91} 92}
92 93
@@ -227,19 +228,11 @@ static long __vsyscall(3) venosys_1(void)
227} 228}
228 229
229#ifdef CONFIG_SYSCTL 230#ifdef CONFIG_SYSCTL
230
231static int
232vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
233 void __user *buffer, size_t *lenp, loff_t *ppos)
234{
235 return proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
236}
237
238static ctl_table kernel_table2[] = { 231static ctl_table kernel_table2[] = {
239 { .procname = "vsyscall64", 232 { .procname = "vsyscall64",
240 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int), 233 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
241 .mode = 0644, 234 .mode = 0644,
242 .proc_handler = vsyscall_sysctl_change }, 235 .proc_handler = proc_dointvec },
243 {} 236 {}
244}; 237};
245 238
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
new file mode 100644
index 000000000000..4449a4a2c2ed
--- /dev/null
+++ b/arch/x86/kernel/x86_init.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright (C) 2009 Thomas Gleixner <tglx@linutronix.de>
3 *
4 * For licensing details see kernel-base/COPYING
5 */
6#include <linux/init.h>
7
8#include <asm/bios_ebda.h>
9#include <asm/paravirt.h>
10#include <asm/mpspec.h>
11#include <asm/setup.h>
12#include <asm/apic.h>
13#include <asm/e820.h>
14#include <asm/time.h>
15#include <asm/irq.h>
16#include <asm/tsc.h>
17
18void __cpuinit x86_init_noop(void) { }
19void __init x86_init_uint_noop(unsigned int unused) { }
20void __init x86_init_pgd_noop(pgd_t *unused) { }
21
22/*
23 * The platform setup functions are preset with the default functions
24 * for standard PC hardware.
25 */
26struct x86_init_ops x86_init __initdata = {
27
28 .resources = {
29 .probe_roms = x86_init_noop,
30 .reserve_resources = reserve_standard_io_resources,
31 .memory_setup = default_machine_specific_memory_setup,
32 },
33
34 .mpparse = {
35 .mpc_record = x86_init_uint_noop,
36 .setup_ioapic_ids = x86_init_noop,
37 .mpc_apic_id = default_mpc_apic_id,
38 .smp_read_mpc_oem = default_smp_read_mpc_oem,
39 .mpc_oem_bus_info = default_mpc_oem_bus_info,
40 .find_smp_config = default_find_smp_config,
41 .get_smp_config = default_get_smp_config,
42 },
43
44 .irqs = {
45 .pre_vector_init = init_ISA_irqs,
46 .intr_init = native_init_IRQ,
47 .trap_init = x86_init_noop,
48 },
49
50 .oem = {
51 .arch_setup = x86_init_noop,
52 .banner = default_banner,
53 },
54
55 .paging = {
56 .pagetable_setup_start = native_pagetable_setup_start,
57 .pagetable_setup_done = native_pagetable_setup_done,
58 },
59
60 .timers = {
61 .setup_percpu_clockev = setup_boot_APIC_clock,
62 .tsc_pre_init = x86_init_noop,
63 .timer_init = hpet_time_init,
64 },
65};
66
67struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
68 .setup_percpu_clockev = setup_secondary_APIC_clock,
69};
70
71struct x86_platform_ops x86_platform = {
72 .calibrate_tsc = native_calibrate_tsc,
73 .get_wallclock = mach_get_cmos_time,
74 .set_wallclock = mach_set_rtc_mmss,
75};
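
With these ops tables in place, a platform quirk overrides individual function pointers at early-detect time instead of populating an x86_quirks structure, exactly as the visws_quirks.c hunk does. A minimal sketch with hypothetical hook names:

	static void __init myboard_timer_init(void)
	{
		/* program the board's timer, then plug IRQ0 */
		setup_default_timer_irq();
	}

	static void __init myboard_trap_init(void)
	{
		/* install board-specific APIC traps */
	}

	void __init myboard_early_detect(void)
	{
		x86_init.timers.timer_init = myboard_timer_init;
		x86_init.irqs.trap_init    = myboard_trap_init;
	}

Unset hooks keep their defaults from the table above, and the noop defaults mean generic code can call every hook unconditionally, which is what removed the #ifdef CONFIG_X86_32 blocks from setup.c and traps.c.
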