author		Ingo Molnar <mingo@elte.hu>	2009-09-20 14:24:58 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-20 14:25:03 -0400
commit		bfefb7a0c6e08736f2d5917c468467f134bf28bb (patch)
tree		7aa7084114dc083fe5b4d7b532901bdeb67188e7 /arch/x86/kernel
parent		8d0cc631f6dd0a9283ceb7d61d8b85ecbcd355ea (diff)
parent		78f28b7c555359c67c2a0d23f7436e915329421e (diff)
Merge branch 'linus' into x86/urgent
Merge reason: Bring in changes that the next patch will depend on.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
34 files changed, 532 insertions, 706 deletions
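Most of what this merge pulls in below is the x86_init/x86_platform rework: the old x86_quirks callback struct, weak functions and paravirt hooks are replaced by ops tables whose members default to the native implementations and are overridden early by platform code (NUMAQ, Moorestown, EFI, VMware, KVM). As a rough illustration of that pattern (a simplified, self-contained user-space sketch with made-up names, not the kernel's actual x86_init.h definitions):

/*
 * Illustrative sketch only: a user-space approximation of the ops-table
 * pattern used by this series. The real tables and members live in
 * arch/x86/include/asm/x86_init.h; the names below are simplified stand-ins.
 */
#include <stdio.h>

struct platform_ops {
	unsigned long (*calibrate_tsc)(void);	/* cf. x86_platform.calibrate_tsc */
	void (*setup_ioapic_ids)(void);		/* cf. x86_init.mpparse.setup_ioapic_ids */
};

static unsigned long native_calibrate_tsc(void) { return 2000000; }	/* made-up kHz value */
static void default_setup_ioapic_ids(void) { }				/* no-op default */

/* Defaults are compiled in... */
static struct platform_ops platform = {
	.calibrate_tsc    = native_calibrate_tsc,
	.setup_ioapic_ids = default_setup_ioapic_ids,
};

/* ...and a platform overrides individual members during early setup
 * instead of registering a whole quirks structure. */
static unsigned long vmware_calibrate_tsc(void) { return 2400000; }	/* made-up kHz value */

int main(void)
{
	platform.calibrate_tsc = vmware_calibrate_tsc;	/* platform override */
	printf("TSC: %lu kHz\n", platform.calibrate_tsc());
	platform.setup_ioapic_ids();			/* default no-op still callable */
	return 0;
}

The design choice is the same in the kernel: keeping per-platform behaviour behind a table of function pointers avoids scattering platform checks through generic boot code.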
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 832cb838cb48..4ba419b668a5 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -31,8 +31,8 @@ GCOV_PROFILE_paravirt.o := n
 
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
-obj-y			+= time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y			+= setup.o i8259.o irqinit.o
+obj-y			+= time.o ioport.o ldt.o dumpstack.o
+obj-y			+= setup.o x86_init.o i8259.o irqinit.o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
 obj-$(CONFIG_X86_32)	+= probe_roms_32.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
@@ -105,6 +105,7 @@ obj-$(CONFIG_SCx200) += scx200.o
 scx200-y		+= scx200_32.o
 
 obj-$(CONFIG_OLPC)	+= olpc.o
+obj-$(CONFIG_X86_MRST)	+= mrst.o
 
 microcode-y		:= microcode_core.o
 microcode-$(CONFIG_MICROCODE_INTEL)	+= microcode_intel.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2d59dfa29785..a58ef98be155 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -36,6 +36,7 @@
 #include <linux/mm.h>
 
 #include <asm/perf_counter.h>
+#include <asm/x86_init.h>
 #include <asm/pgalloc.h>
 #include <asm/atomic.h>
 #include <asm/mpspec.h>
@@ -1708,7 +1709,7 @@ int __init APIC_init_uniprocessor(void)
 	localise_nmi_watchdog();
 #endif
 
-	setup_boot_clock();
+	x86_init.timers.setup_percpu_clockev();
 #ifdef CONFIG_X86_64
 	check_nmi_watchdog();
 #endif
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 676cdac385c0..77a06413b6b2 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -112,7 +112,7 @@ static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
 	return physids_promote(0xFFL);
 }
 
-static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
+static int bigsmp_check_phys_apicid_present(int phys_apicid)
 {
 	return 1;
 }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 593c4f8a8bb8..64970b9885f2 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -96,6 +96,11 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 /* # of MP IRQ source entries */
 int mp_irq_entries;
 
+/* Number of legacy interrupts */
+static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+/* GSI interrupts */
+static int nr_irqs_gsi = NR_IRQS_LEGACY;
+
 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
 int mp_bus_id_to_type[MAX_MP_BUSSES];
 #endif
@@ -173,6 +178,12 @@ static struct irq_cfg irq_cfgx[NR_IRQS] = {
 	[15] = { .vector = IRQ15_VECTOR, },
 };
 
+void __init io_apic_disable_legacy(void)
+{
+	nr_legacy_irqs = 0;
+	nr_irqs_gsi = 0;
+}
+
 int __init arch_early_irq_init(void)
 {
 	struct irq_cfg *cfg;
@@ -190,7 +201,7 @@ int __init arch_early_irq_init(void)
 		desc->chip_data = &cfg[i];
 		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
 		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
-		if (i < NR_IRQS_LEGACY)
+		if (i < nr_legacy_irqs)
 			cpumask_setall(cfg[i].domain);
 	}
 
@@ -867,7 +878,7 @@ static int __init find_isa_irq_apic(int irq, int type)
  */
 static int EISA_ELCR(unsigned int irq)
 {
-	if (irq < NR_IRQS_LEGACY) {
+	if (irq < nr_legacy_irqs) {
 		unsigned int port = 0x4d0 + (irq >> 3);
 		return (inb(port) >> (irq & 7)) & 1;
 	}
@@ -1464,7 +1475,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
 	}
 
 	ioapic_register_intr(irq, desc, trigger);
-	if (irq < NR_IRQS_LEGACY)
+	if (irq < nr_legacy_irqs)
 		disable_8259A_irq(irq);
 
 	ioapic_write_entry(apic_id, pin, entry);
@@ -1831,7 +1842,7 @@ __apicdebuginit(void) print_PIC(void)
 	unsigned int v;
 	unsigned long flags;
 
-	if (apic_verbosity == APIC_QUIET)
+	if (apic_verbosity == APIC_QUIET || !nr_legacy_irqs)
 		return;
 
 	printk(KERN_DEBUG "\nprinting PIC contents\n");
@@ -1894,6 +1905,10 @@ void __init enable_IO_APIC(void)
 		spin_unlock_irqrestore(&ioapic_lock, flags);
 		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
 	}
+
+	if (!nr_legacy_irqs)
+		return;
+
 	for(apic = 0; apic < nr_ioapics; apic++) {
 		int pin;
 		/* See if any of the pins is in ExtINT mode */
@@ -1948,6 +1963,9 @@ void disable_IO_APIC(void)
 	 */
 	clear_IO_APIC();
 
+	if (!nr_legacy_irqs)
+		return;
+
 	/*
 	 * If the i8259 is routed through an IOAPIC
 	 * Put that IOAPIC in virtual wire mode
@@ -1994,7 +2012,7 @@ void disable_IO_APIC(void)
  * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
  */
 
-static void __init setup_ioapic_ids_from_mpc(void)
+void __init setup_ioapic_ids_from_mpc(void)
 {
 	union IO_APIC_reg_00 reg_00;
 	physid_mask_t phys_id_present_map;
@@ -2003,9 +2021,8 @@ static void __init setup_ioapic_ids_from_mpc(void)
 	unsigned char old_id;
 	unsigned long flags;
 
-	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
+	if (acpi_ioapic)
 		return;
-
 	/*
 	 * Don't check I/O APIC IDs for xAPIC systems. They have
 	 * no meaning without the serial APIC bus.
@@ -2179,7 +2196,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 	struct irq_cfg *cfg;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	if (irq < NR_IRQS_LEGACY) {
+	if (irq < nr_legacy_irqs) {
 		disable_8259A_irq(irq);
 		if (i8259A_irq_pending(irq))
 			was_pending = 1;
@@ -2657,7 +2674,7 @@ static inline void init_IO_APIC_traps(void)
 			 * so default to an old-fashioned 8259
 			 * interrupt if we can..
 			 */
-			if (irq < NR_IRQS_LEGACY)
+			if (irq < nr_legacy_irqs)
 				make_8259A_irq(irq);
 			else
 				/* Strange. Oh, well.. */
@@ -2993,7 +3010,7 @@ out:
  * the I/O APIC in all cases now. No actual device should request
  * it anyway. --macro
  */
-#define PIC_IRQS	(1 << PIC_CASCADE_IR)
+#define PIC_IRQS	(1UL << PIC_CASCADE_IR)
 
 void __init setup_IO_APIC(void)
 {
@@ -3001,21 +3018,19 @@ void __init setup_IO_APIC(void)
 	/*
 	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
 	 */
-
-	io_apic_irqs = ~PIC_IRQS;
+	io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
 
 	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
 	/*
 	 * Set up IO-APIC IRQ routing.
 	 */
-#ifdef CONFIG_X86_32
-	if (!acpi_ioapic)
-		setup_ioapic_ids_from_mpc();
-#endif
+	x86_init.mpparse.setup_ioapic_ids();
+
 	sync_Arb_IDs();
 	setup_IO_APIC_irqs();
 	init_IO_APIC_traps();
-	check_timer();
+	if (nr_legacy_irqs)
+		check_timer();
 }
 
 /*
@@ -3116,7 +3131,6 @@ static int __init ioapic_init_sysfs(void)
 
 device_initcall(ioapic_init_sysfs);
 
-static int nr_irqs_gsi = NR_IRQS_LEGACY;
 /*
  * Dynamic irq allocate and deallocation
  */
@@ -3856,7 +3870,7 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
 	/*
 	 * IRQs < 16 are already in the irq_2_pin[] map
 	 */
-	if (irq >= NR_IRQS_LEGACY) {
+	if (irq >= nr_legacy_irqs) {
 		cfg = desc->chip_data;
 		if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
 			printk(KERN_INFO "can not add pin %d for irq %d\n",
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index ca96e68f0d23..efa00e2b8505 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -66,7 +66,6 @@ struct mpc_trans {
 	unsigned short trans_reserved;
 };
 
-/* x86_quirks member */
 static int mpc_record;
 
 static struct mpc_trans *translation_table[MAX_MPC_ENTRY];
@@ -130,10 +129,9 @@ void __cpuinit numaq_tsc_disable(void)
 	}
 }
 
-static int __init numaq_pre_time_init(void)
+static void __init numaq_tsc_init(void)
 {
 	numaq_tsc_disable();
-	return 0;
 }
 
 static inline int generate_logical_apicid(int quad, int phys_apicid)
@@ -177,6 +175,19 @@ static void mpc_oem_pci_bus(struct mpc_bus *m)
 	quad_local_to_mp_bus_id[quad][local] = m->busid;
 }
 
+/*
+ * Called from mpparse code.
+ * mode = 0: prescan
+ * mode = 1: one mpc entry scanned
+ */
+static void numaq_mpc_record(unsigned int mode)
+{
+	if (!mode)
+		mpc_record = 0;
+	else
+		mpc_record++;
+}
+
 static void __init MP_translation_info(struct mpc_trans *m)
 {
 	printk(KERN_INFO
@@ -206,9 +217,9 @@ static int __init mpf_checksum(unsigned char *mp, int len)
 /*
  * Read/parse the MPC oem tables
  */
-static void __init
-smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize)
+static void __init smp_read_mpc_oem(struct mpc_table *mpc)
 {
+	struct mpc_oemtable *oemtable = (void *)(long)mpc->oemptr;
 	int count = sizeof(*oemtable); /* the header size */
 	unsigned char *oemptr = ((unsigned char *)oemtable) + count;
 
@@ -250,29 +261,6 @@ static void __init
 	}
 }
 
-static int __init numaq_setup_ioapic_ids(void)
-{
-	/* so can skip it */
-	return 1;
-}
-
-static struct x86_quirks numaq_x86_quirks __initdata = {
-	.arch_pre_time_init	= numaq_pre_time_init,
-	.arch_time_init		= NULL,
-	.arch_pre_intr_init	= NULL,
-	.arch_memory_setup	= NULL,
-	.arch_intr_init		= NULL,
-	.arch_trap_init		= NULL,
-	.mach_get_smp_config	= NULL,
-	.mach_find_smp_config	= NULL,
-	.mpc_record		= &mpc_record,
-	.mpc_apic_id		= mpc_apic_id,
-	.mpc_oem_bus_info	= mpc_oem_bus_info,
-	.mpc_oem_pci_bus	= mpc_oem_pci_bus,
-	.smp_read_mpc_oem	= smp_read_mpc_oem,
-	.setup_ioapic_ids	= numaq_setup_ioapic_ids,
-};
-
 static __init void early_check_numaq(void)
 {
 	/*
@@ -286,8 +274,15 @@ static __init void early_check_numaq(void)
 	if (smp_found_config)
 		early_get_smp_config();
 
-	if (found_numaq)
-		x86_quirks = &numaq_x86_quirks;
+	if (found_numaq) {
+		x86_init.mpparse.mpc_record = numaq_mpc_record;
+		x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
+		x86_init.mpparse.mpc_apic_id = mpc_apic_id;
+		x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
+		x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
+		x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
+		x86_init.timers.tsc_pre_init = numaq_tsc_init;
+	}
 }
 
 int __init get_memcfg_numaq(void)
@@ -418,7 +413,7 @@ static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
 /* Where the IO area was mapped on multiquad, always 0 otherwise */
 void *xquad_portio;
 
-static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
+static inline int numaq_check_phys_apicid_present(int phys_apicid)
 {
 	return 1;
 }
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index eafdfbd1ea95..645ecc4ff0be 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -272,7 +272,7 @@ static physid_mask_t summit_apicid_to_cpu_present(int apicid)
 	return physid_mask_of_physid(0);
 }
 
-static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
+static int summit_check_phys_apicid_present(int physical_apicid)
 {
 	return 1;
 }
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4109679863c1..7bb676c533aa 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -526,6 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 	},
 	{ }
 };
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * AL30: A Machine Check Exception (MCE) Occurring during an
+	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+	 * Both Processor Cores to Lock Up when HT is enabled*/
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		if ((c->x86 == 15) &&
+		    (c->x86_model == 6) &&
+		    (c->x86_mask == 8) && smt_capable())
+			return -ENODEV;
+	}
+	return 0;
+}
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -540,6 +555,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
+#ifdef CONFIG_SMP
+	result = acpi_cpufreq_blacklist(c);
+	if (result)
+		return result;
+#endif
+
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 2a50ef891000..6394aa5c7985 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -605,9 +605,10 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
 	return 0;
 }
 
-static void invalidate_entry(struct powernow_k8_data *data, unsigned int entry)
+static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
+		unsigned int entry)
 {
-	data->powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
 }
 
 static void print_basics(struct powernow_k8_data *data)
@@ -854,6 +855,10 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 		goto err_out;
 	}
 
+	/* fill in data */
+	data->numps = data->acpi_data.state_count;
+	powernow_k8_acpi_pst_values(data, 0);
+
 	if (cpu_family == CPU_HW_PSTATE)
 		ret_val = fill_powernow_table_pstate(data, powernow_table);
 	else
@@ -866,11 +871,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	powernow_table[data->acpi_data.state_count].index = 0;
 	data->powernow_table = powernow_table;
 
-	/* fill in data */
-	data->numps = data->acpi_data.state_count;
 	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
-	powernow_k8_acpi_pst_values(data, 0);
 
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
@@ -914,13 +916,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 				"bad value %d.\n", i, index);
 			printk(KERN_ERR PFX "Please report to BIOS "
 				"manufacturer\n");
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 		rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
 		if (!(hi & HW_PSTATE_VALID_MASK)) {
 			dprintk("invalid pstate %d, ignoring\n", index);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
@@ -941,7 +943,6 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		struct cpufreq_frequency_table *powernow_table)
 {
 	int i;
-	int cntlofreq = 0;
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 fid;
@@ -970,7 +971,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		/* verify frequency is OK */
 		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
 			dprintk("invalid freq %u kHz, ignoring\n", freq);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
@@ -978,38 +979,17 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		 * BIOSs are using "off" to indicate invalid */
 		if (vid == VID_OFF) {
 			dprintk("invalid vid %u, ignoring\n", vid);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
-		/* verify only 1 entry from the lo frequency table */
-		if (fid < HI_FID_TABLE_BOTTOM) {
-			if (cntlofreq) {
-				/* if both entries are the same,
-				 * ignore this one ... */
-				if ((freq != powernow_table[cntlofreq].frequency) ||
-				    (index != powernow_table[cntlofreq].index)) {
-					printk(KERN_ERR PFX
-						"Too many lo freq table "
-						"entries\n");
-					return 1;
-				}
-
-				dprintk("double low frequency table entry, "
-					"ignoring it.\n");
-				invalidate_entry(data, i);
-				continue;
-			} else
-				cntlofreq = i;
-		}
-
 		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries "
 				"%u kHz vs. %u kHz\n", freq,
 				(unsigned int)
 				(data->acpi_data.states[i].core_frequency
 				* 1000));
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 	}
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 93ba8eeb100a..08be922de33a 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -34,13 +34,6 @@ detect_hypervisor_vendor(struct cpuinfo_x86 *c)
 		c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
 }
 
-unsigned long get_hypervisor_tsc_freq(void)
-{
-	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
-		return vmware_get_tsc_khz();
-	return 0;
-}
-
 static inline void __cpuinit
 hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
 {
@@ -55,3 +48,10 @@ void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
 	detect_hypervisor_vendor(c);
 	hypervisor_set_feature_bits(c);
 }
+
+void __init init_hypervisor_platform(void)
+{
+	init_hypervisor(&boot_cpu_data);
+	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
+		vmware_platform_setup();
+}
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index bc24f514ec93..0a46b4df5d80 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,6 +24,7 @@
 #include <linux/dmi.h>
 #include <asm/div64.h>
 #include <asm/vmware.h>
+#include <asm/x86_init.h>
 
 #define CPUID_VMWARE_INFO_LEAF	0x40000000
 #define VMWARE_HYPERVISOR_MAGIC	0x564D5868
@@ -47,21 +48,29 @@ static inline int __vmware_platform(void)
 	return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
 }
 
-static unsigned long __vmware_get_tsc_khz(void)
+static unsigned long vmware_get_tsc_khz(void)
 {
 	uint64_t tsc_hz;
 	uint32_t eax, ebx, ecx, edx;
 
 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
 
-	if (ebx == UINT_MAX)
-		return 0;
 	tsc_hz = eax | (((uint64_t)ebx) << 32);
 	do_div(tsc_hz, 1000);
 	BUG_ON(tsc_hz >> 32);
 	return tsc_hz;
 }
 
+void __init vmware_platform_setup(void)
+{
+	uint32_t eax, ebx, ecx, edx;
+
+	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+
+	if (ebx != UINT_MAX)
+		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+}
+
 /*
  * While checking the dmi string infomation, just checking the product
  * serial key should be enough, as this will always have a VMware
@@ -87,12 +96,6 @@ int vmware_platform(void)
 	return 0;
 }
 
-unsigned long vmware_get_tsc_khz(void)
-{
-	BUG_ON(!vmware_platform());
-	return __vmware_get_tsc_khz();
-}
-
 /*
  * VMware hypervisor takes care of exporting a reliable TSC to the guest.
  * Still, due to timing difference when running on virtual cpus, the TSC can
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 147005a1cc3c..a3210ce1eccd 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1455,28 +1455,11 @@ char *__init default_machine_specific_memory_setup(void)
 	return who;
 }
 
-char *__init __attribute__((weak)) machine_specific_memory_setup(void)
-{
-	if (x86_quirks->arch_memory_setup) {
-		char *who = x86_quirks->arch_memory_setup();
-
-		if (who)
-			return who;
-	}
-	return default_machine_specific_memory_setup();
-}
-
-/* Overridden in paravirt.c if CONFIG_PARAVIRT */
-char * __init __attribute__((weak)) memory_setup(void)
-{
-	return machine_specific_memory_setup();
-}
-
 void __init setup_memory_map(void)
 {
 	char *who;
 
-	who = memory_setup();
+	who = x86_init.resources.memory_setup();
 	memcpy(&e820_saved, &e820, sizeof(struct e820map));
 	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
 	e820_print_map(who);
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index fe26ba3e3451..ad5bd988fb79 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -42,6 +42,7 @@
 #include <asm/time.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/x86_init.h>
 
 #define EFI_DEBUG	1
 #define PFX		"EFI: "
@@ -453,6 +454,9 @@ void __init efi_init(void)
 	if (add_efi_memmap)
 		do_add_efi_memmap();
 
+	x86_platform.get_wallclock = efi_get_time;
+	x86_platform.set_wallclock = efi_set_rtc_mmss;
+
 	/* Setup for EFI runtime service */
 	reboot_type = BOOT_EFI;
 
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 3f8579f8d42c..4f8e2507e8f3 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -11,8 +11,21 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/e820.h>
-#include <asm/bios_ebda.h>
+#include <asm/page.h>
 #include <asm/trampoline.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/bios_ebda.h>
+
+static void __init i386_default_early_setup(void)
+{
+	/* Initilize 32bit specific setup functions */
+	x86_init.resources.probe_roms = probe_roms;
+	x86_init.resources.reserve_resources = i386_reserve_resources;
+	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
+
+	reserve_ebda_region();
+}
 
 void __init i386_start_kernel(void)
 {
@@ -29,7 +42,16 @@ void __init i386_start_kernel(void)
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
-	reserve_ebda_region();
+
+	/* Call the subarch specific early setup function */
+	switch (boot_params.hdr.hardware_subarch) {
+	case X86_SUBARCH_MRST:
+		x86_mrst_early_setup();
+		break;
+	default:
+		i386_default_early_setup();
+		break;
+	}
 
 	/*
 	 * At this point everything still needed from the boot loader
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 70eaa852c732..0b06cd778fd9 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -23,8 +23,8 @@
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/e820.h>
-#include <asm/bios_ebda.h>
 #include <asm/trampoline.h>
+#include <asm/bios_ebda.h>
 
 static void __init zap_identity_mappings(void)
 {
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 7ffec6b3b331..b766e8c7252d 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -157,6 +157,7 @@ subarch_entries:
 	.long default_entry		/* normal x86/PC */
 	.long lguest_entry		/* lguest hypervisor */
 	.long xen_entry			/* Xen hypervisor */
+	.long default_entry		/* Moorestown MID */
 num_subarch_entries = (. - subarch_entries) / 4
 .previous
 #endif /* CONFIG_PARAVIRT */
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 5cf36c053ac4..23c167925a5c 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -19,12 +19,6 @@
 DEFINE_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
 
-#ifdef CONFIG_X86_32
-static void pit_disable_clocksource(void);
-#else
-static inline void pit_disable_clocksource(void) { }
-#endif
-
 /*
  * HPET replaces the PIT, when enabled. So we need to know, which of
  * the two timers is used
@@ -57,12 +51,10 @@ static void init_pit_timer(enum clock_event_mode mode,
 			outb_pit(0, PIT_CH0);
 			outb_pit(0, PIT_CH0);
 		}
-		pit_disable_clocksource();
 		break;
 
 	case CLOCK_EVT_MODE_ONESHOT:
 		/* One shot setup */
-		pit_disable_clocksource();
 		outb_pit(0x38, PIT_MODE);
 		break;
 
@@ -200,17 +192,6 @@ static struct clocksource pit_cs = {
 	.shift		= 20,
 };
 
-static void pit_disable_clocksource(void)
-{
-	/*
-	 * Use mult to check whether it is registered or not
-	 */
-	if (pit_cs.mult) {
-		clocksource_unregister(&pit_cs);
-		pit_cs.mult = 0;
-	}
-}
-
 static int __init init_pit_clocksource(void)
 {
 	/*
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index ccf8ab54f31a..300883112e3d 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -116,7 +116,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
 	return 0;
 }
 
-static void __init init_ISA_irqs(void)
+void __init init_ISA_irqs(void)
 {
 	int i;
 
@@ -140,8 +140,10 @@ static void __init init_ISA_irqs(void)
 	}
 }
 
-/* Overridden in paravirt.c */
-void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+void __init init_IRQ(void)
+{
+	x86_init.irqs.intr_init();
+}
 
 static void __init smp_intr_init(void)
 {
@@ -213,32 +215,12 @@ static void __init apic_intr_init(void)
 #endif
 }
 
-/**
- * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
- *
- * Description:
- *	Perform any necessary interrupt initialisation prior to setting up
- *	the "ordinary" interrupt call gates. For legacy reasons, the ISA
- *	interrupts should be initialised here if the machine emulates a PC
- *	in any way.
- **/
-static void __init x86_quirk_pre_intr_init(void)
-{
-#ifdef CONFIG_X86_32
-	if (x86_quirks->arch_pre_intr_init) {
-		if (x86_quirks->arch_pre_intr_init())
-			return;
-	}
-#endif
-	init_ISA_irqs();
-}
-
 void __init native_init_IRQ(void)
 {
 	int i;
 
 	/* Execute any quirks before the call gates are initialised: */
-	x86_quirk_pre_intr_init();
+	x86_init.irqs.pre_vector_init();
 
 	apic_intr_init();
 
@@ -258,12 +240,6 @@ void __init native_init_IRQ(void)
 
 #ifdef CONFIG_X86_32
 	/*
-	 * Call quirks after call gates are initialised (usually add in
-	 * the architecture specific gates):
-	 */
-	x86_quirk_intr_init();
-
-	/*
 	 * External FPU? Set up irq13 if so, for
 	 * original braindamaged IBM FERR coupling.
 	 */
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index e5efcdcca31b..feaeb0d3aa4f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -22,6 +22,8 @@
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <linux/percpu.h>
+
+#include <asm/x86_init.h>
 #include <asm/reboot.h>
 
 #define KVM_SCALE 22
@@ -182,12 +184,13 @@ void __init kvmclock_init(void)
 	if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
 		if (kvm_register_clock("boot clock"))
 			return;
-		pv_time_ops.get_wallclock = kvm_get_wallclock;
-		pv_time_ops.set_wallclock = kvm_set_wallclock;
 		pv_time_ops.sched_clock = kvm_clock_read;
-		pv_time_ops.get_tsc_khz = kvm_get_tsc_khz;
+		x86_platform.calibrate_tsc = kvm_get_tsc_khz;
+		x86_platform.get_wallclock = kvm_get_wallclock;
+		x86_platform.set_wallclock = kvm_set_wallclock;
 #ifdef CONFIG_X86_LOCAL_APIC
-		pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
+		x86_cpuinit.setup_percpu_clockev =
+			kvm_setup_secondary_clock;
 #endif
 #ifdef CONFIG_SMP
 		smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index fcd513bf2846..5be95ef4ffec 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -45,6 +45,11 @@ static int __init mpf_checksum(unsigned char *mp, int len)
 	return sum & 0xFF;
 }
 
+int __init default_mpc_apic_id(struct mpc_cpu *m)
+{
+	return m->apicid;
+}
+
 static void __init MP_processor_info(struct mpc_cpu *m)
 {
 	int apicid;
@@ -55,10 +60,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
 		return;
 	}
 
-	if (x86_quirks->mpc_apic_id)
-		apicid = x86_quirks->mpc_apic_id(m);
-	else
-		apicid = m->apicid;
+	apicid = x86_init.mpparse.mpc_apic_id(m);
 
 	if (m->cpuflag & CPU_BOOTPROCESSOR) {
 		bootup_cpu = " (Bootup-CPU)";
@@ -70,16 +72,18 @@ static void __init MP_processor_info(struct mpc_cpu *m)
 }
 
 #ifdef CONFIG_X86_IO_APIC
-static void __init MP_bus_info(struct mpc_bus *m)
+void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
 {
-	char str[7];
 	memcpy(str, m->bustype, 6);
 	str[6] = 0;
+	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
+}
 
-	if (x86_quirks->mpc_oem_bus_info)
-		x86_quirks->mpc_oem_bus_info(m, str);
-	else
-		apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
+static void __init MP_bus_info(struct mpc_bus *m)
+{
+	char str[7];
+
+	x86_init.mpparse.mpc_oem_bus_info(m, str);
 
 #if MAX_MP_BUSSES < 256
 	if (m->busid >= MAX_MP_BUSSES) {
@@ -96,8 +100,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
 		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 #endif
 	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
-		if (x86_quirks->mpc_oem_pci_bus)
-			x86_quirks->mpc_oem_pci_bus(m);
+		if (x86_init.mpparse.mpc_oem_pci_bus)
+			x86_init.mpparse.mpc_oem_pci_bus(m);
 
 		clear_bit(m->busid, mp_bus_not_pci);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
@@ -291,6 +295,8 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
 			1, mpc, mpc->length, 1);
 }
 
+void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
+
 static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 {
 	char str[16];
@@ -312,16 +318,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 	if (early)
 		return 1;
 
-	if (mpc->oemptr && x86_quirks->smp_read_mpc_oem) {
-		struct mpc_oemtable *oem_table = (void *)(long)mpc->oemptr;
-		x86_quirks->smp_read_mpc_oem(oem_table, mpc->oemsize);
-	}
+	if (mpc->oemptr)
+		x86_init.mpparse.smp_read_mpc_oem(mpc);
 
 	/*
 	 * Now process the configuration blocks.
 	 */
-	if (x86_quirks->mpc_record)
-		*x86_quirks->mpc_record = 0;
+	x86_init.mpparse.mpc_record(0);
 
 	while (count < mpc->length) {
 		switch (*mpt) {
@@ -353,8 +356,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 			count = mpc->length;
 			break;
 		}
-		if (x86_quirks->mpc_record)
-			(*x86_quirks->mpc_record)++;
+		x86_init.mpparse.mpc_record(1);
 	}
 
 #ifdef CONFIG_X86_BIGSMP
@@ -608,7 +610,7 @@ static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
 /*
  * Scan the memory blocks for an SMP configuration block.
  */
-static void __init __get_smp_config(unsigned int early)
+void __init default_get_smp_config(unsigned int early)
 {
 	struct mpf_intel *mpf = mpf_found;
 
@@ -625,11 +627,6 @@ static void __init __get_smp_config(unsigned int early)
 	if (acpi_lapic && acpi_ioapic)
 		return;
 
-	if (x86_quirks->mach_get_smp_config) {
-		if (x86_quirks->mach_get_smp_config(early))
-			return;
-	}
-
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
 	       mpf->specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
@@ -670,16 +667,6 @@ static void __init __get_smp_config(unsigned int early)
 	 */
 }
 
-void __init early_get_smp_config(void)
-{
-	__get_smp_config(1);
-}
-
-void __init get_smp_config(void)
-{
-	__get_smp_config(0);
-}
-
 static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
 {
 	unsigned long size = get_mpc_size(mpf->physptr);
@@ -745,14 +732,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 	return 0;
 }
 
-static void __init __find_smp_config(unsigned int reserve)
+void __init default_find_smp_config(unsigned int reserve)
 {
 	unsigned int address;
 
-	if (x86_quirks->mach_find_smp_config) {
-		if (x86_quirks->mach_find_smp_config(reserve))
-			return;
-	}
 	/*
 	 * FIXME: Linux assumes you have 640K of base ram..
 	 * this continues the error...
@@ -787,16 +770,6 @@ static void __init __find_smp_config(unsigned int reserve)
 	smp_scan_config(address, 0x400, reserve);
 }
 
-void __init early_find_smp_config(void)
-{
-	__find_smp_config(0);
-}
-
-void __init find_smp_config(void)
-{
-	__find_smp_config(1);
-}
-
 #ifdef CONFIG_X86_IO_APIC
 static u8 __initdata irq_used[MAX_IRQ_SOURCES];
 
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
new file mode 100644
index 000000000000..3b7078abc871
--- /dev/null
+++ b/arch/x86/kernel/mrst.c
@@ -0,0 +1,24 @@
+/*
+ * mrst.c: Intel Moorestown platform specific setup code
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+
+#include <asm/setup.h>
+
+/*
+ * Moorestown specific x86_init function overrides and early setup
+ * calls.
+ */
+void __init x86_mrst_early_setup(void)
+{
+	x86_init.resources.probe_roms = x86_init_noop;
+	x86_init.resources.reserve_resources = x86_init_noop;
+}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f5b0b4a01fb2..1b1739d16310 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -54,17 +54,12 @@ u64 _paravirt_ident_64(u64 x)
 	return x;
 }
 
-static void __init default_banner(void)
+void __init default_banner(void)
 {
 	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
 	       pv_info.name);
 }
 
-char *memory_setup(void)
-{
-	return pv_init_ops.memory_setup();
-}
-
 /* Simple instruction patching code. */
 #define DEF_NATIVE(ops, name, code) \
 	extern const char start_##ops##_##name[], end_##ops##_##name[]; \
@@ -188,11 +183,6 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
 	return insn_len;
 }
 
-void init_IRQ(void)
-{
-	pv_irq_ops.init_IRQ();
-}
-
 static void native_flush_tlb(void)
 {
 	__native_flush_tlb();
@@ -218,13 +208,6 @@ extern void native_irq_enable_sysexit(void);
 extern void native_usergs_sysret32(void);
 extern void native_usergs_sysret64(void);
 
-static int __init print_banner(void)
-{
-	pv_init_ops.banner();
-	return 0;
-}
-core_initcall(print_banner);
-
 static struct resource reserve_ioports = {
 	.start = 0,
 	.end = IO_SPACE_LIMIT,
@@ -320,21 +303,13 @@ struct pv_info pv_info = {
 
 struct pv_init_ops pv_init_ops = {
 	.patch = native_patch,
-	.banner = default_banner,
-	.arch_setup = paravirt_nop,
-	.memory_setup = machine_specific_memory_setup,
 };
 
 struct pv_time_ops pv_time_ops = {
-	.time_init = hpet_time_init,
-	.get_wallclock = native_get_wallclock,
-	.set_wallclock = native_set_wallclock,
 	.sched_clock = native_sched_clock,
-	.get_tsc_khz = native_calibrate_tsc,
 };
 
 struct pv_irq_ops pv_irq_ops = {
-	.init_IRQ = native_init_IRQ,
 	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
 	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
 	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
@@ -409,8 +384,6 @@ struct pv_cpu_ops pv_cpu_ops = {
 
 struct pv_apic_ops pv_apic_ops = {
 #ifdef CONFIG_X86_LOCAL_APIC
-	.setup_boot_clock = setup_boot_APIC_clock,
-	.setup_secondary_clock = setup_secondary_APIC_clock,
 	.startup_ipi_hook = paravirt_nop,
 #endif
 };
@@ -424,13 +397,6 @@ struct pv_apic_ops pv_apic_ops = {
 #endif
 
 struct pv_mmu_ops pv_mmu_ops = {
-#ifndef CONFIG_X86_64
-	.pagetable_setup_start = native_pagetable_setup_start,
-	.pagetable_setup_done = native_pagetable_setup_done,
-#else
-	.pagetable_setup_start = paravirt_nop,
-	.pagetable_setup_done = paravirt_nop,
-#endif
 
 	.read_cr2 = native_read_cr2,
 	.write_cr2 = native_write_cr2,
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 5d465b207e72..1cfbbfc3ae26 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/pnp.h> | 8 | #include <linux/pnp.h> |
9 | 9 | ||
10 | #include <asm/vsyscall.h> | 10 | #include <asm/vsyscall.h> |
11 | #include <asm/x86_init.h> | ||
11 | #include <asm/time.h> | 12 | #include <asm/time.h> |
12 | 13 | ||
13 | #ifdef CONFIG_X86_32 | 14 | #ifdef CONFIG_X86_32 |
@@ -165,33 +166,29 @@ void rtc_cmos_write(unsigned char val, unsigned char addr) | |||
165 | } | 166 | } |
166 | EXPORT_SYMBOL(rtc_cmos_write); | 167 | EXPORT_SYMBOL(rtc_cmos_write); |
167 | 168 | ||
168 | static int set_rtc_mmss(unsigned long nowtime) | 169 | int update_persistent_clock(struct timespec now) |
169 | { | 170 | { |
170 | unsigned long flags; | 171 | unsigned long flags; |
171 | int retval; | 172 | int retval; |
172 | 173 | ||
173 | spin_lock_irqsave(&rtc_lock, flags); | 174 | spin_lock_irqsave(&rtc_lock, flags); |
174 | retval = set_wallclock(nowtime); | 175 | retval = x86_platform.set_wallclock(now.tv_sec); |
175 | spin_unlock_irqrestore(&rtc_lock, flags); | 176 | spin_unlock_irqrestore(&rtc_lock, flags); |
176 | 177 | ||
177 | return retval; | 178 | return retval; |
178 | } | 179 | } |
179 | 180 | ||
180 | /* not static: needed by APM */ | 181 | /* not static: needed by APM */ |
181 | unsigned long read_persistent_clock(void) | 182 | void read_persistent_clock(struct timespec *ts) |
182 | { | 183 | { |
183 | unsigned long retval, flags; | 184 | unsigned long retval, flags; |
184 | 185 | ||
185 | spin_lock_irqsave(&rtc_lock, flags); | 186 | spin_lock_irqsave(&rtc_lock, flags); |
186 | retval = get_wallclock(); | 187 | retval = x86_platform.get_wallclock(); |
187 | spin_unlock_irqrestore(&rtc_lock, flags); | 188 | spin_unlock_irqrestore(&rtc_lock, flags); |
188 | 189 | ||
189 | return retval; | 190 | ts->tv_sec = retval; |
190 | } | 191 | ts->tv_nsec = 0; |
191 | |||
192 | int update_persistent_clock(struct timespec now) | ||
193 | { | ||
194 | return set_rtc_mmss(now.tv_sec); | ||
195 | } | 192 | } |
196 | 193 | ||
197 | unsigned long long native_read_tsc(void) | 194 | unsigned long long native_read_tsc(void) |
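Editor's note: read_persistent_clock() and update_persistent_clock() above no longer go through the paravirt get/set_wallclock wrappers; they call x86_platform, whose defaults are the CMOS accessors (see the new x86_init.c at the bottom of this diff). A minimal sketch of a board override, assuming hypothetical my_rtc_* hardware accessors:

#include <linux/init.h>
#include <asm/x86_init.h>

/* Hypothetical RTC accessors, assumed for this sketch: */
extern unsigned long my_rtc_read_seconds(void);
extern int my_rtc_write_seconds(unsigned long sec);

static unsigned long my_board_get_wallclock(void)
{
	return my_rtc_read_seconds();
}

static int my_board_set_wallclock(unsigned long now)
{
	return my_rtc_write_seconds(now);
}

static void __init my_board_rtc_init(void)
{
	x86_platform.get_wallclock = my_board_get_wallclock;
	x86_platform.set_wallclock = my_board_set_wallclock;
}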
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index f5baa2ae0c87..f327bccf5089 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -109,10 +109,6 @@ | |||
109 | #include <asm/numa_64.h> | 109 | #include <asm/numa_64.h> |
110 | #endif | 110 | #endif |
111 | 111 | ||
112 | #ifndef ARCH_SETUP | ||
113 | #define ARCH_SETUP | ||
114 | #endif | ||
115 | |||
116 | /* | 112 | /* |
117 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. | 113 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. |
118 | * The direct mapping extends to max_pfn_mapped, so that we can directly access | 114 | * The direct mapping extends to max_pfn_mapped, so that we can directly access |
@@ -134,9 +130,9 @@ int default_cpu_present_to_apicid(int mps_cpu) | |||
134 | return __default_cpu_present_to_apicid(mps_cpu); | 130 | return __default_cpu_present_to_apicid(mps_cpu); |
135 | } | 131 | } |
136 | 132 | ||
137 | int default_check_phys_apicid_present(int boot_cpu_physical_apicid) | 133 | int default_check_phys_apicid_present(int phys_apicid) |
138 | { | 134 | { |
139 | return __default_check_phys_apicid_present(boot_cpu_physical_apicid); | 135 | return __default_check_phys_apicid_present(phys_apicid); |
140 | } | 136 | } |
141 | #endif | 137 | #endif |
142 | 138 | ||
@@ -172,13 +168,6 @@ static struct resource bss_resource = { | |||
172 | 168 | ||
173 | 169 | ||
174 | #ifdef CONFIG_X86_32 | 170 | #ifdef CONFIG_X86_32 |
175 | static struct resource video_ram_resource = { | ||
176 | .name = "Video RAM area", | ||
177 | .start = 0xa0000, | ||
178 | .end = 0xbffff, | ||
179 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | ||
180 | }; | ||
181 | |||
182 | /* cpu data as detected by the assembly code in head.S */ | 171 | /* cpu data as detected by the assembly code in head.S */ |
183 | struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; | 172 | struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; |
184 | /* common cpu data for all cpus */ | 173 | /* common cpu data for all cpus */ |
@@ -606,7 +595,7 @@ static struct resource standard_io_resources[] = { | |||
606 | .flags = IORESOURCE_BUSY | IORESOURCE_IO } | 595 | .flags = IORESOURCE_BUSY | IORESOURCE_IO } |
607 | }; | 596 | }; |
608 | 597 | ||
609 | static void __init reserve_standard_io_resources(void) | 598 | void __init reserve_standard_io_resources(void) |
610 | { | 599 | { |
611 | int i; | 600 | int i; |
612 | 601 | ||
@@ -638,10 +627,6 @@ static int __init setup_elfcorehdr(char *arg) | |||
638 | early_param("elfcorehdr", setup_elfcorehdr); | 627 | early_param("elfcorehdr", setup_elfcorehdr); |
639 | #endif | 628 | #endif |
640 | 629 | ||
641 | static struct x86_quirks default_x86_quirks __initdata; | ||
642 | |||
643 | struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; | ||
644 | |||
645 | #ifdef CONFIG_X86_RESERVE_LOW_64K | 630 | #ifdef CONFIG_X86_RESERVE_LOW_64K |
646 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) | 631 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) |
647 | { | 632 | { |
@@ -758,7 +743,7 @@ void __init setup_arch(char **cmdline_p) | |||
758 | } | 743 | } |
759 | #endif | 744 | #endif |
760 | 745 | ||
761 | ARCH_SETUP | 746 | x86_init.oem.arch_setup(); |
762 | 747 | ||
763 | setup_memory_map(); | 748 | setup_memory_map(); |
764 | parse_setup_data(); | 749 | parse_setup_data(); |
@@ -844,11 +829,9 @@ void __init setup_arch(char **cmdline_p) | |||
844 | * VMware detection requires dmi to be available, so this | 829 | * VMware detection requires dmi to be available, so this |
845 | * needs to be done after dmi_scan_machine, for the BP. | 830 | * needs to be done after dmi_scan_machine, for the BP. |
846 | */ | 831 | */ |
847 | init_hypervisor(&boot_cpu_data); | 832 | init_hypervisor_platform(); |
848 | 833 | ||
849 | #ifdef CONFIG_X86_32 | 834 | x86_init.resources.probe_roms(); |
850 | probe_roms(); | ||
851 | #endif | ||
852 | 835 | ||
853 | /* after parse_early_param, so could debug it */ | 836 | /* after parse_early_param, so could debug it */ |
854 | insert_resource(&iomem_resource, &code_resource); | 837 | insert_resource(&iomem_resource, &code_resource); |
@@ -983,10 +966,9 @@ void __init setup_arch(char **cmdline_p) | |||
983 | kvmclock_init(); | 966 | kvmclock_init(); |
984 | #endif | 967 | #endif |
985 | 968 | ||
986 | paravirt_pagetable_setup_start(swapper_pg_dir); | 969 | x86_init.paging.pagetable_setup_start(swapper_pg_dir); |
987 | paging_init(); | 970 | paging_init(); |
988 | paravirt_pagetable_setup_done(swapper_pg_dir); | 971 | x86_init.paging.pagetable_setup_done(swapper_pg_dir); |
989 | paravirt_post_allocator_init(); | ||
990 | 972 | ||
991 | tboot_probe(); | 973 | tboot_probe(); |
992 | 974 | ||
@@ -1003,13 +985,11 @@ void __init setup_arch(char **cmdline_p) | |||
1003 | */ | 985 | */ |
1004 | acpi_boot_init(); | 986 | acpi_boot_init(); |
1005 | 987 | ||
1006 | #if defined(CONFIG_X86_MPPARSE) || defined(CONFIG_X86_VISWS) | ||
1007 | /* | 988 | /* |
1008 | * get boot-time SMP configuration: | 989 | * get boot-time SMP configuration: |
1009 | */ | 990 | */ |
1010 | if (smp_found_config) | 991 | if (smp_found_config) |
1011 | get_smp_config(); | 992 | get_smp_config(); |
1012 | #endif | ||
1013 | 993 | ||
1014 | prefill_possible_map(); | 994 | prefill_possible_map(); |
1015 | 995 | ||
@@ -1028,10 +1008,7 @@ void __init setup_arch(char **cmdline_p) | |||
1028 | e820_reserve_resources(); | 1008 | e820_reserve_resources(); |
1029 | e820_mark_nosave_regions(max_low_pfn); | 1009 | e820_mark_nosave_regions(max_low_pfn); |
1030 | 1010 | ||
1031 | #ifdef CONFIG_X86_32 | 1011 | x86_init.resources.reserve_resources(); |
1032 | request_resource(&iomem_resource, &video_ram_resource); | ||
1033 | #endif | ||
1034 | reserve_standard_io_resources(); | ||
1035 | 1012 | ||
1036 | e820_setup_gap(); | 1013 | e820_setup_gap(); |
1037 | 1014 | ||
@@ -1043,78 +1020,22 @@ void __init setup_arch(char **cmdline_p) | |||
1043 | conswitchp = &dummy_con; | 1020 | conswitchp = &dummy_con; |
1044 | #endif | 1021 | #endif |
1045 | #endif | 1022 | #endif |
1023 | x86_init.oem.banner(); | ||
1046 | } | 1024 | } |
1047 | 1025 | ||
1048 | #ifdef CONFIG_X86_32 | 1026 | #ifdef CONFIG_X86_32 |
1049 | 1027 | ||
1050 | /** | 1028 | static struct resource video_ram_resource = { |
1051 | * x86_quirk_intr_init - post gate setup interrupt initialisation | 1029 | .name = "Video RAM area", |
1052 | * | 1030 | .start = 0xa0000, |
1053 | * Description: | 1031 | .end = 0xbffff, |
1054 | * Fill in any interrupts that may have been left out by the general | 1032 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM |
1055 | * init_IRQ() routine. interrupts having to do with the machine rather | ||
1056 | * than the devices on the I/O bus (like APIC interrupts in intel MP | ||
1057 | * systems) are started here. | ||
1058 | **/ | ||
1059 | void __init x86_quirk_intr_init(void) | ||
1060 | { | ||
1061 | if (x86_quirks->arch_intr_init) { | ||
1062 | if (x86_quirks->arch_intr_init()) | ||
1063 | return; | ||
1064 | } | ||
1065 | } | ||
1066 | |||
1067 | /** | ||
1068 | * x86_quirk_trap_init - initialise system specific traps | ||
1069 | * | ||
1070 | * Description: | ||
1071 | * Called as the final act of trap_init(). Used in VISWS to initialise | ||
1072 | * the various board specific APIC traps. | ||
1073 | **/ | ||
1074 | void __init x86_quirk_trap_init(void) | ||
1075 | { | ||
1076 | if (x86_quirks->arch_trap_init) { | ||
1077 | if (x86_quirks->arch_trap_init()) | ||
1078 | return; | ||
1079 | } | ||
1080 | } | ||
1081 | |||
1082 | static struct irqaction irq0 = { | ||
1083 | .handler = timer_interrupt, | ||
1084 | .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER, | ||
1085 | .name = "timer" | ||
1086 | }; | 1033 | }; |
1087 | 1034 | ||
1088 | /** | 1035 | void __init i386_reserve_resources(void) |
1089 | * x86_quirk_pre_time_init - do any specific initialisations before. | ||
1090 | * | ||
1091 | **/ | ||
1092 | void __init x86_quirk_pre_time_init(void) | ||
1093 | { | 1036 | { |
1094 | if (x86_quirks->arch_pre_time_init) | 1037 | request_resource(&iomem_resource, &video_ram_resource); |
1095 | x86_quirks->arch_pre_time_init(); | 1038 | reserve_standard_io_resources(); |
1096 | } | 1039 | } |
1097 | 1040 | ||
1098 | /** | ||
1099 | * x86_quirk_time_init - do any specific initialisations for the system timer. | ||
1100 | * | ||
1101 | * Description: | ||
1102 | * Must plug the system timer interrupt source at HZ into the IRQ listed | ||
1103 | * in irq_vectors.h:TIMER_IRQ | ||
1104 | **/ | ||
1105 | void __init x86_quirk_time_init(void) | ||
1106 | { | ||
1107 | if (x86_quirks->arch_time_init) { | ||
1108 | /* | ||
1109 | * A nonzero return code does not mean failure, it means | ||
1110 | * that the architecture quirk does not want any | ||
1111 | * generic (timer) setup to be performed after this: | ||
1112 | */ | ||
1113 | if (x86_quirks->arch_time_init()) | ||
1114 | return; | ||
1115 | } | ||
1116 | |||
1117 | irq0.mask = cpumask_of_cpu(0); | ||
1118 | setup_irq(0, &irq0); | ||
1119 | } | ||
1120 | #endif /* CONFIG_X86_32 */ | 1041 | #endif /* CONFIG_X86_32 */ |
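Editor's note: in setup.c the ARCH_SETUP macro, the open-coded probe_roms()/video-RAM reservation and the x86_quirk_* helpers are replaced by calls through x86_init. A sketch of an OEM platform filling in the new oem hooks (my_oem_* are assumed names, not part of the patch):

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/x86_init.h>

static void __init my_oem_arch_setup(void)
{
	/* board setup that previously hid behind the ARCH_SETUP macro */
}

static void __init my_oem_banner(void)
{
	printk(KERN_INFO "Booting on MyBoard (illustrative banner)\n");
}

void __init my_oem_early_init(void)
{
	x86_init.oem.arch_setup = my_oem_arch_setup;
	x86_init.oem.banner     = my_oem_banner;
}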
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a25eeec00080..09c5e077dff7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -324,7 +324,7 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
324 | /* enable local interrupts */ | 324 | /* enable local interrupts */ |
325 | local_irq_enable(); | 325 | local_irq_enable(); |
326 | 326 | ||
327 | setup_secondary_clock(); | 327 | x86_cpuinit.setup_percpu_clockev(); |
328 | 328 | ||
329 | wmb(); | 329 | wmb(); |
330 | cpu_idle(); | 330 | cpu_idle(); |
@@ -1114,7 +1114,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1114 | 1114 | ||
1115 | printk(KERN_INFO "CPU%d: ", 0); | 1115 | printk(KERN_INFO "CPU%d: ", 0); |
1116 | print_cpu_info(&cpu_data(0)); | 1116 | print_cpu_info(&cpu_data(0)); |
1117 | setup_boot_clock(); | 1117 | x86_init.timers.setup_percpu_clockev(); |
1118 | 1118 | ||
1119 | if (is_uv_system()) | 1119 | if (is_uv_system()) |
1120 | uv_system_init(); | 1120 | uv_system_init(); |
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c new file mode 100644 index 000000000000..e293ac56c723 --- /dev/null +++ b/arch/x86/kernel/time.c | |||
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * Copyright (c) 1991,1992,1995 Linus Torvalds | ||
3 | * Copyright (c) 1994 Alan Modra | ||
4 | * Copyright (c) 1995 Markus Kuhn | ||
5 | * Copyright (c) 1996 Ingo Molnar | ||
6 | * Copyright (c) 1998 Andrea Arcangeli | ||
7 | * Copyright (c) 2002,2006 Vojtech Pavlik | ||
8 | * Copyright (c) 2003 Andi Kleen | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/clockchips.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/time.h> | ||
15 | #include <linux/mca.h> | ||
16 | |||
17 | #include <asm/vsyscall.h> | ||
18 | #include <asm/x86_init.h> | ||
19 | #include <asm/i8259.h> | ||
20 | #include <asm/i8253.h> | ||
21 | #include <asm/timer.h> | ||
22 | #include <asm/hpet.h> | ||
23 | #include <asm/time.h> | ||
24 | |||
25 | #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC) | ||
26 | int timer_ack; | ||
27 | #endif | ||
28 | |||
29 | #ifdef CONFIG_X86_64 | ||
30 | volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; | ||
31 | #endif | ||
32 | |||
33 | unsigned long profile_pc(struct pt_regs *regs) | ||
34 | { | ||
35 | unsigned long pc = instruction_pointer(regs); | ||
36 | |||
37 | if (!user_mode_vm(regs) && in_lock_functions(pc)) { | ||
38 | #ifdef CONFIG_FRAME_POINTER | ||
39 | return *(unsigned long *)(regs->bp + sizeof(long)); | ||
40 | #else | ||
41 | unsigned long *sp = (unsigned long *)regs->sp; | ||
42 | /* | ||
43 | * Return address is either directly at stack pointer | ||
44 | * or above a saved flags. Eflags has bits 22-31 zero, | ||
45 | * kernel addresses don't. | ||
46 | */ | ||
47 | if (sp[0] >> 22) | ||
48 | return sp[0]; | ||
49 | if (sp[1] >> 22) | ||
50 | return sp[1]; | ||
51 | #endif | ||
52 | } | ||
53 | return pc; | ||
54 | } | ||
55 | EXPORT_SYMBOL(profile_pc); | ||
56 | |||
57 | /* | ||
58 | * Default timer interrupt handler for PIT/HPET | ||
59 | */ | ||
60 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
61 | { | ||
62 | /* Keep nmi watchdog up to date */ | ||
63 | inc_irq_stat(irq0_irqs); | ||
64 | |||
65 | /* Optimized out for !IO_APIC and x86_64 */ | ||
66 | if (timer_ack) { | ||
67 | /* | ||
68 | * Subtle, when I/O APICs are used we have to ack timer IRQ | ||
69 | * manually to deassert NMI lines for the watchdog if run | ||
70 | * on an 82489DX-based system. | ||
71 | */ | ||
72 | spin_lock(&i8259A_lock); | ||
73 | outb(0x0c, PIC_MASTER_OCW3); | ||
74 | /* Ack the IRQ; AEOI will end it automatically. */ | ||
75 | inb(PIC_MASTER_POLL); | ||
76 | spin_unlock(&i8259A_lock); | ||
77 | } | ||
78 | |||
79 | global_clock_event->event_handler(global_clock_event); | ||
80 | |||
81 | /* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */ | ||
82 | if (MCA_bus) | ||
83 | outb_p(inb_p(0x61)| 0x80, 0x61); | ||
84 | |||
85 | return IRQ_HANDLED; | ||
86 | } | ||
87 | |||
88 | static struct irqaction irq0 = { | ||
89 | .handler = timer_interrupt, | ||
90 | .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER, | ||
91 | .name = "timer" | ||
92 | }; | ||
93 | |||
94 | void __init setup_default_timer_irq(void) | ||
95 | { | ||
96 | irq0.mask = cpumask_of_cpu(0); | ||
97 | setup_irq(0, &irq0); | ||
98 | } | ||
99 | |||
100 | /* Default timer init function */ | ||
101 | void __init hpet_time_init(void) | ||
102 | { | ||
103 | if (!hpet_enable()) | ||
104 | setup_pit_timer(); | ||
105 | setup_default_timer_irq(); | ||
106 | } | ||
107 | |||
108 | static __init void x86_late_time_init(void) | ||
109 | { | ||
110 | x86_init.timers.timer_init(); | ||
111 | tsc_init(); | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Initialize TSC and delay the periodic timer init to | ||
116 | * late x86_late_time_init() so ioremap works. | ||
117 | */ | ||
118 | void __init time_init(void) | ||
119 | { | ||
120 | late_time_init = x86_late_time_init; | ||
121 | } | ||
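Editor's note: the new time.c defers the actual timer bring-up to late_time_init, which dispatches through x86_init.timers.timer_init (default: hpet_time_init) and then runs tsc_init(). A sketch of a board with its own timer block overriding that hook and reusing setup_default_timer_irq() from the file above (my_board_* names are assumed):

#include <linux/init.h>
#include <asm/x86_init.h>
#include <asm/time.h>

/* Hypothetical clock event hardware init, assumed for this sketch: */
extern void my_board_start_clockevent(void);

static void __init my_board_timer_init(void)
{
	my_board_start_clockevent();
	setup_default_timer_irq();	/* plug IRQ0, as the VISWS code below does */
}

static void __init my_board_setup(void)
{
	x86_init.timers.timer_init = my_board_timer_init;
}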
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c deleted file mode 100644 index 5c5d87f0b2e1..000000000000 --- a/arch/x86/kernel/time_32.c +++ /dev/null | |||
@@ -1,137 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | ||
3 | * | ||
4 | * This file contains the PC-specific time handling details: | ||
5 | * reading the RTC at bootup, etc.. | ||
6 | * 1994-07-02 Alan Modra | ||
7 | * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime | ||
8 | * 1995-03-26 Markus Kuhn | ||
9 | * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887 | ||
10 | * precision CMOS clock update | ||
11 | * 1996-05-03 Ingo Molnar | ||
12 | * fixed time warps in do_[slow|fast]_gettimeoffset() | ||
13 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 | ||
14 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | ||
15 | * 1998-09-05 (Various) | ||
16 | * More robust do_fast_gettimeoffset() algorithm implemented | ||
17 | * (works with APM, Cyrix 6x86MX and Centaur C6), | ||
18 | * monotonic gettimeofday() with fast_get_timeoffset(), | ||
19 | * drift-proof precision TSC calibration on boot | ||
20 | * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D. | ||
21 | * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>; | ||
22 | * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>). | ||
23 | * 1998-12-16 Andrea Arcangeli | ||
24 | * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy | ||
25 | * because was not accounting lost_ticks. | ||
26 | * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli | ||
27 | * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to | ||
28 | * serialize accesses to xtime/lost_ticks). | ||
29 | */ | ||
30 | |||
31 | #include <linux/init.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/time.h> | ||
34 | #include <linux/mca.h> | ||
35 | |||
36 | #include <asm/setup.h> | ||
37 | #include <asm/hpet.h> | ||
38 | #include <asm/time.h> | ||
39 | #include <asm/timer.h> | ||
40 | |||
41 | #include <asm/do_timer.h> | ||
42 | |||
43 | int timer_ack; | ||
44 | |||
45 | unsigned long profile_pc(struct pt_regs *regs) | ||
46 | { | ||
47 | unsigned long pc = instruction_pointer(regs); | ||
48 | |||
49 | #ifdef CONFIG_SMP | ||
50 | if (!user_mode_vm(regs) && in_lock_functions(pc)) { | ||
51 | #ifdef CONFIG_FRAME_POINTER | ||
52 | return *(unsigned long *)(regs->bp + sizeof(long)); | ||
53 | #else | ||
54 | unsigned long *sp = (unsigned long *)&regs->sp; | ||
55 | |||
56 | /* Return address is either directly at stack pointer | ||
57 | or above a saved flags. Eflags has bits 22-31 zero, | ||
58 | kernel addresses don't. */ | ||
59 | if (sp[0] >> 22) | ||
60 | return sp[0]; | ||
61 | if (sp[1] >> 22) | ||
62 | return sp[1]; | ||
63 | #endif | ||
64 | } | ||
65 | #endif | ||
66 | return pc; | ||
67 | } | ||
68 | EXPORT_SYMBOL(profile_pc); | ||
69 | |||
70 | /* | ||
71 | * This is the same as the above, except we _also_ save the current | ||
72 | * Time Stamp Counter value at the time of the timer interrupt, so that | ||
73 | * we later on can estimate the time of day more exactly. | ||
74 | */ | ||
75 | irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
76 | { | ||
77 | /* Keep nmi watchdog up to date */ | ||
78 | inc_irq_stat(irq0_irqs); | ||
79 | |||
80 | #ifdef CONFIG_X86_IO_APIC | ||
81 | if (timer_ack) { | ||
82 | /* | ||
83 | * Subtle, when I/O APICs are used we have to ack timer IRQ | ||
84 | * manually to deassert NMI lines for the watchdog if run | ||
85 | * on an 82489DX-based system. | ||
86 | */ | ||
87 | spin_lock(&i8259A_lock); | ||
88 | outb(0x0c, PIC_MASTER_OCW3); | ||
89 | /* Ack the IRQ; AEOI will end it automatically. */ | ||
90 | inb(PIC_MASTER_POLL); | ||
91 | spin_unlock(&i8259A_lock); | ||
92 | } | ||
93 | #endif | ||
94 | |||
95 | do_timer_interrupt_hook(); | ||
96 | |||
97 | #ifdef CONFIG_MCA | ||
98 | if (MCA_bus) { | ||
99 | /* The PS/2 uses level-triggered interrupts. You can't | ||
100 | turn them off, nor would you want to (any attempt to | ||
101 | enable edge-triggered interrupts usually gets intercepted by a | ||
102 | special hardware circuit). Hence we have to acknowledge | ||
103 | the timer interrupt. Through some incredibly stupid | ||
104 | design idea, the reset for IRQ 0 is done by setting the | ||
105 | high bit of the PPI port B (0x61). Note that some PS/2s, | ||
106 | notably the 55SX, work fine if this is removed. */ | ||
107 | |||
108 | u8 irq_v = inb_p(0x61); /* read the current state */ | ||
109 | outb_p(irq_v | 0x80, 0x61); /* reset the IRQ */ | ||
110 | } | ||
111 | #endif | ||
112 | |||
113 | return IRQ_HANDLED; | ||
114 | } | ||
115 | |||
116 | /* Duplicate of time_init() below, with hpet_enable part added */ | ||
117 | void __init hpet_time_init(void) | ||
118 | { | ||
119 | if (!hpet_enable()) | ||
120 | setup_pit_timer(); | ||
121 | x86_quirk_time_init(); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * This is called directly from init code; we must delay timer setup in the | ||
126 | * HPET case as we can't make the decision to turn on HPET this early in the | ||
127 | * boot process. | ||
128 | * | ||
129 | * The chosen time_init function will usually be hpet_time_init, above, but | ||
130 | * in the case of virtual hardware, an alternative function may be substituted. | ||
131 | */ | ||
132 | void __init time_init(void) | ||
133 | { | ||
134 | x86_quirk_pre_time_init(); | ||
135 | tsc_init(); | ||
136 | late_time_init = choose_time_init(); | ||
137 | } | ||
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c deleted file mode 100644 index 5ba343e61844..000000000000 --- a/arch/x86/kernel/time_64.c +++ /dev/null | |||
@@ -1,135 +0,0 @@ | |||
1 | /* | ||
2 | * "High Precision Event Timer" based timekeeping. | ||
3 | * | ||
4 | * Copyright (c) 1991,1992,1995 Linus Torvalds | ||
5 | * Copyright (c) 1994 Alan Modra | ||
6 | * Copyright (c) 1995 Markus Kuhn | ||
7 | * Copyright (c) 1996 Ingo Molnar | ||
8 | * Copyright (c) 1998 Andrea Arcangeli | ||
9 | * Copyright (c) 2002,2006 Vojtech Pavlik | ||
10 | * Copyright (c) 2003 Andi Kleen | ||
11 | * RTC support code taken from arch/i386/kernel/timers/time_hpet.c | ||
12 | */ | ||
13 | |||
14 | #include <linux/clockchips.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/time.h> | ||
19 | #include <linux/mca.h> | ||
20 | #include <linux/nmi.h> | ||
21 | |||
22 | #include <asm/i8253.h> | ||
23 | #include <asm/hpet.h> | ||
24 | #include <asm/vgtod.h> | ||
25 | #include <asm/time.h> | ||
26 | #include <asm/timer.h> | ||
27 | |||
28 | volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; | ||
29 | |||
30 | unsigned long profile_pc(struct pt_regs *regs) | ||
31 | { | ||
32 | unsigned long pc = instruction_pointer(regs); | ||
33 | |||
34 | /* Assume the lock function has either no stack frame or a copy | ||
35 | of flags from PUSHF | ||
36 | Eflags always has bits 22 and up cleared unlike kernel addresses. */ | ||
37 | if (!user_mode_vm(regs) && in_lock_functions(pc)) { | ||
38 | #ifdef CONFIG_FRAME_POINTER | ||
39 | return *(unsigned long *)(regs->bp + sizeof(long)); | ||
40 | #else | ||
41 | unsigned long *sp = (unsigned long *)regs->sp; | ||
42 | if (sp[0] >> 22) | ||
43 | return sp[0]; | ||
44 | if (sp[1] >> 22) | ||
45 | return sp[1]; | ||
46 | #endif | ||
47 | } | ||
48 | return pc; | ||
49 | } | ||
50 | EXPORT_SYMBOL(profile_pc); | ||
51 | |||
52 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
53 | { | ||
54 | inc_irq_stat(irq0_irqs); | ||
55 | |||
56 | global_clock_event->event_handler(global_clock_event); | ||
57 | |||
58 | #ifdef CONFIG_MCA | ||
59 | if (MCA_bus) { | ||
60 | u8 irq_v = inb_p(0x61); /* read the current state */ | ||
61 | outb_p(irq_v|0x80, 0x61); /* reset the IRQ */ | ||
62 | } | ||
63 | #endif | ||
64 | |||
65 | return IRQ_HANDLED; | ||
66 | } | ||
67 | |||
68 | /* calibrate_cpu is used on systems with fixed rate TSCs to determine | ||
69 | * processor frequency */ | ||
70 | #define TICK_COUNT 100000000 | ||
71 | unsigned long __init calibrate_cpu(void) | ||
72 | { | ||
73 | int tsc_start, tsc_now; | ||
74 | int i, no_ctr_free; | ||
75 | unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0; | ||
76 | unsigned long flags; | ||
77 | |||
78 | for (i = 0; i < 4; i++) | ||
79 | if (avail_to_resrv_perfctr_nmi_bit(i)) | ||
80 | break; | ||
81 | no_ctr_free = (i == 4); | ||
82 | if (no_ctr_free) { | ||
83 | WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... " | ||
84 | "cpu_khz value may be incorrect.\n"); | ||
85 | i = 3; | ||
86 | rdmsrl(MSR_K7_EVNTSEL3, evntsel3); | ||
87 | wrmsrl(MSR_K7_EVNTSEL3, 0); | ||
88 | rdmsrl(MSR_K7_PERFCTR3, pmc3); | ||
89 | } else { | ||
90 | reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
91 | reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
92 | } | ||
93 | local_irq_save(flags); | ||
94 | /* start measuring cycles, incrementing from 0 */ | ||
95 | wrmsrl(MSR_K7_PERFCTR0 + i, 0); | ||
96 | wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76); | ||
97 | rdtscl(tsc_start); | ||
98 | do { | ||
99 | rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now); | ||
100 | tsc_now = get_cycles(); | ||
101 | } while ((tsc_now - tsc_start) < TICK_COUNT); | ||
102 | |||
103 | local_irq_restore(flags); | ||
104 | if (no_ctr_free) { | ||
105 | wrmsrl(MSR_K7_EVNTSEL3, 0); | ||
106 | wrmsrl(MSR_K7_PERFCTR3, pmc3); | ||
107 | wrmsrl(MSR_K7_EVNTSEL3, evntsel3); | ||
108 | } else { | ||
109 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
110 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
111 | } | ||
112 | |||
113 | return pmc_now * tsc_khz / (tsc_now - tsc_start); | ||
114 | } | ||
115 | |||
116 | static struct irqaction irq0 = { | ||
117 | .handler = timer_interrupt, | ||
118 | .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER, | ||
119 | .name = "timer" | ||
120 | }; | ||
121 | |||
122 | void __init hpet_time_init(void) | ||
123 | { | ||
124 | if (!hpet_enable()) | ||
125 | setup_pit_timer(); | ||
126 | |||
127 | setup_irq(0, &irq0); | ||
128 | } | ||
129 | |||
130 | void __init time_init(void) | ||
131 | { | ||
132 | tsc_init(); | ||
133 | |||
134 | late_time_init = choose_time_init(); | ||
135 | } | ||
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 83264922a878..7dc0de9d1ed9 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #include <asm/mach_traps.h> | 59 | #include <asm/mach_traps.h> |
60 | 60 | ||
61 | #ifdef CONFIG_X86_64 | 61 | #ifdef CONFIG_X86_64 |
62 | #include <asm/x86_init.h> | ||
62 | #include <asm/pgalloc.h> | 63 | #include <asm/pgalloc.h> |
63 | #include <asm/proto.h> | 64 | #include <asm/proto.h> |
64 | #else | 65 | #else |
@@ -972,7 +973,5 @@ void __init trap_init(void) | |||
972 | */ | 973 | */ |
973 | cpu_init(); | 974 | cpu_init(); |
974 | 975 | ||
975 | #ifdef CONFIG_X86_32 | 976 | x86_init.irqs.trap_init(); |
976 | x86_quirk_trap_init(); | ||
977 | #endif | ||
978 | } | 977 | } |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 8a2fc1123690..cd982f48e23e 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <asm/time.h> | 17 | #include <asm/time.h> |
18 | #include <asm/delay.h> | 18 | #include <asm/delay.h> |
19 | #include <asm/hypervisor.h> | 19 | #include <asm/hypervisor.h> |
20 | #include <asm/nmi.h> | ||
21 | #include <asm/x86_init.h> | ||
20 | 22 | ||
21 | unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ | 23 | unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ |
22 | EXPORT_SYMBOL(cpu_khz); | 24 | EXPORT_SYMBOL(cpu_khz); |
@@ -400,15 +402,9 @@ unsigned long native_calibrate_tsc(void) | |||
400 | { | 402 | { |
401 | u64 tsc1, tsc2, delta, ref1, ref2; | 403 | u64 tsc1, tsc2, delta, ref1, ref2; |
402 | unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; | 404 | unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; |
403 | unsigned long flags, latch, ms, fast_calibrate, hv_tsc_khz; | 405 | unsigned long flags, latch, ms, fast_calibrate; |
404 | int hpet = is_hpet_enabled(), i, loopmin; | 406 | int hpet = is_hpet_enabled(), i, loopmin; |
405 | 407 | ||
406 | hv_tsc_khz = get_hypervisor_tsc_freq(); | ||
407 | if (hv_tsc_khz) { | ||
408 | printk(KERN_INFO "TSC: Frequency read from the hypervisor\n"); | ||
409 | return hv_tsc_khz; | ||
410 | } | ||
411 | |||
412 | local_irq_save(flags); | 408 | local_irq_save(flags); |
413 | fast_calibrate = quick_pit_calibrate(); | 409 | fast_calibrate = quick_pit_calibrate(); |
414 | local_irq_restore(flags); | 410 | local_irq_restore(flags); |
@@ -566,7 +562,7 @@ int recalibrate_cpu_khz(void) | |||
566 | unsigned long cpu_khz_old = cpu_khz; | 562 | unsigned long cpu_khz_old = cpu_khz; |
567 | 563 | ||
568 | if (cpu_has_tsc) { | 564 | if (cpu_has_tsc) { |
569 | tsc_khz = calibrate_tsc(); | 565 | tsc_khz = x86_platform.calibrate_tsc(); |
570 | cpu_khz = tsc_khz; | 566 | cpu_khz = tsc_khz; |
571 | cpu_data(0).loops_per_jiffy = | 567 | cpu_data(0).loops_per_jiffy = |
572 | cpufreq_scale(cpu_data(0).loops_per_jiffy, | 568 | cpufreq_scale(cpu_data(0).loops_per_jiffy, |
@@ -744,10 +740,16 @@ static cycle_t __vsyscall_fn vread_tsc(void) | |||
744 | } | 740 | } |
745 | #endif | 741 | #endif |
746 | 742 | ||
743 | static void resume_tsc(void) | ||
744 | { | ||
745 | clocksource_tsc.cycle_last = 0; | ||
746 | } | ||
747 | |||
747 | static struct clocksource clocksource_tsc = { | 748 | static struct clocksource clocksource_tsc = { |
748 | .name = "tsc", | 749 | .name = "tsc", |
749 | .rating = 300, | 750 | .rating = 300, |
750 | .read = read_tsc, | 751 | .read = read_tsc, |
752 | .resume = resume_tsc, | ||
751 | .mask = CLOCKSOURCE_MASK(64), | 753 | .mask = CLOCKSOURCE_MASK(64), |
752 | .shift = 22, | 754 | .shift = 22, |
753 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | | 755 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | |
@@ -761,12 +763,14 @@ void mark_tsc_unstable(char *reason) | |||
761 | { | 763 | { |
762 | if (!tsc_unstable) { | 764 | if (!tsc_unstable) { |
763 | tsc_unstable = 1; | 765 | tsc_unstable = 1; |
764 | printk("Marking TSC unstable due to %s\n", reason); | 766 | printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); |
765 | /* Change only the rating, when not registered */ | 767 | /* Change only the rating, when not registered */ |
766 | if (clocksource_tsc.mult) | 768 | if (clocksource_tsc.mult) |
767 | clocksource_change_rating(&clocksource_tsc, 0); | 769 | clocksource_mark_unstable(&clocksource_tsc); |
768 | else | 770 | else { |
771 | clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; | ||
769 | clocksource_tsc.rating = 0; | 772 | clocksource_tsc.rating = 0; |
773 | } | ||
770 | } | 774 | } |
771 | } | 775 | } |
772 | 776 | ||
@@ -852,15 +856,71 @@ static void __init init_tsc_clocksource(void) | |||
852 | clocksource_register(&clocksource_tsc); | 856 | clocksource_register(&clocksource_tsc); |
853 | } | 857 | } |
854 | 858 | ||
859 | #ifdef CONFIG_X86_64 | ||
860 | /* | ||
861 | * calibrate_cpu is used on systems with fixed rate TSCs to determine | ||
862 | * processor frequency | ||
863 | */ | ||
864 | #define TICK_COUNT 100000000 | ||
865 | static unsigned long __init calibrate_cpu(void) | ||
866 | { | ||
867 | int tsc_start, tsc_now; | ||
868 | int i, no_ctr_free; | ||
869 | unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0; | ||
870 | unsigned long flags; | ||
871 | |||
872 | for (i = 0; i < 4; i++) | ||
873 | if (avail_to_resrv_perfctr_nmi_bit(i)) | ||
874 | break; | ||
875 | no_ctr_free = (i == 4); | ||
876 | if (no_ctr_free) { | ||
877 | WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... " | ||
878 | "cpu_khz value may be incorrect.\n"); | ||
879 | i = 3; | ||
880 | rdmsrl(MSR_K7_EVNTSEL3, evntsel3); | ||
881 | wrmsrl(MSR_K7_EVNTSEL3, 0); | ||
882 | rdmsrl(MSR_K7_PERFCTR3, pmc3); | ||
883 | } else { | ||
884 | reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
885 | reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
886 | } | ||
887 | local_irq_save(flags); | ||
888 | /* start measuring cycles, incrementing from 0 */ | ||
889 | wrmsrl(MSR_K7_PERFCTR0 + i, 0); | ||
890 | wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76); | ||
891 | rdtscl(tsc_start); | ||
892 | do { | ||
893 | rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now); | ||
894 | tsc_now = get_cycles(); | ||
895 | } while ((tsc_now - tsc_start) < TICK_COUNT); | ||
896 | |||
897 | local_irq_restore(flags); | ||
898 | if (no_ctr_free) { | ||
899 | wrmsrl(MSR_K7_EVNTSEL3, 0); | ||
900 | wrmsrl(MSR_K7_PERFCTR3, pmc3); | ||
901 | wrmsrl(MSR_K7_EVNTSEL3, evntsel3); | ||
902 | } else { | ||
903 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
904 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
905 | } | ||
906 | |||
907 | return pmc_now * tsc_khz / (tsc_now - tsc_start); | ||
908 | } | ||
909 | #else | ||
910 | static inline unsigned long calibrate_cpu(void) { return cpu_khz; } | ||
911 | #endif | ||
912 | |||
855 | void __init tsc_init(void) | 913 | void __init tsc_init(void) |
856 | { | 914 | { |
857 | u64 lpj; | 915 | u64 lpj; |
858 | int cpu; | 916 | int cpu; |
859 | 917 | ||
918 | x86_init.timers.tsc_pre_init(); | ||
919 | |||
860 | if (!cpu_has_tsc) | 920 | if (!cpu_has_tsc) |
861 | return; | 921 | return; |
862 | 922 | ||
863 | tsc_khz = calibrate_tsc(); | 923 | tsc_khz = x86_platform.calibrate_tsc(); |
864 | cpu_khz = tsc_khz; | 924 | cpu_khz = tsc_khz; |
865 | 925 | ||
866 | if (!tsc_khz) { | 926 | if (!tsc_khz) { |
@@ -868,11 +928,9 @@ void __init tsc_init(void) | |||
868 | return; | 928 | return; |
869 | } | 929 | } |
870 | 930 | ||
871 | #ifdef CONFIG_X86_64 | ||
872 | if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && | 931 | if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && |
873 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) | 932 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) |
874 | cpu_khz = calibrate_cpu(); | 933 | cpu_khz = calibrate_cpu(); |
875 | #endif | ||
876 | 934 | ||
877 | printk("Detected %lu.%03lu MHz processor.\n", | 935 | printk("Detected %lu.%03lu MHz processor.\n", |
878 | (unsigned long)cpu_khz / 1000, | 936 | (unsigned long)cpu_khz / 1000, |
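Editor's note: tsc_init() and recalibrate_cpu_khz() now calibrate through x86_platform.calibrate_tsc, and the in-line hypervisor frequency query is dropped in favour of the same hook. A sketch of a hypervisor guest supplying its known TSC frequency this way, as VMI does below (my_hv_* names are assumed):

#include <linux/init.h>
#include <asm/x86_init.h>

/* Hypothetical hypervisor call returning the TSC frequency in kHz: */
extern unsigned long my_hv_query_tsc_khz(void);

static unsigned long my_hv_calibrate_tsc(void)
{
	return my_hv_query_tsc_khz();
}

static void __init my_hv_init_platform(void)
{
	x86_platform.calibrate_tsc = my_hv_calibrate_tsc;
}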
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 31ffc24eec4d..f068553a1b17 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/setup.h> | 30 | #include <asm/setup.h> |
31 | #include <asm/apic.h> | 31 | #include <asm/apic.h> |
32 | #include <asm/e820.h> | 32 | #include <asm/e820.h> |
33 | #include <asm/time.h> | ||
33 | #include <asm/io.h> | 34 | #include <asm/io.h> |
34 | 35 | ||
35 | #include <linux/kernel_stat.h> | 36 | #include <linux/kernel_stat.h> |
@@ -53,7 +54,7 @@ int is_visws_box(void) | |||
53 | return visws_board_type >= 0; | 54 | return visws_board_type >= 0; |
54 | } | 55 | } |
55 | 56 | ||
56 | static int __init visws_time_init(void) | 57 | static void __init visws_time_init(void) |
57 | { | 58 | { |
58 | printk(KERN_INFO "Starting Cobalt Timer system clock\n"); | 59 | printk(KERN_INFO "Starting Cobalt Timer system clock\n"); |
59 | 60 | ||
@@ -66,21 +67,13 @@ static int __init visws_time_init(void) | |||
66 | /* Enable (unmask) the timer interrupt */ | 67 | /* Enable (unmask) the timer interrupt */ |
67 | co_cpu_write(CO_CPU_CTRL, co_cpu_read(CO_CPU_CTRL) & ~CO_CTRL_TIMEMASK); | 68 | co_cpu_write(CO_CPU_CTRL, co_cpu_read(CO_CPU_CTRL) & ~CO_CTRL_TIMEMASK); |
68 | 69 | ||
69 | /* | 70 | setup_default_timer_irq(); |
70 | * Zero return means the generic timer setup code will set up | ||
71 | * the standard vector: | ||
72 | */ | ||
73 | return 0; | ||
74 | } | 71 | } |
75 | 72 | ||
76 | static int __init visws_pre_intr_init(void) | 73 | /* Replaces the default init_ISA_irqs in the generic setup */ |
74 | static void __init visws_pre_intr_init(void) | ||
77 | { | 75 | { |
78 | init_VISWS_APIC_irqs(); | 76 | init_VISWS_APIC_irqs(); |
79 | |||
80 | /* | ||
81 | * We dont want ISA irqs to be set up by the generic code: | ||
82 | */ | ||
83 | return 1; | ||
84 | } | 77 | } |
85 | 78 | ||
86 | /* Quirk for machine specific memory setup. */ | 79 | /* Quirk for machine specific memory setup. */ |
@@ -156,12 +149,8 @@ static void visws_machine_power_off(void) | |||
156 | outl(PIIX_SPECIAL_STOP, 0xCFC); | 149 | outl(PIIX_SPECIAL_STOP, 0xCFC); |
157 | } | 150 | } |
158 | 151 | ||
159 | static int __init visws_get_smp_config(unsigned int early) | 152 | static void __init visws_get_smp_config(unsigned int early) |
160 | { | 153 | { |
161 | /* | ||
162 | * Prevent MP-table parsing by the generic code: | ||
163 | */ | ||
164 | return 1; | ||
165 | } | 154 | } |
166 | 155 | ||
167 | /* | 156 | /* |
@@ -208,7 +197,7 @@ static void __init MP_processor_info(struct mpc_cpu *m) | |||
208 | apic_version[m->apicid] = ver; | 197 | apic_version[m->apicid] = ver; |
209 | } | 198 | } |
210 | 199 | ||
211 | static int __init visws_find_smp_config(unsigned int reserve) | 200 | static void __init visws_find_smp_config(unsigned int reserve) |
212 | { | 201 | { |
213 | struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS); | 202 | struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS); |
214 | unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS)); | 203 | unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS)); |
@@ -230,21 +219,9 @@ static int __init visws_find_smp_config(unsigned int reserve) | |||
230 | MP_processor_info(mp++); | 219 | MP_processor_info(mp++); |
231 | 220 | ||
232 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | 221 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; |
233 | |||
234 | return 1; | ||
235 | } | 222 | } |
236 | 223 | ||
237 | static int visws_trap_init(void); | 224 | static void visws_trap_init(void); |
238 | |||
239 | static struct x86_quirks visws_x86_quirks __initdata = { | ||
240 | .arch_time_init = visws_time_init, | ||
241 | .arch_pre_intr_init = visws_pre_intr_init, | ||
242 | .arch_memory_setup = visws_memory_setup, | ||
243 | .arch_intr_init = NULL, | ||
244 | .arch_trap_init = visws_trap_init, | ||
245 | .mach_get_smp_config = visws_get_smp_config, | ||
246 | .mach_find_smp_config = visws_find_smp_config, | ||
247 | }; | ||
248 | 225 | ||
249 | void __init visws_early_detect(void) | 226 | void __init visws_early_detect(void) |
250 | { | 227 | { |
@@ -257,11 +234,14 @@ void __init visws_early_detect(void) | |||
257 | return; | 234 | return; |
258 | 235 | ||
259 | /* | 236 | /* |
260 | * Install special quirks for timer, interrupt and memory setup: | 237 | * Override the default platform setup functions |
261 | * Fall back to generic behavior for traps: | ||
262 | * Override generic MP-table parsing: | ||
263 | */ | 238 | */ |
264 | x86_quirks = &visws_x86_quirks; | 239 | x86_init.resources.memory_setup = visws_memory_setup; |
240 | x86_init.mpparse.get_smp_config = visws_get_smp_config; | ||
241 | x86_init.mpparse.find_smp_config = visws_find_smp_config; | ||
242 | x86_init.irqs.pre_vector_init = visws_pre_intr_init; | ||
243 | x86_init.irqs.trap_init = visws_trap_init; | ||
244 | x86_init.timers.timer_init = visws_time_init; | ||
265 | 245 | ||
266 | /* | 246 | /* |
267 | * Install reboot quirks: | 247 | * Install reboot quirks: |
@@ -400,12 +380,10 @@ static __init void cobalt_init(void) | |||
400 | co_apic_read(CO_APIC_ID)); | 380 | co_apic_read(CO_APIC_ID)); |
401 | } | 381 | } |
402 | 382 | ||
403 | static int __init visws_trap_init(void) | 383 | static void __init visws_trap_init(void) |
404 | { | 384 | { |
405 | lithium_init(); | 385 | lithium_init(); |
406 | cobalt_init(); | 386 | cobalt_init(); |
407 | |||
408 | return 1; | ||
409 | } | 387 | } |
410 | 388 | ||
411 | /* | 389 | /* |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 95a7289e4b0c..31e6f6cfe53e 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -817,15 +817,15 @@ static inline int __init activate_vmi(void) | |||
817 | vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); | 817 | vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); |
818 | vmi_timer_ops.cancel_alarm = | 818 | vmi_timer_ops.cancel_alarm = |
819 | vmi_get_function(VMI_CALL_CancelAlarm); | 819 | vmi_get_function(VMI_CALL_CancelAlarm); |
820 | pv_time_ops.time_init = vmi_time_init; | 820 | x86_init.timers.timer_init = vmi_time_init; |
821 | pv_time_ops.get_wallclock = vmi_get_wallclock; | ||
822 | pv_time_ops.set_wallclock = vmi_set_wallclock; | ||
823 | #ifdef CONFIG_X86_LOCAL_APIC | 821 | #ifdef CONFIG_X86_LOCAL_APIC |
824 | pv_apic_ops.setup_boot_clock = vmi_time_bsp_init; | 822 | x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init; |
825 | pv_apic_ops.setup_secondary_clock = vmi_time_ap_init; | 823 | x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init; |
826 | #endif | 824 | #endif |
827 | pv_time_ops.sched_clock = vmi_sched_clock; | 825 | pv_time_ops.sched_clock = vmi_sched_clock; |
828 | pv_time_ops.get_tsc_khz = vmi_tsc_khz; | 826 | x86_platform.calibrate_tsc = vmi_tsc_khz; |
827 | x86_platform.get_wallclock = vmi_get_wallclock; | ||
828 | x86_platform.set_wallclock = vmi_set_wallclock; | ||
829 | 829 | ||
830 | /* We have true wallclock functions; disable CMOS clock sync */ | 830 | /* We have true wallclock functions; disable CMOS clock sync */ |
831 | no_sync_cmos_clock = 1; | 831 | no_sync_cmos_clock = 1; |
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 2b3eb82efeeb..611b9e2360d3 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c | |||
@@ -68,7 +68,7 @@ unsigned long long vmi_sched_clock(void) | |||
68 | return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE)); | 68 | return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE)); |
69 | } | 69 | } |
70 | 70 | ||
71 | /* paravirt_ops.get_tsc_khz = vmi_tsc_khz */ | 71 | /* x86_platform.calibrate_tsc = vmi_tsc_khz */ |
72 | unsigned long vmi_tsc_khz(void) | 72 | unsigned long vmi_tsc_khz(void) |
73 | { | 73 | { |
74 | unsigned long long khz; | 74 | unsigned long long khz; |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 25ee06a80aad..cf53a78e2dcf 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -87,6 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | |||
87 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; | 87 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; |
88 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; | 88 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; |
89 | vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; | 89 | vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; |
90 | vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); | ||
90 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | 91 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); |
91 | } | 92 | } |
92 | 93 | ||
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c new file mode 100644 index 000000000000..4449a4a2c2ed --- /dev/null +++ b/arch/x86/kernel/x86_init.c | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Thomas Gleixner <tglx@linutronix.de> | ||
3 | * | ||
4 | * For licencing details see kernel-base/COPYING | ||
5 | */ | ||
6 | #include <linux/init.h> | ||
7 | |||
8 | #include <asm/bios_ebda.h> | ||
9 | #include <asm/paravirt.h> | ||
10 | #include <asm/mpspec.h> | ||
11 | #include <asm/setup.h> | ||
12 | #include <asm/apic.h> | ||
13 | #include <asm/e820.h> | ||
14 | #include <asm/time.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <asm/tsc.h> | ||
17 | |||
18 | void __cpuinit x86_init_noop(void) { } | ||
19 | void __init x86_init_uint_noop(unsigned int unused) { } | ||
20 | void __init x86_init_pgd_noop(pgd_t *unused) { } | ||
21 | |||
22 | /* | ||
23 | * The platform setup functions are preset with the default functions | ||
24 | * for standard PC hardware. | ||
25 | */ | ||
26 | struct x86_init_ops x86_init __initdata = { | ||
27 | |||
28 | .resources = { | ||
29 | .probe_roms = x86_init_noop, | ||
30 | .reserve_resources = reserve_standard_io_resources, | ||
31 | .memory_setup = default_machine_specific_memory_setup, | ||
32 | }, | ||
33 | |||
34 | .mpparse = { | ||
35 | .mpc_record = x86_init_uint_noop, | ||
36 | .setup_ioapic_ids = x86_init_noop, | ||
37 | .mpc_apic_id = default_mpc_apic_id, | ||
38 | .smp_read_mpc_oem = default_smp_read_mpc_oem, | ||
39 | .mpc_oem_bus_info = default_mpc_oem_bus_info, | ||
40 | .find_smp_config = default_find_smp_config, | ||
41 | .get_smp_config = default_get_smp_config, | ||
42 | }, | ||
43 | |||
44 | .irqs = { | ||
45 | .pre_vector_init = init_ISA_irqs, | ||
46 | .intr_init = native_init_IRQ, | ||
47 | .trap_init = x86_init_noop, | ||
48 | }, | ||
49 | |||
50 | .oem = { | ||
51 | .arch_setup = x86_init_noop, | ||
52 | .banner = default_banner, | ||
53 | }, | ||
54 | |||
55 | .paging = { | ||
56 | .pagetable_setup_start = native_pagetable_setup_start, | ||
57 | .pagetable_setup_done = native_pagetable_setup_done, | ||
58 | }, | ||
59 | |||
60 | .timers = { | ||
61 | .setup_percpu_clockev = setup_boot_APIC_clock, | ||
62 | .tsc_pre_init = x86_init_noop, | ||
63 | .timer_init = hpet_time_init, | ||
64 | }, | ||
65 | }; | ||
66 | |||
67 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { | ||
68 | .setup_percpu_clockev = setup_secondary_APIC_clock, | ||
69 | }; | ||
70 | |||
71 | struct x86_platform_ops x86_platform = { | ||
72 | .calibrate_tsc = native_calibrate_tsc, | ||
73 | .get_wallclock = mach_get_cmos_time, | ||
74 | .set_wallclock = mach_set_rtc_mmss, | ||
75 | }; | ||
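Editor's note: x86_init.c gathers the defaults in three tables: x86_init is __initdata and x86_cpuinit __cpuinitdata, so platforms must install their overrides from early boot code (as visws_early_detect() and activate_vmi() do above), while x86_platform stays resident because calibrate_tsc and the wallclock accessors are also used after boot. A sketch of such an early-detection path, with every my_platform_* symbol assumed for illustration:

#include <linux/init.h>
#include <asm/x86_init.h>

/* Hypothetical platform detection and hooks, assumed for this sketch: */
extern int my_platform_present(void);
extern void my_platform_arch_setup(void);
extern void my_platform_time_init(void);
extern unsigned long my_platform_calibrate_tsc(void);

void __init my_platform_early_detect(void)
{
	if (!my_platform_present())
		return;

	x86_init.oem.arch_setup    = my_platform_arch_setup;
	x86_init.timers.timer_init = my_platform_time_init;
	x86_platform.calibrate_tsc = my_platform_calibrate_tsc;
}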