-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--arch/x86/Kconfig15
-rw-r--r--arch/x86/include/asm/apb_timer.h70
-rw-r--r--arch/x86/include/asm/hw_irq.h7
-rw-r--r--arch/x86/include/asm/i8259.h19
-rw-r--r--arch/x86/include/asm/io_apic.h7
-rw-r--r--arch/x86/include/asm/irq.h1
-rw-r--r--arch/x86/include/asm/mrst.h19
-rw-r--r--arch/x86/include/asm/numaq.h1
-rw-r--r--arch/x86/include/asm/olpc.h20
-rw-r--r--arch/x86/include/asm/pci.h9
-rw-r--r--arch/x86/include/asm/pci_x86.h22
-rw-r--r--arch/x86/include/asm/setup.h2
-rw-r--r--arch/x86/include/asm/visws/cobalt.h2
-rw-r--r--arch/x86/include/asm/x86_init.h15
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/boot.c4
-rw-r--r--arch/x86/kernel/apb_timer.c784
-rw-r--r--arch/x86/kernel/apic/apic.c8
-rw-r--r--arch/x86/kernel/apic/io_apic.c86
-rw-r--r--arch/x86/kernel/apic/nmi.c2
-rw-r--r--arch/x86/kernel/apic/numaq_32.c1
-rw-r--r--arch/x86/kernel/i8259.c64
-rw-r--r--arch/x86/kernel/irqinit.c9
-rw-r--r--arch/x86/kernel/mrst.c216
-rw-r--r--arch/x86/kernel/olpc.c10
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kernel/visws_quirks.c21
-rw-r--r--arch/x86/kernel/x86_init.c8
-rw-r--r--arch/x86/pci/Makefile2
-rw-r--r--arch/x86/pci/acpi.c7
-rw-r--r--arch/x86/pci/common.c6
-rw-r--r--arch/x86/pci/init.c8
-rw-r--r--arch/x86/pci/irq.c16
-rw-r--r--arch/x86/pci/legacy.c24
-rw-r--r--arch/x86/pci/mrst.c262
-rw-r--r--arch/x86/pci/numaq_32.c6
-rw-r--r--arch/x86/pci/olpc.c3
-rw-r--r--arch/x86/pci/visws.c6
-rw-r--r--drivers/pci/pci.c43
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_regs.h1
42 files changed, 1662 insertions, 162 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d80930d58dae..3bc48b0bd3a9 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2834,6 +2834,12 @@ and is between 256 and 4096 characters. It is defined in the file
2834 default x2apic cluster mode on platforms 2834 default x2apic cluster mode on platforms
2835 supporting x2apic. 2835 supporting x2apic.
2836 2836
2837 x86_mrst_timer= [X86-32,APBT]
2838 Choose the timer option for the x86 Moorestown MID platform.
2839 The two valid options are the apbt timer only, or the lapic
2840 timer plus one apbt timer used as the broadcast timer.
2841 x86_mrst_timer=apbt_only | lapic_and_apbt
2842
2837 xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks. 2843 xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks.
2838 xd_geo= See header of drivers/block/xd.c. 2844 xd_geo= See header of drivers/block/xd.c.
2839 2845
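For reference, the new option is parsed by the __setup("x86_mrst_timer=", ...) handler added later in this patch (setup_x86_mrst_timer() in arch/x86/kernel/apb_timer.c), so on the kernel command line it is passed as one of the two literal values:

	x86_mrst_timer=apbt_only
	x86_mrst_timer=lapic_and_apbt
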
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f15f37bfbd62..e98440371525 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -393,8 +393,12 @@ config X86_ELAN
393 393
394config X86_MRST 394config X86_MRST
395 bool "Moorestown MID platform" 395 bool "Moorestown MID platform"
396 depends on PCI
397 depends on PCI_GOANY
396 depends on X86_32 398 depends on X86_32
397 depends on X86_EXTENDED_PLATFORM 399 depends on X86_EXTENDED_PLATFORM
400 depends on X86_IO_APIC
401 select APB_TIMER
398 ---help--- 402 ---help---
399 Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin 403 Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
400 Internet Device(MID) platform. Moorestown consists of two chips: 404 Internet Device(MID) platform. Moorestown consists of two chips:
@@ -429,6 +433,7 @@ config X86_32_NON_STANDARD
429config X86_NUMAQ 433config X86_NUMAQ
430 bool "NUMAQ (IBM/Sequent)" 434 bool "NUMAQ (IBM/Sequent)"
431 depends on X86_32_NON_STANDARD 435 depends on X86_32_NON_STANDARD
436 depends on PCI
432 select NUMA 437 select NUMA
433 select X86_MPPARSE 438 select X86_MPPARSE
434 ---help--- 439 ---help---
@@ -629,6 +634,16 @@ config HPET_EMULATE_RTC
629 def_bool y 634 def_bool y
630 depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y) 635 depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
631 636
637config APB_TIMER
 638 def_bool y if X86_MRST
639 prompt "Langwell APB Timer Support" if X86_MRST
640 help
 641 The APB timer is the replacement for the 8254 and HPET on x86 MID
 642 platforms. The APBT provides a stable time base on SMP systems,
 643 unlike the TSC, but it is more expensive to access, as it is
 644 off-chip. APB timers keep running regardless of CPU C states, so
 645 they are used as the per CPU clockevent device when possible.
646
632# Mark as embedded because too many people got it wrong. 647# Mark as embedded because too many people got it wrong.
633# The code disables itself when not needed. 648# The code disables itself when not needed.
634config DMI 649config DMI
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
new file mode 100644
index 000000000000..c74a2eebe570
--- /dev/null
+++ b/arch/x86/include/asm/apb_timer.h
@@ -0,0 +1,70 @@
1/*
 2 * apb_timer.h: Driver for Langwell APB timer based on Synopsys DesignWare
3 *
4 * (C) Copyright 2009 Intel Corporation
5 * Author: Jacob Pan (jacob.jun.pan@intel.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 *
12 * Note:
13 */
14
15#ifndef ASM_X86_APBT_H
16#define ASM_X86_APBT_H
17#include <linux/sfi.h>
18
19#ifdef CONFIG_APB_TIMER
20
21/* Langwell DW APB timer registers */
22#define APBTMR_N_LOAD_COUNT 0x00
23#define APBTMR_N_CURRENT_VALUE 0x04
24#define APBTMR_N_CONTROL 0x08
25#define APBTMR_N_EOI 0x0c
26#define APBTMR_N_INT_STATUS 0x10
27
28#define APBTMRS_INT_STATUS 0xa0
29#define APBTMRS_EOI 0xa4
30#define APBTMRS_RAW_INT_STATUS 0xa8
31#define APBTMRS_COMP_VERSION 0xac
32#define APBTMRS_REG_SIZE 0x14
33
34/* register bits */
35#define APBTMR_CONTROL_ENABLE (1<<0)
36#define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */
37#define APBTMR_CONTROL_INT (1<<2)
38
39/* default memory mapped register base */
40#define LNW_SCU_ADDR 0xFF100000
41#define LNW_EXT_TIMER_OFFSET 0x1B800
42#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
43#define LNW_EXT_TIMER_PGOFFSET 0x800
44
45/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
46#define APBT_MAX_FREQ 50
47#define APBT_MIN_FREQ 1
48#define APBT_MMAP_SIZE 1024
49
50#define APBT_DEV_USED 1
51
52extern void apbt_time_init(void);
53extern struct clock_event_device *global_clock_event;
54extern unsigned long apbt_quick_calibrate(void);
55extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
56extern void apbt_setup_secondary_clock(void);
57extern unsigned int boot_cpu_id;
58extern int disable_apbt_percpu;
59
60extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
61extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
62extern int sfi_mtimer_num;
63
64#else /* CONFIG_APB_TIMER */
65
66static inline unsigned long apbt_quick_calibrate(void) {return 0; }
 67static inline void apbt_time_init(void) { }
68
69#endif
70#endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index eeac829a0f44..a929c9ede33d 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -53,13 +53,6 @@ extern void threshold_interrupt(void);
53extern void call_function_interrupt(void); 53extern void call_function_interrupt(void);
54extern void call_function_single_interrupt(void); 54extern void call_function_single_interrupt(void);
55 55
56/* PIC specific functions */
57extern void disable_8259A_irq(unsigned int irq);
58extern void enable_8259A_irq(unsigned int irq);
59extern int i8259A_irq_pending(unsigned int irq);
60extern void make_8259A_irq(unsigned int irq);
61extern void init_8259A(int aeoi);
62
63/* IOAPIC */ 56/* IOAPIC */
64#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) 57#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
65extern unsigned long io_apic_irqs; 58extern unsigned long io_apic_irqs;
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 7ec65b18085d..1655147646aa 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -26,11 +26,6 @@ extern unsigned int cached_irq_mask;
26 26
27extern raw_spinlock_t i8259A_lock; 27extern raw_spinlock_t i8259A_lock;
28 28
29extern void init_8259A(int auto_eoi);
30extern void enable_8259A_irq(unsigned int irq);
31extern void disable_8259A_irq(unsigned int irq);
32extern unsigned int startup_8259A_irq(unsigned int irq);
33
34/* the PIC may need a careful delay on some platforms, hence specific calls */ 29/* the PIC may need a careful delay on some platforms, hence specific calls */
35static inline unsigned char inb_pic(unsigned int port) 30static inline unsigned char inb_pic(unsigned int port)
36{ 31{
@@ -57,7 +52,17 @@ static inline void outb_pic(unsigned char value, unsigned int port)
57 52
58extern struct irq_chip i8259A_chip; 53extern struct irq_chip i8259A_chip;
59 54
60extern void mask_8259A(void); 55struct legacy_pic {
61extern void unmask_8259A(void); 56 int nr_legacy_irqs;
57 struct irq_chip *chip;
58 void (*mask_all)(void);
59 void (*restore_mask)(void);
60 void (*init)(int auto_eoi);
61 int (*irq_pending)(unsigned int irq);
62 void (*make_irq)(unsigned int irq);
63};
64
65extern struct legacy_pic *legacy_pic;
66extern struct legacy_pic null_legacy_pic;
62 67
63#endif /* _ASM_X86_I8259_H */ 68#endif /* _ASM_X86_I8259_H */
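Note that null_legacy_pic is only declared in this header; its definition is expected to live in arch/x86/kernel/i8259.c (modified by this patch, but its hunk is truncated at the end of this excerpt). As an illustration of the intended shape, a no-op implementation might look roughly like the following sketch; the callback bodies and the use of dummy_irq_chip are assumptions, not the actual hunk:

	/* sketch of a no-op legacy PIC for platforms without an i8259 */
	static void legacy_pic_noop(void) { }
	static void legacy_pic_uint_noop(unsigned int unused) { }
	static void legacy_pic_int_noop(int unused) { }

	static int legacy_pic_irq_pending_noop(unsigned int irq)
	{
		return 0;	/* no legacy PIC, so nothing can ever be pending */
	}

	struct legacy_pic null_legacy_pic = {
		.nr_legacy_irqs	= 0,	/* no ISA IRQs behind a PIC */
		.chip		= &dummy_irq_chip,
		.mask_all	= legacy_pic_noop,
		.restore_mask	= legacy_pic_noop,
		.init		= legacy_pic_int_noop,
		.irq_pending	= legacy_pic_irq_pending_noop,
		.make_irq	= legacy_pic_uint_noop,
	};

With nr_legacy_irqs set to 0, all of the "irq < legacy_pic->nr_legacy_irqs" checks in io_apic.c below fall through, which is how the old io_apic_disable_legacy()/nr_legacy_irqs globals are replaced.
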
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 5f61f6e0ffdd..35832a03a515 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -143,8 +143,6 @@ extern int noioapicreroute;
143/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ 143/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
144extern int timer_through_8259; 144extern int timer_through_8259;
145 145
146extern void io_apic_disable_legacy(void);
147
148/* 146/*
149 * If we use the IO-APIC for IRQ routing, disable automatic 147 * If we use the IO-APIC for IRQ routing, disable automatic
150 * assignment of PCI IRQ's. 148 * assignment of PCI IRQ's.
@@ -189,6 +187,7 @@ extern struct mp_ioapic_gsi mp_gsi_routing[];
189int mp_find_ioapic(int gsi); 187int mp_find_ioapic(int gsi);
190int mp_find_ioapic_pin(int ioapic, int gsi); 188int mp_find_ioapic_pin(int ioapic, int gsi);
191void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); 189void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
190extern void __init pre_init_apic_IRQ0(void);
192 191
193#else /* !CONFIG_X86_IO_APIC */ 192#else /* !CONFIG_X86_IO_APIC */
194 193
@@ -198,7 +197,11 @@ static const int timer_through_8259 = 0;
198static inline void ioapic_init_mappings(void) { } 197static inline void ioapic_init_mappings(void) { }
199static inline void ioapic_insert_resources(void) { } 198static inline void ioapic_insert_resources(void) { }
200static inline void probe_nr_irqs_gsi(void) { } 199static inline void probe_nr_irqs_gsi(void) { }
200static inline int mp_find_ioapic(int gsi) { return 0; }
201 201
202struct io_apic_irq_attr;
203static inline int io_apic_set_pci_routing(struct device *dev, int irq,
204 struct io_apic_irq_attr *irq_attr) { return 0; }
202#endif 205#endif
203 206
204#endif /* _ASM_X86_IO_APIC_H */ 207#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 262292729fc4..5458380b6ef8 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -48,6 +48,5 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
48extern int vector_used_by_percpu_irq(unsigned int vector); 48extern int vector_used_by_percpu_irq(unsigned int vector);
49 49
50extern void init_ISA_irqs(void); 50extern void init_ISA_irqs(void);
51extern int nr_legacy_irqs;
52 51
53#endif /* _ASM_X86_IRQ_H */ 52#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
new file mode 100644
index 000000000000..451d30e7f62d
--- /dev/null
+++ b/arch/x86/include/asm/mrst.h
@@ -0,0 +1,19 @@
1/*
2 * mrst.h: Intel Moorestown platform specific setup code
3 *
4 * (C) Copyright 2009 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2
9 * of the License.
10 */
11#ifndef _ASM_X86_MRST_H
12#define _ASM_X86_MRST_H
13extern int pci_mrst_init(void);
14int __init sfi_parse_mrtc(struct sfi_table_header *table);
15
16#define SFI_MTMR_MAX_NUM 8
17#define SFI_MRTC_MAX 8
18
19#endif /* _ASM_X86_MRST_H */
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 13370b95ea94..37c516545ec8 100644
--- a/arch/x86/include/asm/numaq.h
+++ b/arch/x86/include/asm/numaq.h
@@ -30,6 +30,7 @@
30 30
31extern int found_numaq; 31extern int found_numaq;
32extern int get_memcfg_numaq(void); 32extern int get_memcfg_numaq(void);
33extern int pci_numaq_init(void);
33 34
34extern void *xquad_portio; 35extern void *xquad_portio;
35 36
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 3a57385d9fa7..101229b0d8ed 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -13,7 +13,6 @@ struct olpc_platform_t {
13 13
14#define OLPC_F_PRESENT 0x01 14#define OLPC_F_PRESENT 0x01
15#define OLPC_F_DCON 0x02 15#define OLPC_F_DCON 0x02
16#define OLPC_F_VSA 0x04
17 16
18#ifdef CONFIG_OLPC 17#ifdef CONFIG_OLPC
19 18
@@ -51,18 +50,6 @@ static inline int olpc_has_dcon(void)
51} 50}
52 51
53/* 52/*
54 * The VSA is software from AMD that typical Geode bioses will include.
55 * It is used to emulate the PCI bus, VGA, etc. OLPC's Open Firmware does
56 * not include the VSA; instead, PCI is emulated by the kernel.
57 *
58 * The VSA is described further in arch/x86/pci/olpc.c.
59 */
60static inline int olpc_has_vsa(void)
61{
62 return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0;
63}
64
65/*
66 * The "Mass Production" version of OLPC's XO is identified as being model 53 * The "Mass Production" version of OLPC's XO is identified as being model
67 * C2. During the prototype phase, the following models (in chronological 54 * C2. During the prototype phase, the following models (in chronological
68 * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models 55 * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models
@@ -87,13 +74,10 @@ static inline int olpc_has_dcon(void)
87 return 0; 74 return 0;
88} 75}
89 76
90static inline int olpc_has_vsa(void)
91{
92 return 0;
93}
94
95#endif 77#endif
96 78
79extern int pci_olpc_init(void);
80
97/* EC related functions */ 81/* EC related functions */
98 82
99extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, 83extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index b4a00dd4eed5..3e002ca5a287 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -45,8 +45,15 @@ static inline int pci_proc_domain(struct pci_bus *bus)
45 45
46#ifdef CONFIG_PCI 46#ifdef CONFIG_PCI
47extern unsigned int pcibios_assign_all_busses(void); 47extern unsigned int pcibios_assign_all_busses(void);
48extern int pci_legacy_init(void);
49# ifdef CONFIG_ACPI
50# define x86_default_pci_init pci_acpi_init
51# else
52# define x86_default_pci_init pci_legacy_init
53# endif
48#else 54#else
49#define pcibios_assign_all_busses() 0 55# define pcibios_assign_all_busses() 0
56# define x86_default_pci_init NULL
50#endif 57#endif
51 58
52extern unsigned long pci_mem_start; 59extern unsigned long pci_mem_start;
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 05b58ccb2e82..1a0422348d6d 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -83,7 +83,6 @@ struct irq_routing_table {
83 83
84extern unsigned int pcibios_irq_mask; 84extern unsigned int pcibios_irq_mask;
85 85
86extern int pcibios_scanned;
87extern spinlock_t pci_config_lock; 86extern spinlock_t pci_config_lock;
88 87
89extern int (*pcibios_enable_irq)(struct pci_dev *dev); 88extern int (*pcibios_enable_irq)(struct pci_dev *dev);
@@ -106,16 +105,15 @@ extern bool port_cf9_safe;
106extern int pci_direct_probe(void); 105extern int pci_direct_probe(void);
107extern void pci_direct_init(int type); 106extern void pci_direct_init(int type);
108extern void pci_pcbios_init(void); 107extern void pci_pcbios_init(void);
109extern int pci_olpc_init(void);
110extern void __init dmi_check_pciprobe(void); 108extern void __init dmi_check_pciprobe(void);
111extern void __init dmi_check_skip_isa_align(void); 109extern void __init dmi_check_skip_isa_align(void);
112 110
113/* some common used subsys_initcalls */ 111/* some common used subsys_initcalls */
114extern int __init pci_acpi_init(void); 112extern int __init pci_acpi_init(void);
115extern int __init pcibios_irq_init(void); 113extern void __init pcibios_irq_init(void);
116extern int __init pci_visws_init(void);
117extern int __init pci_numaq_init(void);
118extern int __init pcibios_init(void); 114extern int __init pcibios_init(void);
115extern int pci_legacy_init(void);
116extern void pcibios_fixup_irqs(void);
119 117
120/* pci-mmconfig.c */ 118/* pci-mmconfig.c */
121 119
@@ -183,3 +181,17 @@ static inline void mmio_config_writel(void __iomem *pos, u32 val)
183{ 181{
184 asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); 182 asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory");
185} 183}
184
185#ifdef CONFIG_PCI
186# ifdef CONFIG_ACPI
187# define x86_default_pci_init pci_acpi_init
188# else
189# define x86_default_pci_init pci_legacy_init
190# endif
191# define x86_default_pci_init_irq pcibios_irq_init
192# define x86_default_pci_fixup_irqs pcibios_fixup_irqs
193#else
194# define x86_default_pci_init NULL
195# define x86_default_pci_init_irq NULL
196# define x86_default_pci_fixup_irqs NULL
197#endif
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 18e496c98ff0..86b1506f4179 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -37,10 +37,8 @@ void setup_bios_corruption_check(void);
37 37
38#ifdef CONFIG_X86_VISWS 38#ifdef CONFIG_X86_VISWS
39extern void visws_early_detect(void); 39extern void visws_early_detect(void);
40extern int is_visws_box(void);
41#else 40#else
42static inline void visws_early_detect(void) { } 41static inline void visws_early_detect(void) { }
43static inline int is_visws_box(void) { return 0; }
44#endif 42#endif
45 43
46extern unsigned long saved_video_mode; 44extern unsigned long saved_video_mode;
diff --git a/arch/x86/include/asm/visws/cobalt.h b/arch/x86/include/asm/visws/cobalt.h
index 166adf61e770..2edb37637ead 100644
--- a/arch/x86/include/asm/visws/cobalt.h
+++ b/arch/x86/include/asm/visws/cobalt.h
@@ -122,4 +122,6 @@ extern char visws_board_type;
122 122
123extern char visws_board_rev; 123extern char visws_board_rev;
124 124
125extern int pci_visws_init(void);
126
125#endif /* _ASM_X86_VISWS_COBALT_H */ 127#endif /* _ASM_X86_VISWS_COBALT_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 60cc35269083..519b54327d75 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -99,6 +99,20 @@ struct x86_init_iommu {
99}; 99};
100 100
101/** 101/**
102 * struct x86_init_pci - platform specific pci init functions
103 * @arch_init: platform specific pci arch init call
104 * @init: platform specific pci subsystem init
105 * @init_irq: platform specific pci irq init
106 * @fixup_irqs: platform specific pci irq fixup
107 */
108struct x86_init_pci {
109 int (*arch_init)(void);
110 int (*init)(void);
111 void (*init_irq)(void);
112 void (*fixup_irqs)(void);
113};
114
115/**
102 * struct x86_init_ops - functions for platform specific setup 116 * struct x86_init_ops - functions for platform specific setup
103 * 117 *
104 */ 118 */
@@ -110,6 +124,7 @@ struct x86_init_ops {
110 struct x86_init_paging paging; 124 struct x86_init_paging paging;
111 struct x86_init_timers timers; 125 struct x86_init_timers timers;
112 struct x86_init_iommu iommu; 126 struct x86_init_iommu iommu;
127 struct x86_init_pci pci;
113}; 128};
114 129
115/** 130/**
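For orientation, the new ops table is meant to be filled with the x86_default_pci_* macros added to pci_x86.h above and then selectively overridden by platform code, as the NumaQ and ACPI hunks later in this patch do. A sketch of the default wiring, assuming it lives in arch/x86/kernel/x86_init.c (that file is in the diffstat but its hunk is not shown here), plus a hypothetical platform override:

	/* sketch of the default wiring; the real x86_init.c hunk is not shown above */
	struct x86_init_ops x86_init __initdata = {
		/* ...existing sub-structs (resources, mpparse, timers, ...) elided... */
		.pci = {
			.init		= x86_default_pci_init,
			.init_irq	= x86_default_pci_init_irq,
			.fixup_irqs	= x86_default_pci_fixup_irqs,
		},
	};

	/* a platform then replaces only the callbacks it cares about (hypothetical helper) */
	static void __init example_platform_setup(void)
	{
		x86_init.pci.init = pci_mrst_init;	/* declared in asm/mrst.h above */
	}
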
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d87f09bc5a52..4c58352209e0 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_VM86) += vm86_32.o
87obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 87obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
88 88
89obj-$(CONFIG_HPET_TIMER) += hpet.o 89obj-$(CONFIG_HPET_TIMER) += hpet.o
90obj-$(CONFIG_APB_TIMER) += apb_timer.o
90 91
91obj-$(CONFIG_K8_NB) += k8.o 92obj-$(CONFIG_K8_NB) += k8.o
92obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o 93obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 738fcb60e708..a54d714545ff 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -35,6 +35,7 @@
35#include <linux/ioport.h> 35#include <linux/ioport.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37 37
38#include <asm/pci_x86.h>
38#include <asm/pgtable.h> 39#include <asm/pgtable.h>
39#include <asm/io_apic.h> 40#include <asm/io_apic.h>
40#include <asm/apic.h> 41#include <asm/apic.h>
@@ -1624,6 +1625,9 @@ int __init acpi_boot_init(void)
1624 1625
1625 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); 1626 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
1626 1627
1628 if (!acpi_noirq)
1629 x86_init.pci.init = pci_acpi_init;
1630
1627 return 0; 1631 return 0;
1628} 1632}
1629 1633
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
new file mode 100644
index 000000000000..4b7099526d2c
--- /dev/null
+++ b/arch/x86/kernel/apb_timer.c
@@ -0,0 +1,784 @@
1/*
2 * apb_timer.c: Driver for Langwell APB timers
3 *
4 * (C) Copyright 2009 Intel Corporation
5 * Author: Jacob Pan (jacob.jun.pan@intel.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 *
12 * Note:
 13 * Langwell is the south complex of the Intel Moorestown MID platform. There
 14 * are eight external timers in total that can be used by the operating system.
 15 * The timer information, such as frequency and addresses, is provided to the
 16 * OS via SFI tables.
 17 * Timer interrupts are routed through the FW/HW emulated IOAPIC, each with its
 18 * own redirection table entry (RTE).
 19 * Unlike HPET, there is no master counter; therefore one of the timers is
 20 * used as the clocksource. The overall allocation looks like:
 21 * - timer 0 - NR_CPUs for per cpu timers
 22 * - one timer for clocksource
 23 * - one timer for watchdog driver.
 24 * It is also worth noting that the APB timer does not support true one-shot
 25 * mode; free-running mode is used here to emulate it. The APB timer can also
 26 * be used as a broadcast timer along with the per cpu local APIC timer, but by
 27 * default the APB timer has a higher rating than the local APIC timers.
28 */
29
30#include <linux/clocksource.h>
31#include <linux/clockchips.h>
32#include <linux/delay.h>
33#include <linux/errno.h>
34#include <linux/init.h>
35#include <linux/sysdev.h>
36#include <linux/pm.h>
37#include <linux/pci.h>
38#include <linux/sfi.h>
39#include <linux/interrupt.h>
40#include <linux/cpu.h>
41#include <linux/irq.h>
42
43#include <asm/fixmap.h>
44#include <asm/apb_timer.h>
45
46#define APBT_MASK CLOCKSOURCE_MASK(32)
47#define APBT_SHIFT 22
48#define APBT_CLOCKEVENT_RATING 150
49#define APBT_CLOCKSOURCE_RATING 250
50#define APBT_MIN_DELTA_USEC 200
51
52#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
53#define APBT_CLOCKEVENT0_NUM (0)
54#define APBT_CLOCKEVENT1_NUM (1)
55#define APBT_CLOCKSOURCE_NUM (2)
56
57static unsigned long apbt_address;
58static int apb_timer_block_enabled;
59static void __iomem *apbt_virt_address;
60static int phy_cs_timer_id;
61
62/*
63 * Common DW APB timer info
64 */
65static uint64_t apbt_freq;
66
67static void apbt_set_mode(enum clock_event_mode mode,
68 struct clock_event_device *evt);
69static int apbt_next_event(unsigned long delta,
70 struct clock_event_device *evt);
71static cycle_t apbt_read_clocksource(struct clocksource *cs);
72static void apbt_restart_clocksource(struct clocksource *cs);
73
74struct apbt_dev {
75 struct clock_event_device evt;
76 unsigned int num;
77 int cpu;
78 unsigned int irq;
79 unsigned int tick;
80 unsigned int count;
81 unsigned int flags;
82 char name[10];
83};
84
85int disable_apbt_percpu __cpuinitdata;
86
87static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
88
89#ifdef CONFIG_SMP
90static unsigned int apbt_num_timers_used;
91static struct apbt_dev *apbt_devs;
92#endif
93
94static inline unsigned long apbt_readl_reg(unsigned long a)
95{
96 return readl(apbt_virt_address + a);
97}
98
99static inline void apbt_writel_reg(unsigned long d, unsigned long a)
100{
101 writel(d, apbt_virt_address + a);
102}
103
104static inline unsigned long apbt_readl(int n, unsigned long a)
105{
106 return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
107}
108
109static inline void apbt_writel(int n, unsigned long d, unsigned long a)
110{
111 writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE);
112}
113
114static inline void apbt_set_mapping(void)
115{
116 struct sfi_timer_table_entry *mtmr;
117
118 if (apbt_virt_address) {
119 pr_debug("APBT base already mapped\n");
120 return;
121 }
122 mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
123 if (mtmr == NULL) {
124 printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
125 APBT_CLOCKEVENT0_NUM);
126 return;
127 }
128 apbt_address = (unsigned long)mtmr->phys_addr;
129 if (!apbt_address) {
130 printk(KERN_WARNING "No timer base from SFI, use default\n");
131 apbt_address = APBT_DEFAULT_BASE;
132 }
133 apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
134 if (apbt_virt_address) {
135 pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\
136 (void *)apbt_address, (void *)apbt_virt_address);
137 } else {
138 pr_debug("Failed mapping APBT phy address at %p\n",\
139 (void *)apbt_address);
140 goto panic_noapbt;
141 }
142 apbt_freq = mtmr->freq_hz / USEC_PER_SEC;
143 sfi_free_mtmr(mtmr);
144
145 /* Now figure out the physical timer id for clocksource device */
146 mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
147 if (mtmr == NULL)
148 goto panic_noapbt;
149
150 /* Now figure out the physical timer id */
151 phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff)
152 / APBTMRS_REG_SIZE;
153 pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id);
154 return;
155
156panic_noapbt:
157 panic("Failed to setup APB system timer\n");
158
159}
160
161static inline void apbt_clear_mapping(void)
162{
163 iounmap(apbt_virt_address);
164 apbt_virt_address = NULL;
165}
166
167/*
168 * APBT timer interrupt enable / disable
169 */
170static inline int is_apbt_capable(void)
171{
172 return apbt_virt_address ? 1 : 0;
173}
174
175static struct clocksource clocksource_apbt = {
176 .name = "apbt",
177 .rating = APBT_CLOCKSOURCE_RATING,
178 .read = apbt_read_clocksource,
179 .mask = APBT_MASK,
180 .shift = APBT_SHIFT,
181 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
182 .resume = apbt_restart_clocksource,
183};
184
185/* boot APB clock event device */
186static struct clock_event_device apbt_clockevent = {
187 .name = "apbt0",
188 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
189 .set_mode = apbt_set_mode,
190 .set_next_event = apbt_next_event,
191 .shift = APBT_SHIFT,
192 .irq = 0,
193 .rating = APBT_CLOCKEVENT_RATING,
194};
195
196/*
197 * if user does not want to use per CPU apb timer, just give it a lower rating
198 * than local apic timer and skip the late per cpu timer init.
199 */
200static inline int __init setup_x86_mrst_timer(char *arg)
201{
202 if (!arg)
203 return -EINVAL;
204
205 if (strcmp("apbt_only", arg) == 0)
206 disable_apbt_percpu = 0;
207 else if (strcmp("lapic_and_apbt", arg) == 0)
208 disable_apbt_percpu = 1;
209 else {
 210 pr_warning("X86 MRST timer option %s not recognised,"
211 " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
212 arg);
213 return -EINVAL;
214 }
215 return 0;
216}
217__setup("x86_mrst_timer=", setup_x86_mrst_timer);
218
219/*
 220 * Start the count down from 0xffff_ffff. This is done by toggling the enable
 221 * bit and then loading the initial load count of ~0.
222 */
223static void apbt_start_counter(int n)
224{
225 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
226
227 ctrl &= ~APBTMR_CONTROL_ENABLE;
228 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
229 apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT);
230 /* enable, mask interrupt */
231 ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
232 ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
233 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
234 /* read it once to get cached counter value initialized */
235 apbt_read_clocksource(&clocksource_apbt);
236}
237
238static irqreturn_t apbt_interrupt_handler(int irq, void *data)
239{
240 struct apbt_dev *dev = (struct apbt_dev *)data;
241 struct clock_event_device *aevt = &dev->evt;
242
243 if (!aevt->event_handler) {
244 printk(KERN_INFO "Spurious APBT timer interrupt on %d\n",
245 dev->num);
246 return IRQ_NONE;
247 }
248 aevt->event_handler(aevt);
249 return IRQ_HANDLED;
250}
251
252static void apbt_restart_clocksource(struct clocksource *cs)
253{
254 apbt_start_counter(phy_cs_timer_id);
255}
256
257/* Setup IRQ routing via IOAPIC */
258#ifdef CONFIG_SMP
259static void apbt_setup_irq(struct apbt_dev *adev)
260{
261 struct irq_chip *chip;
262 struct irq_desc *desc;
263
264 /* timer0 irq has been setup early */
265 if (adev->irq == 0)
266 return;
267 desc = irq_to_desc(adev->irq);
268 chip = get_irq_chip(adev->irq);
269 disable_irq(adev->irq);
270 desc->status |= IRQ_MOVE_PCNTXT;
271 irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
 272 /* APB timer irqs are set up as mp_irqs, the timer is edge triggered */
273 set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
274 enable_irq(adev->irq);
275 if (system_state == SYSTEM_BOOTING)
276 if (request_irq(adev->irq, apbt_interrupt_handler,
277 IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
278 adev->name, adev)) {
279 printk(KERN_ERR "Failed request IRQ for APBT%d\n",
280 adev->num);
281 }
282}
283#endif
284
285static void apbt_enable_int(int n)
286{
287 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
288 /* clear pending intr */
289 apbt_readl(n, APBTMR_N_EOI);
290 ctrl &= ~APBTMR_CONTROL_INT;
291 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
292}
293
294static void apbt_disable_int(int n)
295{
296 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
297
298 ctrl |= APBTMR_CONTROL_INT;
299 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
300}
301
302
303static int __init apbt_clockevent_register(void)
304{
305 struct sfi_timer_table_entry *mtmr;
306 struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
307
308 mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
309 if (mtmr == NULL) {
310 printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
311 APBT_CLOCKEVENT0_NUM);
312 return -ENODEV;
313 }
314
315 /*
 316 * We need to calculate the scaled math multiplication factor for
 317 * nanosecond to apbt tick conversion:
 318 * mult = (ticks per nsec) * 2^APBT_SHIFT = freq_hz * 2^APBT_SHIFT / NSEC_PER_SEC
319 */
320 apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz
321 , NSEC_PER_SEC, APBT_SHIFT);
322
323 /* Calculate the min / max delta */
324 apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
325 &apbt_clockevent);
326 apbt_clockevent.min_delta_ns = clockevent_delta2ns(
327 APBT_MIN_DELTA_USEC*apbt_freq,
328 &apbt_clockevent);
329 /*
330 * Start apbt with the boot cpu mask and make it
331 * global if not used for per cpu timer.
332 */
333 apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
334 adev->num = smp_processor_id();
335 memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
336
337 if (disable_apbt_percpu) {
338 apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
339 global_clock_event = &adev->evt;
340 printk(KERN_DEBUG "%s clockevent registered as global\n",
341 global_clock_event->name);
342 }
343
344 if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
345 IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
346 apbt_clockevent.name, adev)) {
347 printk(KERN_ERR "Failed request IRQ for APBT%d\n",
348 apbt_clockevent.irq);
349 }
350
351 clockevents_register_device(&adev->evt);
352 /* Start APBT 0 interrupts */
353 apbt_enable_int(APBT_CLOCKEVENT0_NUM);
354
355 sfi_free_mtmr(mtmr);
356 return 0;
357}
358
359#ifdef CONFIG_SMP
 360/* Runs on each CPU being brought up to register its per cpu clockevent (the boot CPU is skipped) */
361void apbt_setup_secondary_clock(void)
362{
363 struct apbt_dev *adev;
364 struct clock_event_device *aevt;
365 int cpu;
366
367 /* Don't register boot CPU clockevent */
368 cpu = smp_processor_id();
369 if (cpu == boot_cpu_id)
370 return;
371 /*
 372 * The mult/shift factors for nanosecond to apbt tick conversion are
 373 * inherited from the boot clockevent template (apbt_clockevent) via
 374 * the memcpy below.
375 */
376 printk(KERN_INFO "Init per CPU clockevent %d\n", cpu);
377 adev = &per_cpu(cpu_apbt_dev, cpu);
378 aevt = &adev->evt;
379
380 memcpy(aevt, &apbt_clockevent, sizeof(*aevt));
381 aevt->cpumask = cpumask_of(cpu);
382 aevt->name = adev->name;
383 aevt->mode = CLOCK_EVT_MODE_UNUSED;
384
385 printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n",
386 cpu, aevt->name, *(u32 *)aevt->cpumask);
387
388 apbt_setup_irq(adev);
389
390 clockevents_register_device(aevt);
391
392 apbt_enable_int(cpu);
393
394 return;
395}
396
397/*
 398 * This notify handler processes CPU hotplug events. In the case of S0i3, the
 399 * non-boot CPUs are disabled/enabled frequently; for performance reasons, we
 400 * keep the per cpu timer irq registered so that we do not need to do
 401 * free_irq/request_irq.
 402 *
 403 * TODO: it might be more reliable to directly disable the percpu clockevent
 404 * device without the notifier chain. Currently, cpu 0 may get interrupts from
 405 * other cpu timers during the offline process due to the ordering of notification; the extra interrupt is harmless.
406 */
407static int apbt_cpuhp_notify(struct notifier_block *n,
408 unsigned long action, void *hcpu)
409{
410 unsigned long cpu = (unsigned long)hcpu;
411 struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
412
413 switch (action & 0xf) {
414 case CPU_DEAD:
415 apbt_disable_int(cpu);
416 if (system_state == SYSTEM_RUNNING)
417 pr_debug("skipping APBT CPU %lu offline\n", cpu);
418 else if (adev) {
419 pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
420 free_irq(adev->irq, adev);
421 }
422 break;
423 default:
 424 pr_debug("APBT notified %lu, no action\n", action);
425 }
426 return NOTIFY_OK;
427}
428
429static __init int apbt_late_init(void)
430{
431 if (disable_apbt_percpu)
432 return 0;
433 /* This notifier should be called after workqueue is ready */
434 hotcpu_notifier(apbt_cpuhp_notify, -20);
435 return 0;
436}
437fs_initcall(apbt_late_init);
438#else
439
440void apbt_setup_secondary_clock(void) {}
441
442#endif /* CONFIG_SMP */
443
444static void apbt_set_mode(enum clock_event_mode mode,
445 struct clock_event_device *evt)
446{
447 unsigned long ctrl;
448 uint64_t delta;
449 int timer_num;
450 struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
451
452 timer_num = adev->num;
453 pr_debug("%s CPU %d timer %d mode=%d\n",
454 __func__, first_cpu(*evt->cpumask), timer_num, mode);
455
456 switch (mode) {
457 case CLOCK_EVT_MODE_PERIODIC:
458 delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
459 delta >>= apbt_clockevent.shift;
460 ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
461 ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
462 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
463 /*
 464 * DW APB p. 46: the timer has to be disabled before loading the
 465 * counter, otherwise it may cause a sync problem.
466 */
467 ctrl &= ~APBTMR_CONTROL_ENABLE;
468 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
469 udelay(1);
470 pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
471 apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
472 ctrl |= APBTMR_CONTROL_ENABLE;
473 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
474 break;
475 /* APB timer does not have one-shot mode, use free running mode */
476 case CLOCK_EVT_MODE_ONESHOT:
477 ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
478 /*
 479 * Set free running mode: the timer reloads the maximum timeout,
 480 * which gives enough time (3 min on a 25 MHz clock) to rearm the
 481 * next event, thereby emulating one-shot mode.
482 */
483 ctrl &= ~APBTMR_CONTROL_ENABLE;
484 ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
485
486 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
487 /* write again to set free running mode */
488 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
489
490 /*
491 * DW APB p. 46, load counter with all 1s before starting free
492 * running mode.
493 */
494 apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
495 ctrl &= ~APBTMR_CONTROL_INT;
496 ctrl |= APBTMR_CONTROL_ENABLE;
497 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
498 break;
499
500 case CLOCK_EVT_MODE_UNUSED:
501 case CLOCK_EVT_MODE_SHUTDOWN:
502 apbt_disable_int(timer_num);
503 ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
504 ctrl &= ~APBTMR_CONTROL_ENABLE;
505 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
506 break;
507
508 case CLOCK_EVT_MODE_RESUME:
509 apbt_enable_int(timer_num);
510 break;
511 }
512}
513
514static int apbt_next_event(unsigned long delta,
515 struct clock_event_device *evt)
516{
517 unsigned long ctrl;
518 int timer_num;
519
520 struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
521
522 timer_num = adev->num;
523 /* Disable timer */
524 ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
525 ctrl &= ~APBTMR_CONTROL_ENABLE;
526 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
527 /* write new count */
528 apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
529 ctrl |= APBTMR_CONTROL_ENABLE;
530 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
531 return 0;
532}
533
534/*
 535 * The APB timer clock is not in sync with pclk on Langwell, which translates
 536 * to unreliable read values caused by sampling error. The error does not add
 537 * up over time and only happens when a 0 is sampled as a 1 by mistake, so the
 538 * time would appear to go backwards. The following code tries to prevent time
 539 * from traveling backwards. A little bit paranoid.
540 */
541static cycle_t apbt_read_clocksource(struct clocksource *cs)
542{
543 unsigned long t0, t1, t2;
544 static unsigned long last_read;
545
546bad_count:
547 t1 = apbt_readl(phy_cs_timer_id,
548 APBTMR_N_CURRENT_VALUE);
549 t2 = apbt_readl(phy_cs_timer_id,
550 APBTMR_N_CURRENT_VALUE);
551 if (unlikely(t1 < t2)) {
552 pr_debug("APBT: read current count error %lx:%lx:%lx\n",
553 t1, t2, t2 - t1);
554 goto bad_count;
555 }
556 /*
 557 * Check against the cached last read to make sure time does not go back;
 558 * it could be a normal rollover, but we do a triple check anyway.
559 */
560 if (unlikely(t2 > last_read)) {
561 /* check if we have a normal rollover */
562 unsigned long raw_intr_status =
563 apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
564 /*
565 * cs timer interrupt is masked but raw intr bit is set if
566 * rollover occurs. then we read EOI reg to clear it.
567 */
568 if (raw_intr_status & (1 << phy_cs_timer_id)) {
569 apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
570 goto out;
571 }
572 pr_debug("APB CS going back %lx:%lx:%lx ",
573 t2, last_read, t2 - last_read);
574bad_count_x3:
 575 pr_debug("triple check enforced\n");
576 t0 = apbt_readl(phy_cs_timer_id,
577 APBTMR_N_CURRENT_VALUE);
578 udelay(1);
579 t1 = apbt_readl(phy_cs_timer_id,
580 APBTMR_N_CURRENT_VALUE);
581 udelay(1);
582 t2 = apbt_readl(phy_cs_timer_id,
583 APBTMR_N_CURRENT_VALUE);
584 if ((t2 > t1) || (t1 > t0)) {
 585 printk(KERN_ERR "Error: APB CS triple check failed\n");
586 goto bad_count_x3;
587 }
588 }
589out:
590 last_read = t2;
591 return (cycle_t)~t2;
592}
593
594static int apbt_clocksource_register(void)
595{
596 u64 start, now;
597 cycle_t t1;
598
599 /* Start the counter, use timer 2 as source, timer 0/1 for event */
600 apbt_start_counter(phy_cs_timer_id);
601
602 /* Verify whether apbt counter works */
603 t1 = apbt_read_clocksource(&clocksource_apbt);
604 rdtscll(start);
605
606 /*
607 * We don't know the TSC frequency yet, but waiting for
608 * 200000 TSC cycles is safe:
609 * 4 GHz == 50us
610 * 1 GHz == 200us
611 */
612 do {
613 rep_nop();
614 rdtscll(now);
615 } while ((now - start) < 200000UL);
616
617 /* APBT is the only always on clocksource, it has to work! */
618 if (t1 == apbt_read_clocksource(&clocksource_apbt))
619 panic("APBT counter not counting. APBT disabled\n");
620
621 /*
622 * initialize and register APBT clocksource
623 * convert that to ns/clock cycle
624 * mult = (ns/c) * 2^APBT_SHIFT
625 */
626 clocksource_apbt.mult = div_sc(MSEC_PER_SEC,
627 (unsigned long) apbt_freq, APBT_SHIFT);
628 clocksource_register(&clocksource_apbt);
629
630 return 0;
631}
632
633/*
 634 * Early setup of the APBT timer: only timer 0 is used for booting, then we
 635 * switch to per CPU timers if possible.
 636 * Per CPU APB timers are only used when there are enough SFI timers for all
 637 * possible CPUs; otherwise a single global APB timer is used.
 638 * Panics if setup fails, since this is the only platform timer on Moorestown.
639 */
640void __init apbt_time_init(void)
641{
642#ifdef CONFIG_SMP
643 int i;
644 struct sfi_timer_table_entry *p_mtmr;
645 unsigned int percpu_timer;
646 struct apbt_dev *adev;
647#endif
648
649 if (apb_timer_block_enabled)
650 return;
651 apbt_set_mapping();
652 if (apbt_virt_address) {
653 pr_debug("Found APBT version 0x%lx\n",\
654 apbt_readl_reg(APBTMRS_COMP_VERSION));
655 } else
656 goto out_noapbt;
657 /*
658 * Read the frequency and check for a sane value, for ESL model
659 * we extend the possible clock range to allow time scaling.
660 */
661
662 if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
663 pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq);
664 goto out_noapbt;
665 }
666 if (apbt_clocksource_register()) {
667 pr_debug("APBT has failed to register clocksource\n");
668 goto out_noapbt;
669 }
670 if (!apbt_clockevent_register())
671 apb_timer_block_enabled = 1;
672 else {
673 pr_debug("APBT has failed to register clockevent\n");
674 goto out_noapbt;
675 }
676#ifdef CONFIG_SMP
 677 /* the kernel cmdline disabled the per cpu apb timer, so we will use lapic timers */
678 if (disable_apbt_percpu) {
679 printk(KERN_INFO "apbt: disabled per cpu timer\n");
680 return;
681 }
682 pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
683 if (num_possible_cpus() <= sfi_mtimer_num) {
684 percpu_timer = 1;
685 apbt_num_timers_used = num_possible_cpus();
686 } else {
687 percpu_timer = 0;
688 apbt_num_timers_used = 1;
689 adev = &per_cpu(cpu_apbt_dev, 0);
690 adev->flags &= ~APBT_DEV_USED;
691 }
692 pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
693
694 /* here we set up per CPU timer data structure */
695 apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
696 GFP_KERNEL);
697 if (!apbt_devs) {
698 printk(KERN_ERR "Failed to allocate APB timer devices\n");
699 return;
700 }
701 for (i = 0; i < apbt_num_timers_used; i++) {
702 adev = &per_cpu(cpu_apbt_dev, i);
703 adev->num = i;
704 adev->cpu = i;
705 p_mtmr = sfi_get_mtmr(i);
706 if (p_mtmr) {
707 adev->tick = p_mtmr->freq_hz;
708 adev->irq = p_mtmr->irq;
709 } else
710 printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
711 adev->count = 0;
712 sprintf(adev->name, "apbt%d", i);
713 }
714#endif
715
716 return;
717
718out_noapbt:
719 apbt_clear_mapping();
720 apb_timer_block_enabled = 0;
721 panic("failed to enable APB timer\n");
722}
723
724static inline void apbt_disable(int n)
725{
726 if (is_apbt_capable()) {
727 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
728 ctrl &= ~APBTMR_CONTROL_ENABLE;
729 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
730 }
731}
732
733/* called before apb_timer_enable, use early map */
734unsigned long apbt_quick_calibrate()
735{
736 int i, scale;
737 u64 old, new;
738 cycle_t t1, t2;
739 unsigned long khz = 0;
740 u32 loop, shift;
741
742 apbt_set_mapping();
743 apbt_start_counter(phy_cs_timer_id);
744
745 /* check if the timer can count down, otherwise return */
746 old = apbt_read_clocksource(&clocksource_apbt);
747 i = 10000;
748 while (--i) {
749 if (old != apbt_read_clocksource(&clocksource_apbt))
750 break;
751 }
752 if (!i)
753 goto failed;
754
755 /* count 16 ms */
756 loop = (apbt_freq * 1000) << 4;
757
758 /* restart the timer to ensure it won't get to 0 in the calibration */
759 apbt_start_counter(phy_cs_timer_id);
760
761 old = apbt_read_clocksource(&clocksource_apbt);
762 old += loop;
763
764 t1 = __native_read_tsc();
765
766 do {
767 new = apbt_read_clocksource(&clocksource_apbt);
768 } while (new < old);
769
770 t2 = __native_read_tsc();
771
772 shift = 5;
773 if (unlikely(loop >> shift == 0)) {
774 printk(KERN_INFO
775 "APBT TSC calibration failed, not enough resolution\n");
776 return 0;
777 }
778 scale = (int)div_u64((t2 - t1), loop >> shift);
779 khz = (scale * apbt_freq * 1000) >> shift;
780 printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
781 return khz;
782failed:
783 return 0;
784}
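To sanity-check the scaled math used in apbt_clockevent_register() and apbt_set_mode() above, here is a small standalone sketch of the same arithmetic. The 25 MHz clock and HZ=100 are assumptions picked for the example, not values taken from the patch:

	#include <stdio.h>
	#include <stdint.h>

	#define APBT_SHIFT   22
	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t freq_hz = 25000000ULL;	/* assumed 25 MHz APB timer clock (example) */
		uint64_t hz = 100;		/* assumed CONFIG_HZ (example) */

		/* div_sc(freq_hz, NSEC_PER_SEC, APBT_SHIFT) == (freq_hz << shift) / NSEC_PER_SEC */
		uint64_t mult = (freq_hz << APBT_SHIFT) / NSEC_PER_SEC;

		/* periodic reload, as computed in apbt_set_mode(): (ns per tick) * mult >> shift */
		uint64_t delta = ((NSEC_PER_SEC / hz) * mult) >> APBT_SHIFT;

		/* prints: mult=104857 delta=249998 -- roughly 25 MHz * 10 ms, as expected */
		printf("mult=%llu delta=%llu\n",
		       (unsigned long long)mult, (unsigned long long)delta);
		return 0;
	}
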
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 6e29b2a77aa8..00187f1fcfb7 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1390,7 +1390,7 @@ void __init enable_IR_x2apic(void)
1390 } 1390 }
1391 1391
1392 local_irq_save(flags); 1392 local_irq_save(flags);
1393 mask_8259A(); 1393 legacy_pic->mask_all();
1394 mask_IO_APIC_setup(ioapic_entries); 1394 mask_IO_APIC_setup(ioapic_entries);
1395 1395
1396 if (dmar_table_init_ret) 1396 if (dmar_table_init_ret)
@@ -1422,7 +1422,7 @@ void __init enable_IR_x2apic(void)
1422nox2apic: 1422nox2apic:
1423 if (!ret) /* IR enabling failed */ 1423 if (!ret) /* IR enabling failed */
1424 restore_IO_APIC_setup(ioapic_entries); 1424 restore_IO_APIC_setup(ioapic_entries);
1425 unmask_8259A(); 1425 legacy_pic->restore_mask();
1426 local_irq_restore(flags); 1426 local_irq_restore(flags);
1427 1427
1428out: 1428out:
@@ -2018,7 +2018,7 @@ static int lapic_resume(struct sys_device *dev)
2018 } 2018 }
2019 2019
2020 mask_IO_APIC_setup(ioapic_entries); 2020 mask_IO_APIC_setup(ioapic_entries);
2021 mask_8259A(); 2021 legacy_pic->mask_all();
2022 } 2022 }
2023 2023
2024 if (x2apic_mode) 2024 if (x2apic_mode)
@@ -2062,7 +2062,7 @@ static int lapic_resume(struct sys_device *dev)
2062 2062
2063 if (intr_remapping_enabled) { 2063 if (intr_remapping_enabled) {
2064 reenable_intr_remapping(x2apic_mode); 2064 reenable_intr_remapping(x2apic_mode);
2065 unmask_8259A(); 2065 legacy_pic->restore_mask();
2066 restore_IO_APIC_setup(ioapic_entries); 2066 restore_IO_APIC_setup(ioapic_entries);
2067 free_ioapic_entries(ioapic_entries); 2067 free_ioapic_entries(ioapic_entries);
2068 } 2068 }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 14862f11cc4a..e4e0ddcb1546 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -143,12 +143,6 @@ static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
143static struct irq_cfg irq_cfgx[NR_IRQS]; 143static struct irq_cfg irq_cfgx[NR_IRQS];
144#endif 144#endif
145 145
146void __init io_apic_disable_legacy(void)
147{
148 nr_legacy_irqs = 0;
149 nr_irqs_gsi = 0;
150}
151
152int __init arch_early_irq_init(void) 146int __init arch_early_irq_init(void)
153{ 147{
154 struct irq_cfg *cfg; 148 struct irq_cfg *cfg;
@@ -157,6 +151,11 @@ int __init arch_early_irq_init(void)
157 int node; 151 int node;
158 int i; 152 int i;
159 153
154 if (!legacy_pic->nr_legacy_irqs) {
155 nr_irqs_gsi = 0;
156 io_apic_irqs = ~0UL;
157 }
158
160 cfg = irq_cfgx; 159 cfg = irq_cfgx;
161 count = ARRAY_SIZE(irq_cfgx); 160 count = ARRAY_SIZE(irq_cfgx);
162 node= cpu_to_node(boot_cpu_id); 161 node= cpu_to_node(boot_cpu_id);
@@ -170,7 +169,7 @@ int __init arch_early_irq_init(void)
170 * For legacy IRQ's, start with assigning irq0 to irq15 to 169 * For legacy IRQ's, start with assigning irq0 to irq15 to
171 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. 170 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
172 */ 171 */
173 if (i < nr_legacy_irqs) { 172 if (i < legacy_pic->nr_legacy_irqs) {
174 cfg[i].vector = IRQ0_VECTOR + i; 173 cfg[i].vector = IRQ0_VECTOR + i;
175 cpumask_set_cpu(0, cfg[i].domain); 174 cpumask_set_cpu(0, cfg[i].domain);
176 } 175 }
@@ -852,7 +851,7 @@ static int __init find_isa_irq_apic(int irq, int type)
852 */ 851 */
853static int EISA_ELCR(unsigned int irq) 852static int EISA_ELCR(unsigned int irq)
854{ 853{
855 if (irq < nr_legacy_irqs) { 854 if (irq < legacy_pic->nr_legacy_irqs) {
856 unsigned int port = 0x4d0 + (irq >> 3); 855 unsigned int port = 0x4d0 + (irq >> 3);
857 return (inb(port) >> (irq & 7)) & 1; 856 return (inb(port) >> (irq & 7)) & 1;
858 } 857 }
@@ -1439,7 +1438,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
1439 * controllers like 8259. Now that IO-APIC can handle this irq, update 1438 * controllers like 8259. Now that IO-APIC can handle this irq, update
1440 * the cfg->domain. 1439 * the cfg->domain.
1441 */ 1440 */
1442 if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) 1441 if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
1443 apic->vector_allocation_domain(0, cfg->domain); 1442 apic->vector_allocation_domain(0, cfg->domain);
1444 1443
1445 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1444 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
@@ -1463,8 +1462,8 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
1463 } 1462 }
1464 1463
1465 ioapic_register_intr(irq, desc, trigger); 1464 ioapic_register_intr(irq, desc, trigger);
1466 if (irq < nr_legacy_irqs) 1465 if (irq < legacy_pic->nr_legacy_irqs)
1467 disable_8259A_irq(irq); 1466 legacy_pic->chip->mask(irq);
1468 1467
1469 ioapic_write_entry(apic_id, pin, entry); 1468 ioapic_write_entry(apic_id, pin, entry);
1470} 1469}
@@ -1873,7 +1872,7 @@ __apicdebuginit(void) print_PIC(void)
1873 unsigned int v; 1872 unsigned int v;
1874 unsigned long flags; 1873 unsigned long flags;
1875 1874
1876 if (!nr_legacy_irqs) 1875 if (!legacy_pic->nr_legacy_irqs)
1877 return; 1876 return;
1878 1877
1879 printk(KERN_DEBUG "\nprinting PIC contents\n"); 1878 printk(KERN_DEBUG "\nprinting PIC contents\n");
@@ -1957,7 +1956,7 @@ void __init enable_IO_APIC(void)
1957 nr_ioapic_registers[apic] = reg_01.bits.entries+1; 1956 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1958 } 1957 }
1959 1958
1960 if (!nr_legacy_irqs) 1959 if (!legacy_pic->nr_legacy_irqs)
1961 return; 1960 return;
1962 1961
1963 for(apic = 0; apic < nr_ioapics; apic++) { 1962 for(apic = 0; apic < nr_ioapics; apic++) {
@@ -2014,7 +2013,7 @@ void disable_IO_APIC(void)
2014 */ 2013 */
2015 clear_IO_APIC(); 2014 clear_IO_APIC();
2016 2015
2017 if (!nr_legacy_irqs) 2016 if (!legacy_pic->nr_legacy_irqs)
2018 return; 2017 return;
2019 2018
2020 /* 2019 /*
@@ -2247,9 +2246,9 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2247 struct irq_cfg *cfg; 2246 struct irq_cfg *cfg;
2248 2247
2249 raw_spin_lock_irqsave(&ioapic_lock, flags); 2248 raw_spin_lock_irqsave(&ioapic_lock, flags);
2250 if (irq < nr_legacy_irqs) { 2249 if (irq < legacy_pic->nr_legacy_irqs) {
2251 disable_8259A_irq(irq); 2250 legacy_pic->chip->mask(irq);
2252 if (i8259A_irq_pending(irq)) 2251 if (legacy_pic->irq_pending(irq))
2253 was_pending = 1; 2252 was_pending = 1;
2254 } 2253 }
2255 cfg = irq_cfg(irq); 2254 cfg = irq_cfg(irq);
@@ -2782,8 +2781,8 @@ static inline void init_IO_APIC_traps(void)
2782 * so default to an old-fashioned 8259 2781 * so default to an old-fashioned 8259
2783 * interrupt if we can.. 2782 * interrupt if we can..
2784 */ 2783 */
2785 if (irq < nr_legacy_irqs) 2784 if (irq < legacy_pic->nr_legacy_irqs)
2786 make_8259A_irq(irq); 2785 legacy_pic->make_irq(irq);
2787 else 2786 else
2788 /* Strange. Oh, well.. */ 2787 /* Strange. Oh, well.. */
2789 desc->chip = &no_irq_chip; 2788 desc->chip = &no_irq_chip;
@@ -2940,7 +2939,7 @@ static inline void __init check_timer(void)
2940 /* 2939 /*
2941 * get/set the timer IRQ vector: 2940 * get/set the timer IRQ vector:
2942 */ 2941 */
2943 disable_8259A_irq(0); 2942 legacy_pic->chip->mask(0);
2944 assign_irq_vector(0, cfg, apic->target_cpus()); 2943 assign_irq_vector(0, cfg, apic->target_cpus());
2945 2944
2946 /* 2945 /*
@@ -2953,7 +2952,7 @@ static inline void __init check_timer(void)
2953 * automatically. 2952 * automatically.
2954 */ 2953 */
2955 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2954 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2956 init_8259A(1); 2955 legacy_pic->init(1);
2957#ifdef CONFIG_X86_32 2956#ifdef CONFIG_X86_32
2958 { 2957 {
2959 unsigned int ver; 2958 unsigned int ver;
@@ -3012,7 +3011,7 @@ static inline void __init check_timer(void)
3012 if (timer_irq_works()) { 3011 if (timer_irq_works()) {
3013 if (nmi_watchdog == NMI_IO_APIC) { 3012 if (nmi_watchdog == NMI_IO_APIC) {
3014 setup_nmi(); 3013 setup_nmi();
3015 enable_8259A_irq(0); 3014 legacy_pic->chip->unmask(0);
3016 } 3015 }
3017 if (disable_timer_pin_1 > 0) 3016 if (disable_timer_pin_1 > 0)
3018 clear_IO_APIC_pin(0, pin1); 3017 clear_IO_APIC_pin(0, pin1);
@@ -3035,14 +3034,14 @@ static inline void __init check_timer(void)
3035 */ 3034 */
3036 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 3035 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
3037 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 3036 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
3038 enable_8259A_irq(0); 3037 legacy_pic->chip->unmask(0);
3039 if (timer_irq_works()) { 3038 if (timer_irq_works()) {
3040 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 3039 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
3041 timer_through_8259 = 1; 3040 timer_through_8259 = 1;
3042 if (nmi_watchdog == NMI_IO_APIC) { 3041 if (nmi_watchdog == NMI_IO_APIC) {
3043 disable_8259A_irq(0); 3042 legacy_pic->chip->mask(0);
3044 setup_nmi(); 3043 setup_nmi();
3045 enable_8259A_irq(0); 3044 legacy_pic->chip->unmask(0);
3046 } 3045 }
3047 goto out; 3046 goto out;
3048 } 3047 }
@@ -3050,7 +3049,7 @@ static inline void __init check_timer(void)
3050 * Cleanup, just in case ... 3049 * Cleanup, just in case ...
3051 */ 3050 */
3052 local_irq_disable(); 3051 local_irq_disable();
3053 disable_8259A_irq(0); 3052 legacy_pic->chip->mask(0);
3054 clear_IO_APIC_pin(apic2, pin2); 3053 clear_IO_APIC_pin(apic2, pin2);
3055 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 3054 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
3056 } 3055 }
@@ -3069,22 +3068,22 @@ static inline void __init check_timer(void)
3069 3068
3070 lapic_register_intr(0, desc); 3069 lapic_register_intr(0, desc);
3071 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 3070 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
3072 enable_8259A_irq(0); 3071 legacy_pic->chip->unmask(0);
3073 3072
3074 if (timer_irq_works()) { 3073 if (timer_irq_works()) {
3075 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 3074 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3076 goto out; 3075 goto out;
3077 } 3076 }
3078 local_irq_disable(); 3077 local_irq_disable();
3079 disable_8259A_irq(0); 3078 legacy_pic->chip->mask(0);
3080 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 3079 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3081 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 3080 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
3082 3081
3083 apic_printk(APIC_QUIET, KERN_INFO 3082 apic_printk(APIC_QUIET, KERN_INFO
3084 "...trying to set up timer as ExtINT IRQ...\n"); 3083 "...trying to set up timer as ExtINT IRQ...\n");
3085 3084
3086 init_8259A(0); 3085 legacy_pic->init(0);
3087 make_8259A_irq(0); 3086 legacy_pic->make_irq(0);
3088 apic_write(APIC_LVT0, APIC_DM_EXTINT); 3087 apic_write(APIC_LVT0, APIC_DM_EXTINT);
3089 3088
3090 unlock_ExtINT_logic(); 3089 unlock_ExtINT_logic();
@@ -3126,7 +3125,7 @@ void __init setup_IO_APIC(void)
3126 /* 3125 /*
3127 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 3126 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
3128 */ 3127 */
3129 io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 3128 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
3130 3129
3131 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 3130 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
3132 /* 3131 /*
@@ -3137,7 +3136,7 @@ void __init setup_IO_APIC(void)
3137 sync_Arb_IDs(); 3136 sync_Arb_IDs();
3138 setup_IO_APIC_irqs(); 3137 setup_IO_APIC_irqs();
3139 init_IO_APIC_traps(); 3138 init_IO_APIC_traps();
3140 if (nr_legacy_irqs) 3139 if (legacy_pic->nr_legacy_irqs)
3141 check_timer(); 3140 check_timer();
3142} 3141}
3143 3142
@@ -3928,7 +3927,7 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
3928 /* 3927 /*
3929 * IRQs < 16 are already in the irq_2_pin[] map 3928 * IRQs < 16 are already in the irq_2_pin[] map
3930 */ 3929 */
3931 if (irq >= nr_legacy_irqs) { 3930 if (irq >= legacy_pic->nr_legacy_irqs) {
3932 cfg = desc->chip_data; 3931 cfg = desc->chip_data;
3933 if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { 3932 if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
3934 printk(KERN_INFO "can not add pin %d for irq %d\n", 3933 printk(KERN_INFO "can not add pin %d for irq %d\n",
@@ -4302,3 +4301,24 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
4302 4301
4303 nr_ioapics++; 4302 nr_ioapics++;
4304} 4303}
4304
4305/* Enable IOAPIC early just for system timer */
4306void __init pre_init_apic_IRQ0(void)
4307{
4308 struct irq_cfg *cfg;
4309 struct irq_desc *desc;
4310
4311 printk(KERN_INFO "Early APIC setup for system timer0\n");
4312#ifndef CONFIG_SMP
4313 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
4314#endif
4315 desc = irq_to_desc_alloc_node(0, 0);
4316
4317 setup_local_APIC();
4318
4319 cfg = irq_cfg(0);
4320 add_pin_to_irq_node(cfg, 0, 0, 0);
4321 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
4322
4323 setup_IO_APIC_irq(0, 0, 0, desc, 0, 0);
4324}
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index bd7c96b5e8d8..8aa65adbd25d 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -177,7 +177,7 @@ int __init check_nmi_watchdog(void)
177error: 177error:
178 if (nmi_watchdog == NMI_IO_APIC) { 178 if (nmi_watchdog == NMI_IO_APIC) {
179 if (!timer_through_8259) 179 if (!timer_through_8259)
180 disable_8259A_irq(0); 180 legacy_pic->chip->mask(0);
181 on_each_cpu(__acpi_nmi_disable, NULL, 1); 181 on_each_cpu(__acpi_nmi_disable, NULL, 1);
182 } 182 }
183 183
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index 47dd856708e5..3e28401f161c 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -277,6 +277,7 @@ static __init void early_check_numaq(void)
277 x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus; 277 x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
278 x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info; 278 x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
279 x86_init.timers.tsc_pre_init = numaq_tsc_init; 279 x86_init.timers.tsc_pre_init = numaq_tsc_init;
280 x86_init.pci.init = pci_numaq_init;
280 } 281 }
281} 282}
282 283
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 8c93a84bb627..fb725ee15f55 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -34,6 +34,12 @@
34static int i8259A_auto_eoi; 34static int i8259A_auto_eoi;
35DEFINE_RAW_SPINLOCK(i8259A_lock); 35DEFINE_RAW_SPINLOCK(i8259A_lock);
36static void mask_and_ack_8259A(unsigned int); 36static void mask_and_ack_8259A(unsigned int);
37static void mask_8259A(void);
38static void unmask_8259A(void);
39static void disable_8259A_irq(unsigned int irq);
40static void enable_8259A_irq(unsigned int irq);
41static void init_8259A(int auto_eoi);
42static int i8259A_irq_pending(unsigned int irq);
37 43
38struct irq_chip i8259A_chip = { 44struct irq_chip i8259A_chip = {
39 .name = "XT-PIC", 45 .name = "XT-PIC",
@@ -63,7 +69,7 @@ unsigned int cached_irq_mask = 0xffff;
63 */ 69 */
64unsigned long io_apic_irqs; 70unsigned long io_apic_irqs;
65 71
66void disable_8259A_irq(unsigned int irq) 72static void disable_8259A_irq(unsigned int irq)
67{ 73{
68 unsigned int mask = 1 << irq; 74 unsigned int mask = 1 << irq;
69 unsigned long flags; 75 unsigned long flags;
@@ -77,7 +83,7 @@ void disable_8259A_irq(unsigned int irq)
77 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 83 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
78} 84}
79 85
80void enable_8259A_irq(unsigned int irq) 86static void enable_8259A_irq(unsigned int irq)
81{ 87{
82 unsigned int mask = ~(1 << irq); 88 unsigned int mask = ~(1 << irq);
83 unsigned long flags; 89 unsigned long flags;
@@ -91,7 +97,7 @@ void enable_8259A_irq(unsigned int irq)
91 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 97 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
92} 98}
93 99
94int i8259A_irq_pending(unsigned int irq) 100static int i8259A_irq_pending(unsigned int irq)
95{ 101{
96 unsigned int mask = 1<<irq; 102 unsigned int mask = 1<<irq;
97 unsigned long flags; 103 unsigned long flags;
@@ -107,7 +113,7 @@ int i8259A_irq_pending(unsigned int irq)
107 return ret; 113 return ret;
108} 114}
109 115
110void make_8259A_irq(unsigned int irq) 116static void make_8259A_irq(unsigned int irq)
111{ 117{
112 disable_irq_nosync(irq); 118 disable_irq_nosync(irq);
113 io_apic_irqs &= ~(1<<irq); 119 io_apic_irqs &= ~(1<<irq);
@@ -281,7 +287,7 @@ static int __init i8259A_init_sysfs(void)
281 287
282device_initcall(i8259A_init_sysfs); 288device_initcall(i8259A_init_sysfs);
283 289
284void mask_8259A(void) 290static void mask_8259A(void)
285{ 291{
286 unsigned long flags; 292 unsigned long flags;
287 293
@@ -293,7 +299,7 @@ void mask_8259A(void)
293 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 299 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
294} 300}
295 301
296void unmask_8259A(void) 302static void unmask_8259A(void)
297{ 303{
298 unsigned long flags; 304 unsigned long flags;
299 305
@@ -305,7 +311,7 @@ void unmask_8259A(void)
305 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 311 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
306} 312}
307 313
308void init_8259A(int auto_eoi) 314static void init_8259A(int auto_eoi)
309{ 315{
310 unsigned long flags; 316 unsigned long flags;
311 317
@@ -358,3 +364,47 @@ void init_8259A(int auto_eoi)
358 364
359 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 365 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
360} 366}
367
368/*
369 * Make the i8259 a driver so that PIC functions can be selected at run time.
370 * The goal is to keep a single x86 binary compatible across PC-compatible and
371 * non-PC-compatible platforms, such as x86 MID.
372 */
373
374static void legacy_pic_noop(void) { };
375static void legacy_pic_uint_noop(unsigned int unused) { };
376static void legacy_pic_int_noop(int unused) { };
377
378static struct irq_chip dummy_pic_chip = {
379 .name = "dummy pic",
380 .mask = legacy_pic_uint_noop,
381 .unmask = legacy_pic_uint_noop,
382 .disable = legacy_pic_uint_noop,
383 .mask_ack = legacy_pic_uint_noop,
384};
385static int legacy_pic_irq_pending_noop(unsigned int irq)
386{
387 return 0;
388}
389
390struct legacy_pic null_legacy_pic = {
391 .nr_legacy_irqs = 0,
392 .chip = &dummy_pic_chip,
393 .mask_all = legacy_pic_noop,
394 .restore_mask = legacy_pic_noop,
395 .init = legacy_pic_int_noop,
396 .irq_pending = legacy_pic_irq_pending_noop,
397 .make_irq = legacy_pic_uint_noop,
398};
399
400struct legacy_pic default_legacy_pic = {
401 .nr_legacy_irqs = NR_IRQS_LEGACY,
402 .chip = &i8259A_chip,
403 .mask_all = mask_8259A,
404 .restore_mask = unmask_8259A,
405 .init = init_8259A,
406 .irq_pending = i8259A_irq_pending,
407 .make_irq = make_8259A_irq,
408};
409
410struct legacy_pic *legacy_pic = &default_legacy_pic;
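
Note: the point of the legacy_pic indirection added above is that generic code (io_apic.c, irqinit.c, smpboot.c and so on) no longer calls the 8259 helpers directly; it goes through a function-pointer table that early platform setup can swap for null_legacy_pic. The following stand-alone C sketch mirrors that pattern with mock types; mock_chip, mock_legacy_pic and platform_is_mid are invented for the example, only the shape of the indirection matches the patch.

#include <stdio.h>

/* Mock of the mask/unmask part of struct irq_chip used via legacy_pic->chip. */
struct mock_chip {
	const char *name;
	void (*mask)(unsigned int irq);
	void (*unmask)(unsigned int irq);
};

/* Mock of struct legacy_pic: an IRQ count plus the PIC operations. */
struct mock_legacy_pic {
	int nr_legacy_irqs;
	struct mock_chip *chip;
	void (*init)(int auto_eoi);
};

static void pic_mask(unsigned int irq)   { printf("8259: mask IRQ%u\n", irq); }
static void pic_unmask(unsigned int irq) { printf("8259: unmask IRQ%u\n", irq); }
static void pic_init(int auto_eoi)       { printf("8259: init, auto_eoi=%d\n", auto_eoi); }

/* The no-op variants stand in for dummy_pic_chip/null_legacy_pic on MID. */
static void noop_uint(unsigned int irq)  { (void)irq; }
static void noop_int(int unused)         { (void)unused; }

static struct mock_chip real_chip = { "XT-PIC", pic_mask, pic_unmask };
static struct mock_chip null_chip = { "dummy",  noop_uint, noop_uint };

static struct mock_legacy_pic default_pic = { 16, &real_chip, pic_init };
static struct mock_legacy_pic null_pic    = {  0, &null_chip, noop_int };

/* Generic code only ever dereferences this pointer, like the kernel's legacy_pic. */
static struct mock_legacy_pic *legacy = &default_pic;

int main(void)
{
	int platform_is_mid = 1;		/* pretend early detection found a MID board */

	if (platform_is_mid)
		legacy = &null_pic;		/* x86_mrst_early_setup() does the equivalent */

	legacy->init(0);			/* replaces a direct init_8259A(0) call */
	if (legacy->nr_legacy_irqs)		/* guard used by io_apic.c/irqinit.c */
		legacy->chip->unmask(0);
	else
		printf("no legacy PIC: skipping IRQ0 unmask\n");
	return 0;
}
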
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index fce55d532631..ef257fc2921b 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -99,9 +99,6 @@ int vector_used_by_percpu_irq(unsigned int vector)
99 return 0; 99 return 0;
100} 100}
101 101
102/* Number of legacy interrupts */
103int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
104
105void __init init_ISA_irqs(void) 102void __init init_ISA_irqs(void)
106{ 103{
107 int i; 104 int i;
@@ -109,12 +106,12 @@ void __init init_ISA_irqs(void)
109#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) 106#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
110 init_bsp_APIC(); 107 init_bsp_APIC();
111#endif 108#endif
112 init_8259A(0); 109 legacy_pic->init(0);
113 110
114 /* 111 /*
115 * 16 old-style INTA-cycle interrupts: 112 * 16 old-style INTA-cycle interrupts:
116 */ 113 */
117 for (i = 0; i < NR_IRQS_LEGACY; i++) { 114 for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) {
118 struct irq_desc *desc = irq_to_desc(i); 115 struct irq_desc *desc = irq_to_desc(i);
119 116
120 desc->status = IRQ_DISABLED; 117 desc->status = IRQ_DISABLED;
@@ -138,7 +135,7 @@ void __init init_IRQ(void)
138 * then this vector space can be freed and re-used dynamically as the 135 * then this vector space can be freed and re-used dynamically as the
139 * irq's migrate etc. 136 * irq's migrate etc.
140 */ 137 */
141 for (i = 0; i < nr_legacy_irqs; i++) 138 for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
142 per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i; 139 per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
143 140
144 x86_init.irqs.intr_init(); 141 x86_init.irqs.intr_init();
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
index 3b7078abc871..0aad8670858e 100644
--- a/arch/x86/kernel/mrst.c
+++ b/arch/x86/kernel/mrst.c
@@ -10,8 +10,211 @@
10 * of the License. 10 * of the License.
11 */ 11 */
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/sfi.h>
15#include <linux/irq.h>
16#include <linux/module.h>
13 17
14#include <asm/setup.h> 18#include <asm/setup.h>
19#include <asm/mpspec_def.h>
20#include <asm/hw_irq.h>
21#include <asm/apic.h>
22#include <asm/io_apic.h>
23#include <asm/mrst.h>
24#include <asm/io.h>
25#include <asm/i8259.h>
26#include <asm/apb_timer.h>
27
28static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
29static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
30int sfi_mtimer_num;
31
32struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
33EXPORT_SYMBOL_GPL(sfi_mrtc_array);
34int sfi_mrtc_num;
35
36static inline void assign_to_mp_irq(struct mpc_intsrc *m,
37 struct mpc_intsrc *mp_irq)
38{
39 memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
40}
41
42static inline int mp_irq_cmp(struct mpc_intsrc *mp_irq,
43 struct mpc_intsrc *m)
44{
45 return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
46}
47
48static void save_mp_irq(struct mpc_intsrc *m)
49{
50 int i;
51
52 for (i = 0; i < mp_irq_entries; i++) {
53 if (!mp_irq_cmp(&mp_irqs[i], m))
54 return;
55 }
56
57 assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
58 if (++mp_irq_entries == MAX_IRQ_SOURCES)
59 panic("Max # of irq sources exceeded!!\n");
60}
61
62/* parse all the mtimer info to a static mtimer array */
63static int __init sfi_parse_mtmr(struct sfi_table_header *table)
64{
65 struct sfi_table_simple *sb;
66 struct sfi_timer_table_entry *pentry;
67 struct mpc_intsrc mp_irq;
68 int totallen;
69
70 sb = (struct sfi_table_simple *)table;
71 if (!sfi_mtimer_num) {
72 sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
73 struct sfi_timer_table_entry);
74 pentry = (struct sfi_timer_table_entry *) sb->pentry;
75 totallen = sfi_mtimer_num * sizeof(*pentry);
76 memcpy(sfi_mtimer_array, pentry, totallen);
77 }
78
79 printk(KERN_INFO "SFI: MTIMER info (num = %d):\n", sfi_mtimer_num);
80 pentry = sfi_mtimer_array;
81 for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
82 printk(KERN_INFO "timer[%d]: paddr = 0x%08x, freq = %dHz,"
83 " irq = %d\n", totallen, (u32)pentry->phys_addr,
84 pentry->freq_hz, pentry->irq);
85 if (!pentry->irq)
86 continue;
87 mp_irq.type = MP_IOAPIC;
88 mp_irq.irqtype = mp_INT;
89/* trigger mode: edge (bits 2-3); polarity: active high (bits 0-1) */
90 mp_irq.irqflag = 5;
91 mp_irq.srcbus = 0;
92 mp_irq.srcbusirq = pentry->irq; /* IRQ */
93 mp_irq.dstapic = MP_APIC_ALL;
94 mp_irq.dstirq = pentry->irq;
95 save_mp_irq(&mp_irq);
96 }
97
98 return 0;
99}
100
101struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
102{
103 int i;
104 if (hint < sfi_mtimer_num) {
105 if (!sfi_mtimer_usage[hint]) {
106 pr_debug("hint taken for timer %d irq %d\n",\
107 hint, sfi_mtimer_array[hint].irq);
108 sfi_mtimer_usage[hint] = 1;
109 return &sfi_mtimer_array[hint];
110 }
111 }
112 /* take the first timer available */
113 for (i = 0; i < sfi_mtimer_num;) {
114 if (!sfi_mtimer_usage[i]) {
115 sfi_mtimer_usage[i] = 1;
116 return &sfi_mtimer_array[i];
117 }
118 i++;
119 }
120 return NULL;
121}
122
123void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
124{
125 int i;
126 for (i = 0; i < sfi_mtimer_num;) {
127 if (mtmr->irq == sfi_mtimer_array[i].irq) {
128 sfi_mtimer_usage[i] = 0;
129 return;
130 }
131 i++;
132 }
133}
134
135/* parse all the mrtc info to a global mrtc array */
136int __init sfi_parse_mrtc(struct sfi_table_header *table)
137{
138 struct sfi_table_simple *sb;
139 struct sfi_rtc_table_entry *pentry;
140 struct mpc_intsrc mp_irq;
141
142 int totallen;
143
144 sb = (struct sfi_table_simple *)table;
145 if (!sfi_mrtc_num) {
146 sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
147 struct sfi_rtc_table_entry);
148 pentry = (struct sfi_rtc_table_entry *)sb->pentry;
149 totallen = sfi_mrtc_num * sizeof(*pentry);
150 memcpy(sfi_mrtc_array, pentry, totallen);
151 }
152
153 printk(KERN_INFO "SFI: RTC info (num = %d):\n", sfi_mrtc_num);
154 pentry = sfi_mrtc_array;
155 for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
156 printk(KERN_INFO "RTC[%d]: paddr = 0x%08x, irq = %d\n",
157 totallen, (u32)pentry->phys_addr, pentry->irq);
158 mp_irq.type = MP_IOAPIC;
159 mp_irq.irqtype = mp_INT;
160 mp_irq.irqflag = 0;
161 mp_irq.srcbus = 0;
162 mp_irq.srcbusirq = pentry->irq; /* IRQ */
163 mp_irq.dstapic = MP_APIC_ALL;
164 mp_irq.dstirq = pentry->irq;
165 save_mp_irq(&mp_irq);
166 }
167 return 0;
168}
169
170/*
171 * The secondary clock in Moorestown can be the APBT or the LAPIC clock; it
172 * defaults to APBT, but a cmdline option can override it.
173 */
174static void __cpuinit mrst_setup_secondary_clock(void)
175{
176 /* restore default lapic clock if disabled by cmdline */
177 if (disable_apbt_percpu)
178 return setup_secondary_APIC_clock();
179 apbt_setup_secondary_clock();
180}
181
182static unsigned long __init mrst_calibrate_tsc(void)
183{
184 unsigned long flags, fast_calibrate;
185
186 local_irq_save(flags);
187 fast_calibrate = apbt_quick_calibrate();
188 local_irq_restore(flags);
189
190 if (fast_calibrate)
191 return fast_calibrate;
192
193 return 0;
194}
195
196void __init mrst_time_init(void)
197{
198 sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
199 pre_init_apic_IRQ0();
200 apbt_time_init();
201}
202
203void __init mrst_rtc_init(void)
204{
205 sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
206}
207
208/*
209 * If we use the per-cpu apb timer, the boot clock is already set up. With the
210 * lapic timer plus one apbt broadcast timer, we must set up the lapic boot clock.
211 */
212static void __init mrst_setup_boot_clock(void)
213{
214 pr_info("%s: per cpu apbt flag %d \n", __func__, disable_apbt_percpu);
215 if (disable_apbt_percpu)
216 setup_boot_APIC_clock();
217};
15 218
16/* 219/*
17 * Moorestown specific x86_init function overrides and early setup 220 * Moorestown specific x86_init function overrides and early setup
@@ -21,4 +224,17 @@ void __init x86_mrst_early_setup(void)
21{ 224{
22 x86_init.resources.probe_roms = x86_init_noop; 225 x86_init.resources.probe_roms = x86_init_noop;
23 x86_init.resources.reserve_resources = x86_init_noop; 226 x86_init.resources.reserve_resources = x86_init_noop;
227
228 x86_init.timers.timer_init = mrst_time_init;
229 x86_init.timers.setup_percpu_clockev = mrst_setup_boot_clock;
230
231 x86_init.irqs.pre_vector_init = x86_init_noop;
232
233 x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock;
234
235 x86_platform.calibrate_tsc = mrst_calibrate_tsc;
236 x86_init.pci.init = pci_mrst_init;
237 x86_init.pci.fixup_irqs = x86_init_noop;
238
239 legacy_pic = &null_legacy_pic;
24} 240}
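
Note: in sfi_parse_mtmr() above, each SFI timer entry is recorded as an MP interrupt source with mp_irq.irqflag = 5; per the in-line comment, bits 0-1 carry the polarity and bits 2-3 the trigger mode, so 5 means edge-triggered, active-high. A tiny stand-alone check of that encoding follows; the constant names are local to this sketch, not kernel macros.

#include <stdio.h>

/* Local illustration constants, not kernel macros: in the MP-table irqflag
 * field, bits 0-1 are the polarity and bits 2-3 the trigger mode. */
enum { POLARITY_ACTIVE_HIGH = 0x1, TRIGGER_EDGE = 0x1 };

int main(void)
{
	unsigned int irqflag = (TRIGGER_EDGE << 2) | POLARITY_ACTIVE_HIGH;

	/* Prints 5, matching the value hard-coded in sfi_parse_mtmr(). */
	printf("irqflag = %u\n", irqflag);
	return 0;
}
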
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 9d1d263f786f..8297160c41b3 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -17,7 +17,9 @@
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/string.h> 19#include <linux/string.h>
20
20#include <asm/geode.h> 21#include <asm/geode.h>
22#include <asm/setup.h>
21#include <asm/olpc.h> 23#include <asm/olpc.h>
22 24
23#ifdef CONFIG_OPEN_FIRMWARE 25#ifdef CONFIG_OPEN_FIRMWARE
@@ -243,9 +245,11 @@ static int __init olpc_init(void)
243 olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, 245 olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0,
244 (unsigned char *) &olpc_platform_info.ecver, 1); 246 (unsigned char *) &olpc_platform_info.ecver, 1);
245 247
246 /* check to see if the VSA exists */ 248#ifdef CONFIG_PCI_OLPC
247 if (cs5535_has_vsa2()) 249 /* If the VSA exists let it emulate PCI, if not emulate in kernel */
248 olpc_platform_info.flags |= OLPC_F_VSA; 250 if (!cs5535_has_vsa2())
251 x86_init.pci.arch_init = pci_olpc_init;
252#endif
249 253
250 printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", 254 printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
251 ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", 255 ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a435c76d714e..a02e80c3c54b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -48,6 +48,7 @@
48#include <linux/err.h> 48#include <linux/err.h>
49#include <linux/nmi.h> 49#include <linux/nmi.h>
50#include <linux/tboot.h> 50#include <linux/tboot.h>
51#include <linux/stackprotector.h>
51 52
52#include <asm/acpi.h> 53#include <asm/acpi.h>
53#include <asm/desc.h> 54#include <asm/desc.h>
@@ -67,6 +68,7 @@
67#include <linux/mc146818rtc.h> 68#include <linux/mc146818rtc.h>
68 69
69#include <asm/smpboot_hooks.h> 70#include <asm/smpboot_hooks.h>
71#include <asm/i8259.h>
70 72
71#ifdef CONFIG_X86_32 73#ifdef CONFIG_X86_32
72u8 apicid_2_node[MAX_APICID]; 74u8 apicid_2_node[MAX_APICID];
@@ -291,9 +293,9 @@ notrace static void __cpuinit start_secondary(void *unused)
291 check_tsc_sync_target(); 293 check_tsc_sync_target();
292 294
293 if (nmi_watchdog == NMI_IO_APIC) { 295 if (nmi_watchdog == NMI_IO_APIC) {
294 disable_8259A_irq(0); 296 legacy_pic->chip->mask(0);
295 enable_NMI_through_LVT0(); 297 enable_NMI_through_LVT0();
296 enable_8259A_irq(0); 298 legacy_pic->chip->unmask(0);
297 } 299 }
298 300
299#ifdef CONFIG_X86_32 301#ifdef CONFIG_X86_32
@@ -329,6 +331,9 @@ notrace static void __cpuinit start_secondary(void *unused)
329 /* enable local interrupts */ 331 /* enable local interrupts */
330 local_irq_enable(); 332 local_irq_enable();
331 333
334 /* prevent a spurious stack-protector check failure during clock setup */
335 boot_init_stack_canary();
336
332 x86_cpuinit.setup_percpu_clockev(); 337 x86_cpuinit.setup_percpu_clockev();
333 338
334 wmb(); 339 wmb();
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index ab38ce0984fa..e680ea52db9b 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -49,11 +49,6 @@ extern int no_broadcast;
49char visws_board_type = -1; 49char visws_board_type = -1;
50char visws_board_rev = -1; 50char visws_board_rev = -1;
51 51
52int is_visws_box(void)
53{
54 return visws_board_type >= 0;
55}
56
57static void __init visws_time_init(void) 52static void __init visws_time_init(void)
58{ 53{
59 printk(KERN_INFO "Starting Cobalt Timer system clock\n"); 54 printk(KERN_INFO "Starting Cobalt Timer system clock\n");
@@ -242,6 +237,8 @@ void __init visws_early_detect(void)
242 x86_init.irqs.pre_vector_init = visws_pre_intr_init; 237 x86_init.irqs.pre_vector_init = visws_pre_intr_init;
243 x86_init.irqs.trap_init = visws_trap_init; 238 x86_init.irqs.trap_init = visws_trap_init;
244 x86_init.timers.timer_init = visws_time_init; 239 x86_init.timers.timer_init = visws_time_init;
240 x86_init.pci.init = pci_visws_init;
241 x86_init.pci.init_irq = x86_init_noop;
245 242
246 /* 243 /*
247 * Install reboot quirks: 244 * Install reboot quirks:
@@ -508,7 +505,7 @@ static struct irq_chip cobalt_irq_type = {
508 */ 505 */
509static unsigned int startup_piix4_master_irq(unsigned int irq) 506static unsigned int startup_piix4_master_irq(unsigned int irq)
510{ 507{
511 init_8259A(0); 508 legacy_pic->init(0);
512 509
513 return startup_cobalt_irq(irq); 510 return startup_cobalt_irq(irq);
514} 511}
@@ -532,9 +529,6 @@ static struct irq_chip piix4_master_irq_type = {
532 529
533static struct irq_chip piix4_virtual_irq_type = { 530static struct irq_chip piix4_virtual_irq_type = {
534 .name = "PIIX4-virtual", 531 .name = "PIIX4-virtual",
535 .shutdown = disable_8259A_irq,
536 .enable = enable_8259A_irq,
537 .disable = disable_8259A_irq,
538}; 532};
539 533
540 534
@@ -609,7 +603,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
609 handle_IRQ_event(realirq, desc->action); 603 handle_IRQ_event(realirq, desc->action);
610 604
611 if (!(desc->status & IRQ_DISABLED)) 605 if (!(desc->status & IRQ_DISABLED))
612 enable_8259A_irq(realirq); 606 legacy_pic->chip->unmask(realirq);
613 607
614 return IRQ_HANDLED; 608 return IRQ_HANDLED;
615 609
@@ -628,6 +622,12 @@ static struct irqaction cascade_action = {
628 .name = "cascade", 622 .name = "cascade",
629}; 623};
630 624
625static inline void set_piix4_virtual_irq_type(void)
626{
627 piix4_virtual_irq_type.shutdown = i8259A_chip.mask;
628 piix4_virtual_irq_type.enable = i8259A_chip.unmask;
629 piix4_virtual_irq_type.disable = i8259A_chip.mask;
630}
631 631
632void init_VISWS_APIC_irqs(void) 632void init_VISWS_APIC_irqs(void)
633{ 633{
@@ -653,6 +653,7 @@ void init_VISWS_APIC_irqs(void)
653 desc->chip = &piix4_master_irq_type; 653 desc->chip = &piix4_master_irq_type;
654 } 654 }
655 else if (i < CO_IRQ_APIC0) { 655 else if (i < CO_IRQ_APIC0) {
656 set_piix4_virtual_irq_type();
656 desc->chip = &piix4_virtual_irq_type; 657 desc->chip = &piix4_virtual_irq_type;
657 } 658 }
658 else if (IS_CO_APIC(i)) { 659 else if (IS_CO_APIC(i)) {
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index ee5746c94628..61a1e8c7e19f 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -4,9 +4,11 @@
4 * For licencing details see kernel-base/COPYING 4 * For licencing details see kernel-base/COPYING
5 */ 5 */
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/ioport.h>
7 8
8#include <asm/bios_ebda.h> 9#include <asm/bios_ebda.h>
9#include <asm/paravirt.h> 10#include <asm/paravirt.h>
11#include <asm/pci_x86.h>
10#include <asm/mpspec.h> 12#include <asm/mpspec.h>
11#include <asm/setup.h> 13#include <asm/setup.h>
12#include <asm/apic.h> 14#include <asm/apic.h>
@@ -70,6 +72,12 @@ struct x86_init_ops x86_init __initdata = {
70 .iommu = { 72 .iommu = {
71 .iommu_init = iommu_init_noop, 73 .iommu_init = iommu_init_noop,
72 }, 74 },
75
76 .pci = {
77 .init = x86_default_pci_init,
78 .init_irq = x86_default_pci_init_irq,
79 .fixup_irqs = x86_default_pci_fixup_irqs,
80 },
73}; 81};
74 82
75struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { 83struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 0b7d3e9593e1..b110d97fb925 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -13,6 +13,8 @@ obj-$(CONFIG_X86_VISWS) += visws.o
13 13
14obj-$(CONFIG_X86_NUMAQ) += numaq_32.o 14obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
15 15
16obj-$(CONFIG_X86_MRST) += mrst.o
17
16obj-y += common.o early.o 18obj-y += common.o early.o
17obj-y += amd_bus.o bus_numa.o 19obj-y += amd_bus.o bus_numa.o
18 20
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 5f11ff6f5389..6e22454bfaa6 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -298,17 +298,14 @@ int __init pci_acpi_init(void)
298{ 298{
299 struct pci_dev *dev = NULL; 299 struct pci_dev *dev = NULL;
300 300
301 if (pcibios_scanned)
302 return 0;
303
304 if (acpi_noirq) 301 if (acpi_noirq)
305 return 0; 302 return -ENODEV;
306 303
307 printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n"); 304 printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
308 acpi_irq_penalty_init(); 305 acpi_irq_penalty_init();
309 pcibios_scanned++;
310 pcibios_enable_irq = acpi_pci_irq_enable; 306 pcibios_enable_irq = acpi_pci_irq_enable;
311 pcibios_disable_irq = acpi_pci_irq_disable; 307 pcibios_disable_irq = acpi_pci_irq_disable;
308 x86_init.pci.init_irq = x86_init_noop;
312 309
313 if (pci_routeirq) { 310 if (pci_routeirq) {
314 /* 311 /*
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 3736176acaab..294e10cb11e1 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -72,12 +72,6 @@ struct pci_ops pci_root_ops = {
72}; 72};
73 73
74/* 74/*
75 * legacy, numa, and acpi all want to call pcibios_scan_root
76 * from their initcalls. This flag prevents that.
77 */
78int pcibios_scanned;
79
80/*
81 * This interrupt-safe spinlock protects all accesses to PCI 75 * This interrupt-safe spinlock protects all accesses to PCI
82 * configuration space. 76 * configuration space.
83 */ 77 */
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c
index 25a1f8efed4a..adb62aaa7ecd 100644
--- a/arch/x86/pci/init.c
+++ b/arch/x86/pci/init.c
@@ -1,6 +1,7 @@
1#include <linux/pci.h> 1#include <linux/pci.h>
2#include <linux/init.h> 2#include <linux/init.h>
3#include <asm/pci_x86.h> 3#include <asm/pci_x86.h>
4#include <asm/x86_init.h>
4 5
5/* arch_initcall has too random ordering, so call the initializers 6/* arch_initcall has too random ordering, so call the initializers
6 in the right sequence from here. */ 7 in the right sequence from here. */
@@ -15,10 +16,9 @@ static __init int pci_arch_init(void)
15 if (!(pci_probe & PCI_PROBE_NOEARLY)) 16 if (!(pci_probe & PCI_PROBE_NOEARLY))
16 pci_mmcfg_early_init(); 17 pci_mmcfg_early_init();
17 18
18#ifdef CONFIG_PCI_OLPC 19 if (x86_init.pci.arch_init && !x86_init.pci.arch_init())
19 if (!pci_olpc_init()) 20 return 0;
20 return 0; /* skip additional checks if it's an XO */ 21
21#endif
22#ifdef CONFIG_PCI_BIOS 22#ifdef CONFIG_PCI_BIOS
23 pci_pcbios_init(); 23 pci_pcbios_init();
24#endif 24#endif
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index b02f6d8ac922..8b107521d24e 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -53,7 +53,7 @@ struct irq_router_handler {
53 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); 53 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
54}; 54};
55 55
56int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL; 56int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
57void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; 57void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
58 58
59/* 59/*
@@ -1018,7 +1018,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
1018 return 1; 1018 return 1;
1019} 1019}
1020 1020
1021static void __init pcibios_fixup_irqs(void) 1021void __init pcibios_fixup_irqs(void)
1022{ 1022{
1023 struct pci_dev *dev = NULL; 1023 struct pci_dev *dev = NULL;
1024 u8 pin; 1024 u8 pin;
@@ -1112,12 +1112,12 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = {
1112 { } 1112 { }
1113}; 1113};
1114 1114
1115int __init pcibios_irq_init(void) 1115void __init pcibios_irq_init(void)
1116{ 1116{
1117 DBG(KERN_DEBUG "PCI: IRQ init\n"); 1117 DBG(KERN_DEBUG "PCI: IRQ init\n");
1118 1118
1119 if (pcibios_enable_irq || raw_pci_ops == NULL) 1119 if (raw_pci_ops == NULL)
1120 return 0; 1120 return;
1121 1121
1122 dmi_check_system(pciirq_dmi_table); 1122 dmi_check_system(pciirq_dmi_table);
1123 1123
@@ -1144,9 +1144,7 @@ int __init pcibios_irq_init(void)
1144 pirq_table = NULL; 1144 pirq_table = NULL;
1145 } 1145 }
1146 1146
1147 pcibios_enable_irq = pirq_enable_irq; 1147 x86_init.pci.fixup_irqs();
1148
1149 pcibios_fixup_irqs();
1150 1148
1151 if (io_apic_assign_pci_irqs && pci_routeirq) { 1149 if (io_apic_assign_pci_irqs && pci_routeirq) {
1152 struct pci_dev *dev = NULL; 1150 struct pci_dev *dev = NULL;
@@ -1159,8 +1157,6 @@ int __init pcibios_irq_init(void)
1159 for_each_pci_dev(dev) 1157 for_each_pci_dev(dev)
1160 pirq_enable_irq(dev); 1158 pirq_enable_irq(dev);
1161 } 1159 }
1162
1163 return 0;
1164} 1160}
1165 1161
1166static void pirq_penalize_isa_irq(int irq, int active) 1162static void pirq_penalize_isa_irq(int irq, int active)
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index 4061bb0f267d..0db5eaf54560 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -35,16 +35,13 @@ static void __devinit pcibios_fixup_peer_bridges(void)
35 } 35 }
36} 36}
37 37
38static int __init pci_legacy_init(void) 38int __init pci_legacy_init(void)
39{ 39{
40 if (!raw_pci_ops) { 40 if (!raw_pci_ops) {
41 printk("PCI: System does not support PCI\n"); 41 printk("PCI: System does not support PCI\n");
42 return 0; 42 return 0;
43 } 43 }
44 44
45 if (pcibios_scanned++)
46 return 0;
47
48 printk("PCI: Probing PCI hardware\n"); 45 printk("PCI: Probing PCI hardware\n");
49 pci_root_bus = pcibios_scan_root(0); 46 pci_root_bus = pcibios_scan_root(0);
50 if (pci_root_bus) 47 if (pci_root_bus)
@@ -55,18 +52,15 @@ static int __init pci_legacy_init(void)
55 52
56int __init pci_subsys_init(void) 53int __init pci_subsys_init(void)
57{ 54{
58#ifdef CONFIG_X86_NUMAQ 55 /*
59 pci_numaq_init(); 56 * The init function returns a non-zero value when
60#endif 57 * pci_legacy_init should be invoked.
61#ifdef CONFIG_ACPI 58 */
62 pci_acpi_init(); 59 if (x86_init.pci.init())
63#endif 60 pci_legacy_init();
64#ifdef CONFIG_X86_VISWS 61
65 pci_visws_init();
66#endif
67 pci_legacy_init();
68 pcibios_fixup_peer_bridges(); 62 pcibios_fixup_peer_bridges();
69 pcibios_irq_init(); 63 x86_init.pci.init_irq();
70 pcibios_init(); 64 pcibios_init();
71 65
72 return 0; 66 return 0;
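
Note: the convention introduced in pci_subsys_init() above is that x86_init.pci.init() returns non-zero when the generic pci_legacy_init() scan should still run (pci_visws_init() now returns 1 for exactly this reason, while the ACPI path returns 0 once it has scanned the buses itself). Below is a minimal stand-alone sketch of that return-value dispatch; the platform function is invented for the example, only the control flow mirrors the patch.

#include <stdio.h>

/* Stand-in for pci_legacy_init(): the generic bus-0 probe. */
static int generic_legacy_scan(void)
{
	printf("PCI: probing PCI hardware (legacy path)\n");
	return 0;
}

/* Example platform hook: installs its own config ops, then asks for the
 * generic scan as well by returning non-zero. */
static int example_platform_pci_init(void)
{
	printf("platform: custom PCI config access installed\n");
	return 1;
}

/* Stand-in for x86_init.pci.init. */
static int (*pci_init_hook)(void) = example_platform_pci_init;

int main(void)
{
	/* Same shape as pci_subsys_init(): non-zero means "also run legacy". */
	if (pci_init_hook())
		generic_legacy_scan();
	return 0;
}
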
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
new file mode 100644
index 000000000000..8bf2fcb88d04
--- /dev/null
+++ b/arch/x86/pci/mrst.c
@@ -0,0 +1,262 @@
1/*
2 * Moorestown PCI support
3 * Copyright (c) 2008 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com>
5 *
6 * Moorestown has an interesting PCI implementation:
7 * - configuration space is memory mapped (as defined by MCFG)
8 * - Lincroft devices also have a real, type 1 configuration space
9 * - Early Lincroft silicon has a type 1 access bug that will cause
10 * a hang if non-existent devices are accessed
11 * - some devices have the "fixed BAR" capability, which means
12 * they can't be relocated or modified; check for that during
13 * BAR sizing
14 *
15 * So, we use the MCFG space for all reads and writes, but also send
16 * Lincroft writes to type 1 space. But only read/write if the device
17 * actually exists, otherwise return all 1s for reads and bit bucket
18 * the writes.
19 */
20
21#include <linux/sched.h>
22#include <linux/pci.h>
23#include <linux/ioport.h>
24#include <linux/init.h>
25#include <linux/dmi.h>
26
27#include <asm/acpi.h>
28#include <asm/segment.h>
29#include <asm/io.h>
30#include <asm/smp.h>
31#include <asm/pci_x86.h>
32#include <asm/hw_irq.h>
33#include <asm/io_apic.h>
34
35#define PCIE_CAP_OFFSET 0x100
36
37/* Fixed BAR fields */
38#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
39#define PCI_FIXED_BAR_0_SIZE 0x04
40#define PCI_FIXED_BAR_1_SIZE 0x08
41#define PCI_FIXED_BAR_2_SIZE 0x0c
42#define PCI_FIXED_BAR_3_SIZE 0x10
43#define PCI_FIXED_BAR_4_SIZE 0x14
44#define PCI_FIXED_BAR_5_SIZE 0x1c
45
46/**
47 * fixed_bar_cap - return the offset of the fixed BAR cap if found
48 * @bus: PCI bus
49 * @devfn: device in question
50 *
51 * Look for the fixed BAR cap on @bus and @devfn, returning its offset
52 * if found or 0 otherwise.
53 */
54static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
55{
56 int pos;
57 u32 pcie_cap = 0, cap_data;
58
59 pos = PCIE_CAP_OFFSET;
60
61 if (!raw_pci_ext_ops)
62 return 0;
63
64 while (pos) {
65 if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
66 devfn, pos, 4, &pcie_cap))
67 return 0;
68
69 if (pcie_cap == 0xffffffff)
70 return 0;
71
72 if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
73 raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
74 devfn, pos + 4, 4, &cap_data);
75 if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
76 return pos;
77 }
78
79 pos = pcie_cap >> 20;
80 }
81
82 return 0;
83}
84
85static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
86 int reg, int len, u32 val, int offset)
87{
88 u32 size;
89 unsigned int domain, busnum;
90 int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;
91
92 domain = pci_domain_nr(bus);
93 busnum = bus->number;
94
95 if (val == ~0 && len == 4) {
96 unsigned long decode;
97
98 raw_pci_ext_ops->read(domain, busnum, devfn,
99 offset + 8 + (bar * 4), 4, &size);
100
101 /* Turn the size into a decode pattern for the sizing code */
102 if (size) {
103 decode = size - 1;
104 decode |= decode >> 1;
105 decode |= decode >> 2;
106 decode |= decode >> 4;
107 decode |= decode >> 8;
108 decode |= decode >> 16;
109 decode++;
110 decode = ~(decode - 1);
111 } else {
112 decode = ~0;
113 }
114
115 /*
116 * If val is all ones, the core code is trying to size the reg,
117 * so update the mmconfig space with the real size.
118 *
119 * Note: this assumes the fixed size we got is a power of two.
120 */
121 return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
122 decode);
123 }
124
125 /* This is some other kind of BAR write, so just do it. */
126 return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
127}
128
129/**
130 * type1_access_ok - check whether to use type 1
131 * @bus: bus number
132 * @devfn: device & function in question
133 *
134 * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
135 * all, the we can go ahead with any reads & writes. If it's on a Lincroft,
136 * but doesn't exist, avoid the access altogether to keep the chip from
137 * hanging.
138 */
139static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
140{
141 /* This is a workaround for an A0 LNC bug where the PCI status register does
142 * not have the new capabilities bit set; it cannot be written by SW either.
143 *
144 * The PCI header type in real LNC indicates a single-function device, which
145 * would prevent probing other devices under the same function in the PCI
146 * shim. Therefore, use the header type in the shim instead.
147 */
148 if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
149 return 0;
150 if (bus == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(0, 0)))
151 return 1;
152 return 0; /* langwell on others */
153}
154
155static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
156 int size, u32 *value)
157{
158 if (type1_access_ok(bus->number, devfn, where))
159 return pci_direct_conf1.read(pci_domain_nr(bus), bus->number,
160 devfn, where, size, value);
161 return raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
162 devfn, where, size, value);
163}
164
165static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
166 int size, u32 value)
167{
168 int offset;
169
170 /* On MRST there is no PCI ROM BAR; drop the write so that a subsequent
171 * read of the ROM BAR returns 0 and is ignored.
172 */
173 if (where == PCI_ROM_ADDRESS)
174 return 0;
175
176 /*
177 * Devices with fixed BARs need special handling:
178 * - BAR sizing code will save, write ~0, read size, restore
179 * - so writes to fixed BARs need special handling
180 * - other writes to fixed BAR devices should go through mmconfig
181 */
182 offset = fixed_bar_cap(bus, devfn);
183 if (offset &&
184 (where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)) {
185 return pci_device_update_fixed(bus, devfn, where, size, value,
186 offset);
187 }
188
189 /*
190 * On Moorestown update both real & mmconfig space
191 * Note: early Lincroft silicon can't handle type 1 accesses to
192 * non-existent devices, so just eat the write in that case.
193 */
194 if (type1_access_ok(bus->number, devfn, where))
195 return pci_direct_conf1.write(pci_domain_nr(bus), bus->number,
196 devfn, where, size, value);
197 return raw_pci_ext_ops->write(pci_domain_nr(bus), bus->number, devfn,
198 where, size, value);
199}
200
201static int mrst_pci_irq_enable(struct pci_dev *dev)
202{
203 u8 pin;
204 struct io_apic_irq_attr irq_attr;
205
206 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
207
208 /* MRST only has an IOAPIC; the PCI irq lines are 1:1 mapped to
209 * IOAPIC RTE entries, so we just enable the RTE for the device.
210 */
211 irq_attr.ioapic = mp_find_ioapic(dev->irq);
212 irq_attr.ioapic_pin = dev->irq;
213 irq_attr.trigger = 1; /* level */
214 irq_attr.polarity = 1; /* active low */
215 io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
216
217 return 0;
218}
219
220struct pci_ops pci_mrst_ops = {
221 .read = pci_read,
222 .write = pci_write,
223};
224
225/**
226 * pci_mrst_init - installs pci_mrst_ops
227 *
228 * Moorestown has an interesting PCI implementation (see above).
229 * Called when the early platform detection installs it.
230 */
231int __init pci_mrst_init(void)
232{
233 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
234 pci_mmcfg_late_init();
235 pcibios_enable_irq = mrst_pci_irq_enable;
236 pci_root_ops = pci_mrst_ops;
237 /* Continue with standard init */
238 return 1;
239}
240
241/*
242 * Langwell devices reside at fixed offsets; don't try to move them.
243 */
244static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev)
245{
246 unsigned long offset;
247 u32 size;
248 int i;
249
250 /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
251 offset = fixed_bar_cap(dev->bus, dev->devfn);
252 if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
253 PCI_DEVFN(2, 2) == dev->devfn)
254 return;
255
256 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
257 pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
258 dev->resource[i].end = dev->resource[i].start + size - 1;
259 dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
260 }
261}
262DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);
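
Note: the decode computation in pci_device_update_fixed() above turns the fixed-BAR size into the pattern the generic BAR sizing code expects to read back after writing ~0, rounding non-power-of-two sizes up. The arithmetic can be checked stand-alone; uint32_t stands in for the 32-bit BAR register written through mmconfig.

#include <stdio.h>
#include <stdint.h>

/* Same size -> decode transformation as pci_device_update_fixed(). */
static uint32_t size_to_decode(uint32_t size)
{
	uint32_t decode;

	if (!size)
		return ~(uint32_t)0;

	decode = size - 1;		/* round the size up to a power of two */
	decode |= decode >> 1;
	decode |= decode >> 2;
	decode |= decode >> 4;
	decode |= decode >> 8;
	decode |= decode >> 16;
	decode++;
	decode = ~(decode - 1);		/* then form the address-decode mask */
	return decode;
}

int main(void)
{
	/* A 4 KiB fixed BAR yields 0xfffff000; 3 KiB rounds up to the same mask. */
	printf("size 0x1000 -> decode 0x%08x\n", (unsigned)size_to_decode(0x1000));
	printf("size 0x0c00 -> decode 0x%08x\n", (unsigned)size_to_decode(0x0c00));
	return 0;
}
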
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index 8884a1c1ada6..8223738ad806 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -148,14 +148,8 @@ int __init pci_numaq_init(void)
148{ 148{
149 int quad; 149 int quad;
150 150
151 if (!found_numaq)
152 return 0;
153
154 raw_pci_ops = &pci_direct_conf1_mq; 151 raw_pci_ops = &pci_direct_conf1_mq;
155 152
156 if (pcibios_scanned++)
157 return 0;
158
159 pci_root_bus = pcibios_scan_root(0); 153 pci_root_bus = pcibios_scan_root(0);
160 if (pci_root_bus) 154 if (pci_root_bus)
161 pci_bus_add_devices(pci_root_bus); 155 pci_bus_add_devices(pci_root_bus);
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
index b889d824f7c6..b34815408f58 100644
--- a/arch/x86/pci/olpc.c
+++ b/arch/x86/pci/olpc.c
@@ -304,9 +304,6 @@ static struct pci_raw_ops pci_olpc_conf = {
304 304
305int __init pci_olpc_init(void) 305int __init pci_olpc_init(void)
306{ 306{
307 if (!machine_is_olpc() || olpc_has_vsa())
308 return -ENODEV;
309
310 printk(KERN_INFO "PCI: Using configuration type OLPC\n"); 307 printk(KERN_INFO "PCI: Using configuration type OLPC\n");
311 raw_pci_ops = &pci_olpc_conf; 308 raw_pci_ops = &pci_olpc_conf;
312 is_lx = is_geode_lx(); 309 is_lx = is_geode_lx();
diff --git a/arch/x86/pci/visws.c b/arch/x86/pci/visws.c
index bcead7a46871..03008f72eb04 100644
--- a/arch/x86/pci/visws.c
+++ b/arch/x86/pci/visws.c
@@ -69,9 +69,6 @@ void __init pcibios_update_irq(struct pci_dev *dev, int irq)
69 69
70int __init pci_visws_init(void) 70int __init pci_visws_init(void)
71{ 71{
72 if (!is_visws_box())
73 return -1;
74
75 pcibios_enable_irq = &pci_visws_enable_irq; 72 pcibios_enable_irq = &pci_visws_enable_irq;
76 pcibios_disable_irq = &pci_visws_disable_irq; 73 pcibios_disable_irq = &pci_visws_disable_irq;
77 74
@@ -90,5 +87,6 @@ int __init pci_visws_init(void)
90 pci_scan_bus_with_sysdata(pci_bus1); 87 pci_scan_bus_with_sysdata(pci_bus1);
91 pci_fixup_irqs(pci_common_swizzle, visws_map_irq); 88 pci_fixup_irqs(pci_common_swizzle, visws_map_irq);
92 pcibios_resource_survey(); 89 pcibios_resource_survey();
93 return 0; 90 /* Request bus scan */
91 return 1;
94} 92}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5b548aee9cbc..77b493b3d97b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -303,6 +303,49 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
303} 303}
304EXPORT_SYMBOL_GPL(pci_find_ext_capability); 304EXPORT_SYMBOL_GPL(pci_find_ext_capability);
305 305
306/**
307 * pci_bus_find_ext_capability - find an extended capability
308 * @bus: the PCI bus to query
309 * @devfn: PCI device to query
310 * @cap: capability code
311 *
312 * Like pci_find_ext_capability() but works for pci devices that do not have a
313 * pci_dev structure set up yet.
314 *
315 * Returns the address of the requested capability structure within the
316 * device's PCI configuration space or 0 in case the device does not
317 * support it.
318 */
319int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
320 int cap)
321{
322 u32 header;
323 int ttl;
324 int pos = PCI_CFG_SPACE_SIZE;
325
326 /* minimum 8 bytes per capability */
327 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
328
329 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
330 return 0;
331 if (header == 0xffffffff || header == 0)
332 return 0;
333
334 while (ttl-- > 0) {
335 if (PCI_EXT_CAP_ID(header) == cap)
336 return pos;
337
338 pos = PCI_EXT_CAP_NEXT(header);
339 if (pos < PCI_CFG_SPACE_SIZE)
340 break;
341
342 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
343 break;
344 }
345
346 return 0;
347}
348
306static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) 349static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
307{ 350{
308 int rc, ttl = PCI_FIND_CAP_TTL; 351 int rc, ttl = PCI_FIND_CAP_TTL;
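
Note: pci_bus_find_ext_capability() added above walks the PCIe extended capability list with raw bus/devfn config reads, for callers that do not yet have a struct pci_dev. Below is a hedged sketch of how such a caller might look for the Moorestown fixed-BAR vendor capability on top of it; the function name is invented, PCIE_VNDR_CAP_ID_FIXED_BAR is local to arch/x86/pci/mrst.c, and unlike fixed_bar_cap() this only inspects the first vendor-specific capability found.

/* Illustrative only: locate the fixed-BAR vendor capability without a
 * struct pci_dev, using the new helper plus an ordinary config read. */
static int example_find_fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
{
	u32 cap_data;
	int pos;

	pos = pci_bus_find_ext_capability(bus, devfn, PCI_EXT_CAP_ID_VNDR);
	if (!pos)
		return 0;

	/* The vendor capability ID word sits 4 bytes past the cap header. */
	pci_bus_read_config_dword(bus, devfn, pos + 4, &cap_data);
	if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
		return pos;

	return 0;
}
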
diff --git a/include/linux/pci.h b/include/linux/pci.h
index ec95ebe629f1..cd5809a5963e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -678,6 +678,8 @@ enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
678int pci_find_capability(struct pci_dev *dev, int cap); 678int pci_find_capability(struct pci_dev *dev, int cap);
679int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); 679int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
680int pci_find_ext_capability(struct pci_dev *dev, int cap); 680int pci_find_ext_capability(struct pci_dev *dev, int cap);
681int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
682 int cap);
681int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); 683int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
682int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); 684int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
683struct pci_bus *pci_find_next_bus(const struct pci_bus *from); 685struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 9f2ad0aa3c39..c8f302991b66 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -507,6 +507,7 @@
507#define PCI_EXT_CAP_ID_VC 2 507#define PCI_EXT_CAP_ID_VC 2
508#define PCI_EXT_CAP_ID_DSN 3 508#define PCI_EXT_CAP_ID_DSN 3
509#define PCI_EXT_CAP_ID_PWR 4 509#define PCI_EXT_CAP_ID_PWR 4
510#define PCI_EXT_CAP_ID_VNDR 11
510#define PCI_EXT_CAP_ID_ACS 13 511#define PCI_EXT_CAP_ID_ACS 13
511#define PCI_EXT_CAP_ID_ARI 14 512#define PCI_EXT_CAP_ID_ARI 14
512#define PCI_EXT_CAP_ID_ATS 15 513#define PCI_EXT_CAP_ID_ATS 15