diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/platform | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/platform')
33 files changed, 7597 insertions, 0 deletions
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile new file mode 100644 index 000000000000..021eee91c056 --- /dev/null +++ b/arch/x86/platform/Makefile | |||
@@ -0,0 +1,10 @@ | |||
1 | # Platform specific code goes here | ||
2 | obj-y += ce4100/ | ||
3 | obj-y += efi/ | ||
4 | obj-y += iris/ | ||
5 | obj-y += mrst/ | ||
6 | obj-y += olpc/ | ||
7 | obj-y += scx200/ | ||
8 | obj-y += sfi/ | ||
9 | obj-y += visws/ | ||
10 | obj-y += uv/ | ||
diff --git a/arch/x86/platform/ce4100/Makefile b/arch/x86/platform/ce4100/Makefile new file mode 100644 index 000000000000..91fc92971d94 --- /dev/null +++ b/arch/x86/platform/ce4100/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_X86_INTEL_CE) += ce4100.o | |||
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c new file mode 100644 index 000000000000..28071bb31db7 --- /dev/null +++ b/arch/x86/platform/ce4100/ce4100.c | |||
@@ -0,0 +1,146 @@ | |||
1 | /* | ||
2 | * Intel CE4100 platform specific setup code | ||
3 | * | ||
4 | * (C) Copyright 2010 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/serial_reg.h> | ||
16 | #include <linux/serial_8250.h> | ||
17 | |||
18 | #include <asm/ce4100.h> | ||
19 | #include <asm/prom.h> | ||
20 | #include <asm/setup.h> | ||
21 | #include <asm/i8259.h> | ||
22 | #include <asm/io.h> | ||
23 | #include <asm/io_apic.h> | ||
24 | |||
25 | static int ce4100_i8042_detect(void) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | #ifdef CONFIG_SERIAL_8250 | ||
31 | |||
32 | static unsigned int mem_serial_in(struct uart_port *p, int offset) | ||
33 | { | ||
34 | offset = offset << p->regshift; | ||
35 | return readl(p->membase + offset); | ||
36 | } | ||
37 | |||
38 | /* | ||
39 | * The UART Tx interrupts are not set under some conditions and therefore serial | ||
40 | * transmission hangs. This is a silicon issue and has not been root caused. The | ||
41 | * workaround for this silicon issue checks UART_LSR_THRE bit and UART_LSR_TEMT | ||
42 | * bit of LSR register in interrupt handler to see whether at least one of these | ||
43 | * two bits is set, if so then process the transmit request. If this workaround | ||
44 | * is not applied, then the serial transmission may hang. This workaround is for | ||
45 | * errata number 9 in Errata - B step. | ||
46 | */ | ||
47 | |||
48 | static unsigned int ce4100_mem_serial_in(struct uart_port *p, int offset) | ||
49 | { | ||
50 | unsigned int ret, ier, lsr; | ||
51 | |||
52 | if (offset == UART_IIR) { | ||
53 | offset = offset << p->regshift; | ||
54 | ret = readl(p->membase + offset); | ||
55 | if (ret & UART_IIR_NO_INT) { | ||
56 | /* see if the TX interrupt should have really set */ | ||
57 | ier = mem_serial_in(p, UART_IER); | ||
58 | /* see if the UART's XMIT interrupt is enabled */ | ||
59 | if (ier & UART_IER_THRI) { | ||
60 | lsr = mem_serial_in(p, UART_LSR); | ||
61 | /* now check to see if the UART should be | ||
62 | generating an interrupt (but isn't) */ | ||
63 | if (lsr & (UART_LSR_THRE | UART_LSR_TEMT)) | ||
64 | ret &= ~UART_IIR_NO_INT; | ||
65 | } | ||
66 | } | ||
67 | } else | ||
68 | ret = mem_serial_in(p, offset); | ||
69 | return ret; | ||
70 | } | ||
71 | |||
72 | static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value) | ||
73 | { | ||
74 | offset = offset << p->regshift; | ||
75 | writel(value, p->membase + offset); | ||
76 | } | ||
77 | |||
78 | static void ce4100_serial_fixup(int port, struct uart_port *up, | ||
79 | unsigned short *capabilites) | ||
80 | { | ||
81 | #ifdef CONFIG_EARLY_PRINTK | ||
82 | /* | ||
83 | * Over ride the legacy port configuration that comes from | ||
84 | * asm/serial.h. Using the ioport driver then switching to the | ||
85 | * PCI memmaped driver hangs the IOAPIC | ||
86 | */ | ||
87 | if (up->iotype != UPIO_MEM32) { | ||
88 | up->uartclk = 14745600; | ||
89 | up->mapbase = 0xdffe0200; | ||
90 | set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, | ||
91 | up->mapbase & PAGE_MASK); | ||
92 | up->membase = | ||
93 | (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE); | ||
94 | up->membase += up->mapbase & ~PAGE_MASK; | ||
95 | up->iotype = UPIO_MEM32; | ||
96 | up->regshift = 2; | ||
97 | } | ||
98 | #endif | ||
99 | up->iobase = 0; | ||
100 | up->serial_in = ce4100_mem_serial_in; | ||
101 | up->serial_out = ce4100_mem_serial_out; | ||
102 | |||
103 | *capabilites |= (1 << 12); | ||
104 | } | ||
105 | |||
106 | static __init void sdv_serial_fixup(void) | ||
107 | { | ||
108 | serial8250_set_isa_configurator(ce4100_serial_fixup); | ||
109 | } | ||
110 | |||
111 | #else | ||
112 | static inline void sdv_serial_fixup(void); | ||
113 | #endif | ||
114 | |||
115 | static void __init sdv_arch_setup(void) | ||
116 | { | ||
117 | sdv_serial_fixup(); | ||
118 | } | ||
119 | |||
120 | #ifdef CONFIG_X86_IO_APIC | ||
121 | static void __cpuinit sdv_pci_init(void) | ||
122 | { | ||
123 | x86_of_pci_init(); | ||
124 | /* We can't set this earlier, because we need to calibrate the timer */ | ||
125 | legacy_pic = &null_legacy_pic; | ||
126 | } | ||
127 | #endif | ||
128 | |||
129 | /* | ||
130 | * CE4100 specific x86_init function overrides and early setup | ||
131 | * calls. | ||
132 | */ | ||
133 | void __init x86_ce4100_early_setup(void) | ||
134 | { | ||
135 | x86_init.oem.arch_setup = sdv_arch_setup; | ||
136 | x86_platform.i8042_detect = ce4100_i8042_detect; | ||
137 | x86_init.resources.probe_roms = x86_init_noop; | ||
138 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; | ||
139 | x86_init.mpparse.find_smp_config = x86_init_noop; | ||
140 | x86_init.pci.init = ce4100_pci_init; | ||
141 | |||
142 | #ifdef CONFIG_X86_IO_APIC | ||
143 | x86_init.pci.init_irq = sdv_pci_init; | ||
144 | x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; | ||
145 | #endif | ||
146 | } | ||
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts new file mode 100644 index 000000000000..e70be38ce039 --- /dev/null +++ b/arch/x86/platform/ce4100/falconfalls.dts | |||
@@ -0,0 +1,430 @@ | |||
1 | /* | ||
2 | * CE4100 on Falcon Falls | ||
3 | * | ||
4 | * (c) Copyright 2010 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License. | ||
9 | */ | ||
10 | /dts-v1/; | ||
11 | / { | ||
12 | model = "intel,falconfalls"; | ||
13 | compatible = "intel,falconfalls"; | ||
14 | #address-cells = <1>; | ||
15 | #size-cells = <1>; | ||
16 | |||
17 | cpus { | ||
18 | #address-cells = <1>; | ||
19 | #size-cells = <0>; | ||
20 | |||
21 | cpu@0 { | ||
22 | device_type = "cpu"; | ||
23 | compatible = "intel,ce4100"; | ||
24 | reg = <0>; | ||
25 | lapic = <&lapic0>; | ||
26 | }; | ||
27 | }; | ||
28 | |||
29 | soc@0 { | ||
30 | #address-cells = <1>; | ||
31 | #size-cells = <1>; | ||
32 | compatible = "intel,ce4100-cp"; | ||
33 | ranges; | ||
34 | |||
35 | ioapic1: interrupt-controller@fec00000 { | ||
36 | #interrupt-cells = <2>; | ||
37 | compatible = "intel,ce4100-ioapic"; | ||
38 | interrupt-controller; | ||
39 | reg = <0xfec00000 0x1000>; | ||
40 | }; | ||
41 | |||
42 | timer@fed00000 { | ||
43 | compatible = "intel,ce4100-hpet"; | ||
44 | reg = <0xfed00000 0x200>; | ||
45 | }; | ||
46 | |||
47 | lapic0: interrupt-controller@fee00000 { | ||
48 | compatible = "intel,ce4100-lapic"; | ||
49 | reg = <0xfee00000 0x1000>; | ||
50 | }; | ||
51 | |||
52 | pci@3fc { | ||
53 | #address-cells = <3>; | ||
54 | #size-cells = <2>; | ||
55 | compatible = "intel,ce4100-pci", "pci"; | ||
56 | device_type = "pci"; | ||
57 | bus-range = <0 0>; | ||
58 | ranges = <0x2000000 0 0xbffff000 0xbffff000 0 0x1000 | ||
59 | 0x2000000 0 0xdffe0000 0xdffe0000 0 0x1000 | ||
60 | 0x0000000 0 0x0 0x0 0 0x100>; | ||
61 | |||
62 | /* Secondary IO-APIC */ | ||
63 | ioapic2: interrupt-controller@0,1 { | ||
64 | #interrupt-cells = <2>; | ||
65 | compatible = "intel,ce4100-ioapic"; | ||
66 | interrupt-controller; | ||
67 | reg = <0x100 0x0 0x0 0x0 0x0>; | ||
68 | assigned-addresses = <0x02000000 0x0 0xbffff000 0x0 0x1000>; | ||
69 | }; | ||
70 | |||
71 | pci@1,0 { | ||
72 | #address-cells = <3>; | ||
73 | #size-cells = <2>; | ||
74 | compatible = "intel,ce4100-pci", "pci"; | ||
75 | device_type = "pci"; | ||
76 | bus-range = <1 1>; | ||
77 | reg = <0x0800 0x0 0x0 0x0 0x0>; | ||
78 | ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; | ||
79 | |||
80 | interrupt-parent = <&ioapic2>; | ||
81 | |||
82 | display@2,0 { | ||
83 | compatible = "pci8086,2e5b.2", | ||
84 | "pci8086,2e5b", | ||
85 | "pciclass038000", | ||
86 | "pciclass0380"; | ||
87 | |||
88 | reg = <0x11000 0x0 0x0 0x0 0x0>; | ||
89 | interrupts = <0 1>; | ||
90 | }; | ||
91 | |||
92 | multimedia@3,0 { | ||
93 | compatible = "pci8086,2e5c.2", | ||
94 | "pci8086,2e5c", | ||
95 | "pciclass048000", | ||
96 | "pciclass0480"; | ||
97 | |||
98 | reg = <0x11800 0x0 0x0 0x0 0x0>; | ||
99 | interrupts = <2 1>; | ||
100 | }; | ||
101 | |||
102 | multimedia@4,0 { | ||
103 | compatible = "pci8086,2e5d.2", | ||
104 | "pci8086,2e5d", | ||
105 | "pciclass048000", | ||
106 | "pciclass0480"; | ||
107 | |||
108 | reg = <0x12000 0x0 0x0 0x0 0x0>; | ||
109 | interrupts = <4 1>; | ||
110 | }; | ||
111 | |||
112 | multimedia@4,1 { | ||
113 | compatible = "pci8086,2e5e.2", | ||
114 | "pci8086,2e5e", | ||
115 | "pciclass048000", | ||
116 | "pciclass0480"; | ||
117 | |||
118 | reg = <0x12100 0x0 0x0 0x0 0x0>; | ||
119 | interrupts = <5 1>; | ||
120 | }; | ||
121 | |||
122 | sound@6,0 { | ||
123 | compatible = "pci8086,2e5f.2", | ||
124 | "pci8086,2e5f", | ||
125 | "pciclass040100", | ||
126 | "pciclass0401"; | ||
127 | |||
128 | reg = <0x13000 0x0 0x0 0x0 0x0>; | ||
129 | interrupts = <6 1>; | ||
130 | }; | ||
131 | |||
132 | sound@6,1 { | ||
133 | compatible = "pci8086,2e5f.2", | ||
134 | "pci8086,2e5f", | ||
135 | "pciclass040100", | ||
136 | "pciclass0401"; | ||
137 | |||
138 | reg = <0x13100 0x0 0x0 0x0 0x0>; | ||
139 | interrupts = <7 1>; | ||
140 | }; | ||
141 | |||
142 | sound@6,2 { | ||
143 | compatible = "pci8086,2e60.2", | ||
144 | "pci8086,2e60", | ||
145 | "pciclass040100", | ||
146 | "pciclass0401"; | ||
147 | |||
148 | reg = <0x13200 0x0 0x0 0x0 0x0>; | ||
149 | interrupts = <8 1>; | ||
150 | }; | ||
151 | |||
152 | display@8,0 { | ||
153 | compatible = "pci8086,2e61.2", | ||
154 | "pci8086,2e61", | ||
155 | "pciclass038000", | ||
156 | "pciclass0380"; | ||
157 | |||
158 | reg = <0x14000 0x0 0x0 0x0 0x0>; | ||
159 | interrupts = <9 1>; | ||
160 | }; | ||
161 | |||
162 | display@8,1 { | ||
163 | compatible = "pci8086,2e62.2", | ||
164 | "pci8086,2e62", | ||
165 | "pciclass038000", | ||
166 | "pciclass0380"; | ||
167 | |||
168 | reg = <0x14100 0x0 0x0 0x0 0x0>; | ||
169 | interrupts = <10 1>; | ||
170 | }; | ||
171 | |||
172 | multimedia@8,2 { | ||
173 | compatible = "pci8086,2e63.2", | ||
174 | "pci8086,2e63", | ||
175 | "pciclass048000", | ||
176 | "pciclass0480"; | ||
177 | |||
178 | reg = <0x14200 0x0 0x0 0x0 0x0>; | ||
179 | interrupts = <11 1>; | ||
180 | }; | ||
181 | |||
182 | entertainment-encryption@9,0 { | ||
183 | compatible = "pci8086,2e64.2", | ||
184 | "pci8086,2e64", | ||
185 | "pciclass101000", | ||
186 | "pciclass1010"; | ||
187 | |||
188 | reg = <0x14800 0x0 0x0 0x0 0x0>; | ||
189 | interrupts = <12 1>; | ||
190 | }; | ||
191 | |||
192 | localbus@a,0 { | ||
193 | compatible = "pci8086,2e65.2", | ||
194 | "pci8086,2e65", | ||
195 | "pciclassff0000", | ||
196 | "pciclassff00"; | ||
197 | |||
198 | reg = <0x15000 0x0 0x0 0x0 0x0>; | ||
199 | }; | ||
200 | |||
201 | serial@b,0 { | ||
202 | compatible = "pci8086,2e66.2", | ||
203 | "pci8086,2e66", | ||
204 | "pciclass070003", | ||
205 | "pciclass0700"; | ||
206 | |||
207 | reg = <0x15800 0x0 0x0 0x0 0x0>; | ||
208 | interrupts = <14 1>; | ||
209 | }; | ||
210 | |||
211 | gpio@b,1 { | ||
212 | compatible = "pci8086,2e67.2", | ||
213 | "pci8086,2e67", | ||
214 | "pciclassff0000", | ||
215 | "pciclassff00"; | ||
216 | |||
217 | #gpio-cells = <2>; | ||
218 | reg = <0x15900 0x0 0x0 0x0 0x0>; | ||
219 | interrupts = <15 1>; | ||
220 | gpio-controller; | ||
221 | }; | ||
222 | |||
223 | i2c-controller@b,2 { | ||
224 | #address-cells = <2>; | ||
225 | #size-cells = <1>; | ||
226 | compatible = "pci8086,2e68.2", | ||
227 | "pci8086,2e68", | ||
228 | "pciclass,ff0000", | ||
229 | "pciclass,ff00"; | ||
230 | |||
231 | reg = <0x15a00 0x0 0x0 0x0 0x0>; | ||
232 | interrupts = <16 1>; | ||
233 | ranges = <0 0 0x02000000 0 0xdffe0500 0x100 | ||
234 | 1 0 0x02000000 0 0xdffe0600 0x100 | ||
235 | 2 0 0x02000000 0 0xdffe0700 0x100>; | ||
236 | |||
237 | i2c@0 { | ||
238 | #address-cells = <1>; | ||
239 | #size-cells = <0>; | ||
240 | compatible = "intel,ce4100-i2c-controller"; | ||
241 | reg = <0 0 0x100>; | ||
242 | }; | ||
243 | |||
244 | i2c@1 { | ||
245 | #address-cells = <1>; | ||
246 | #size-cells = <0>; | ||
247 | compatible = "intel,ce4100-i2c-controller"; | ||
248 | reg = <1 0 0x100>; | ||
249 | |||
250 | gpio@26 { | ||
251 | #gpio-cells = <2>; | ||
252 | compatible = "ti,pcf8575"; | ||
253 | reg = <0x26>; | ||
254 | gpio-controller; | ||
255 | }; | ||
256 | }; | ||
257 | |||
258 | i2c@2 { | ||
259 | #address-cells = <1>; | ||
260 | #size-cells = <0>; | ||
261 | compatible = "intel,ce4100-i2c-controller"; | ||
262 | reg = <2 0 0x100>; | ||
263 | |||
264 | gpio@26 { | ||
265 | #gpio-cells = <2>; | ||
266 | compatible = "ti,pcf8575"; | ||
267 | reg = <0x26>; | ||
268 | gpio-controller; | ||
269 | }; | ||
270 | }; | ||
271 | }; | ||
272 | |||
273 | smard-card@b,3 { | ||
274 | compatible = "pci8086,2e69.2", | ||
275 | "pci8086,2e69", | ||
276 | "pciclass070500", | ||
277 | "pciclass0705"; | ||
278 | |||
279 | reg = <0x15b00 0x0 0x0 0x0 0x0>; | ||
280 | interrupts = <15 1>; | ||
281 | }; | ||
282 | |||
283 | spi-controller@b,4 { | ||
284 | #address-cells = <1>; | ||
285 | #size-cells = <0>; | ||
286 | compatible = | ||
287 | "pci8086,2e6a.2", | ||
288 | "pci8086,2e6a", | ||
289 | "pciclass,ff0000", | ||
290 | "pciclass,ff00"; | ||
291 | |||
292 | reg = <0x15c00 0x0 0x0 0x0 0x0>; | ||
293 | interrupts = <15 1>; | ||
294 | |||
295 | dac@0 { | ||
296 | compatible = "ti,pcm1755"; | ||
297 | reg = <0>; | ||
298 | spi-max-frequency = <115200>; | ||
299 | }; | ||
300 | |||
301 | dac@1 { | ||
302 | compatible = "ti,pcm1609a"; | ||
303 | reg = <1>; | ||
304 | spi-max-frequency = <115200>; | ||
305 | }; | ||
306 | |||
307 | eeprom@2 { | ||
308 | compatible = "atmel,at93c46"; | ||
309 | reg = <2>; | ||
310 | spi-max-frequency = <115200>; | ||
311 | }; | ||
312 | }; | ||
313 | |||
314 | multimedia@b,7 { | ||
315 | compatible = "pci8086,2e6d.2", | ||
316 | "pci8086,2e6d", | ||
317 | "pciclassff0000", | ||
318 | "pciclassff00"; | ||
319 | |||
320 | reg = <0x15f00 0x0 0x0 0x0 0x0>; | ||
321 | }; | ||
322 | |||
323 | ethernet@c,0 { | ||
324 | compatible = "pci8086,2e6e.2", | ||
325 | "pci8086,2e6e", | ||
326 | "pciclass020000", | ||
327 | "pciclass0200"; | ||
328 | |||
329 | reg = <0x16000 0x0 0x0 0x0 0x0>; | ||
330 | interrupts = <21 1>; | ||
331 | }; | ||
332 | |||
333 | clock@c,1 { | ||
334 | compatible = "pci8086,2e6f.2", | ||
335 | "pci8086,2e6f", | ||
336 | "pciclassff0000", | ||
337 | "pciclassff00"; | ||
338 | |||
339 | reg = <0x16100 0x0 0x0 0x0 0x0>; | ||
340 | interrupts = <3 1>; | ||
341 | }; | ||
342 | |||
343 | usb@d,0 { | ||
344 | compatible = "pci8086,2e70.2", | ||
345 | "pci8086,2e70", | ||
346 | "pciclass0c0320", | ||
347 | "pciclass0c03"; | ||
348 | |||
349 | reg = <0x16800 0x0 0x0 0x0 0x0>; | ||
350 | interrupts = <22 1>; | ||
351 | }; | ||
352 | |||
353 | usb@d,1 { | ||
354 | compatible = "pci8086,2e70.2", | ||
355 | "pci8086,2e70", | ||
356 | "pciclass0c0320", | ||
357 | "pciclass0c03"; | ||
358 | |||
359 | reg = <0x16900 0x0 0x0 0x0 0x0>; | ||
360 | interrupts = <22 1>; | ||
361 | }; | ||
362 | |||
363 | sata@e,0 { | ||
364 | compatible = "pci8086,2e71.0", | ||
365 | "pci8086,2e71", | ||
366 | "pciclass010601", | ||
367 | "pciclass0106"; | ||
368 | |||
369 | reg = <0x17000 0x0 0x0 0x0 0x0>; | ||
370 | interrupts = <23 1>; | ||
371 | }; | ||
372 | |||
373 | flash@f,0 { | ||
374 | compatible = "pci8086,701.1", | ||
375 | "pci8086,701", | ||
376 | "pciclass050100", | ||
377 | "pciclass0501"; | ||
378 | |||
379 | reg = <0x17800 0x0 0x0 0x0 0x0>; | ||
380 | interrupts = <13 1>; | ||
381 | }; | ||
382 | |||
383 | entertainment-encryption@10,0 { | ||
384 | compatible = "pci8086,702.1", | ||
385 | "pci8086,702", | ||
386 | "pciclass101000", | ||
387 | "pciclass1010"; | ||
388 | |||
389 | reg = <0x18000 0x0 0x0 0x0 0x0>; | ||
390 | }; | ||
391 | |||
392 | co-processor@11,0 { | ||
393 | compatible = "pci8086,703.1", | ||
394 | "pci8086,703", | ||
395 | "pciclass0b4000", | ||
396 | "pciclass0b40"; | ||
397 | |||
398 | reg = <0x18800 0x0 0x0 0x0 0x0>; | ||
399 | interrupts = <1 1>; | ||
400 | }; | ||
401 | |||
402 | multimedia@12,0 { | ||
403 | compatible = "pci8086,704.0", | ||
404 | "pci8086,704", | ||
405 | "pciclass048000", | ||
406 | "pciclass0480"; | ||
407 | |||
408 | reg = <0x19000 0x0 0x0 0x0 0x0>; | ||
409 | }; | ||
410 | }; | ||
411 | |||
412 | isa@1f,0 { | ||
413 | #address-cells = <2>; | ||
414 | #size-cells = <1>; | ||
415 | compatible = "isa"; | ||
416 | reg = <0xf800 0x0 0x0 0x0 0x0>; | ||
417 | ranges = <1 0 0 0 0 0x100>; | ||
418 | |||
419 | rtc@70 { | ||
420 | compatible = "intel,ce4100-rtc", "motorola,mc146818"; | ||
421 | interrupts = <8 3>; | ||
422 | interrupt-parent = <&ioapic1>; | ||
423 | ctrl-reg = <2>; | ||
424 | freq-reg = <0x26>; | ||
425 | reg = <1 0x70 2>; | ||
426 | }; | ||
427 | }; | ||
428 | }; | ||
429 | }; | ||
430 | }; | ||
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile new file mode 100644 index 000000000000..73b8be0f3675 --- /dev/null +++ b/arch/x86/platform/efi/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o | |||
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c new file mode 100644 index 000000000000..899e393d8e73 --- /dev/null +++ b/arch/x86/platform/efi/efi.c | |||
@@ -0,0 +1,710 @@ | |||
1 | /* | ||
2 | * Common EFI (Extensible Firmware Interface) support functions | ||
3 | * Based on Extensible Firmware Interface Specification version 1.0 | ||
4 | * | ||
5 | * Copyright (C) 1999 VA Linux Systems | ||
6 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> | ||
7 | * Copyright (C) 1999-2002 Hewlett-Packard Co. | ||
8 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
9 | * Stephane Eranian <eranian@hpl.hp.com> | ||
10 | * Copyright (C) 2005-2008 Intel Co. | ||
11 | * Fenghua Yu <fenghua.yu@intel.com> | ||
12 | * Bibo Mao <bibo.mao@intel.com> | ||
13 | * Chandramouli Narayanan <mouli@linux.intel.com> | ||
14 | * Huang Ying <ying.huang@intel.com> | ||
15 | * | ||
16 | * Copied from efi_32.c to eliminate the duplicated code between EFI | ||
17 | * 32/64 support code. --ying 2007-10-26 | ||
18 | * | ||
19 | * All EFI Runtime Services are not implemented yet as EFI only | ||
20 | * supports physical mode addressing on SoftSDV. This is to be fixed | ||
21 | * in a future version. --drummond 1999-07-20 | ||
22 | * | ||
23 | * Implemented EFI runtime services and virtual mode calls. --davidm | ||
24 | * | ||
25 | * Goutham Rao: <goutham.rao@intel.com> | ||
26 | * Skip non-WB memory and ignore empty memory ranges. | ||
27 | */ | ||
28 | |||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/efi.h> | ||
32 | #include <linux/bootmem.h> | ||
33 | #include <linux/memblock.h> | ||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/uaccess.h> | ||
36 | #include <linux/time.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <linux/reboot.h> | ||
39 | #include <linux/bcd.h> | ||
40 | |||
41 | #include <asm/setup.h> | ||
42 | #include <asm/efi.h> | ||
43 | #include <asm/time.h> | ||
44 | #include <asm/cacheflush.h> | ||
45 | #include <asm/tlbflush.h> | ||
46 | #include <asm/x86_init.h> | ||
47 | |||
48 | #define EFI_DEBUG 1 | ||
49 | #define PFX "EFI: " | ||
50 | |||
51 | int efi_enabled; | ||
52 | EXPORT_SYMBOL(efi_enabled); | ||
53 | |||
54 | struct efi efi; | ||
55 | EXPORT_SYMBOL(efi); | ||
56 | |||
57 | struct efi_memory_map memmap; | ||
58 | |||
59 | static struct efi efi_phys __initdata; | ||
60 | static efi_system_table_t efi_systab __initdata; | ||
61 | |||
62 | static int __init setup_noefi(char *arg) | ||
63 | { | ||
64 | efi_enabled = 0; | ||
65 | return 0; | ||
66 | } | ||
67 | early_param("noefi", setup_noefi); | ||
68 | |||
69 | int add_efi_memmap; | ||
70 | EXPORT_SYMBOL(add_efi_memmap); | ||
71 | |||
72 | static int __init setup_add_efi_memmap(char *arg) | ||
73 | { | ||
74 | add_efi_memmap = 1; | ||
75 | return 0; | ||
76 | } | ||
77 | early_param("add_efi_memmap", setup_add_efi_memmap); | ||
78 | |||
79 | |||
80 | static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) | ||
81 | { | ||
82 | return efi_call_virt2(get_time, tm, tc); | ||
83 | } | ||
84 | |||
85 | static efi_status_t virt_efi_set_time(efi_time_t *tm) | ||
86 | { | ||
87 | return efi_call_virt1(set_time, tm); | ||
88 | } | ||
89 | |||
90 | static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled, | ||
91 | efi_bool_t *pending, | ||
92 | efi_time_t *tm) | ||
93 | { | ||
94 | return efi_call_virt3(get_wakeup_time, | ||
95 | enabled, pending, tm); | ||
96 | } | ||
97 | |||
98 | static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) | ||
99 | { | ||
100 | return efi_call_virt2(set_wakeup_time, | ||
101 | enabled, tm); | ||
102 | } | ||
103 | |||
104 | static efi_status_t virt_efi_get_variable(efi_char16_t *name, | ||
105 | efi_guid_t *vendor, | ||
106 | u32 *attr, | ||
107 | unsigned long *data_size, | ||
108 | void *data) | ||
109 | { | ||
110 | return efi_call_virt5(get_variable, | ||
111 | name, vendor, attr, | ||
112 | data_size, data); | ||
113 | } | ||
114 | |||
115 | static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, | ||
116 | efi_char16_t *name, | ||
117 | efi_guid_t *vendor) | ||
118 | { | ||
119 | return efi_call_virt3(get_next_variable, | ||
120 | name_size, name, vendor); | ||
121 | } | ||
122 | |||
123 | static efi_status_t virt_efi_set_variable(efi_char16_t *name, | ||
124 | efi_guid_t *vendor, | ||
125 | unsigned long attr, | ||
126 | unsigned long data_size, | ||
127 | void *data) | ||
128 | { | ||
129 | return efi_call_virt5(set_variable, | ||
130 | name, vendor, attr, | ||
131 | data_size, data); | ||
132 | } | ||
133 | |||
134 | static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) | ||
135 | { | ||
136 | return efi_call_virt1(get_next_high_mono_count, count); | ||
137 | } | ||
138 | |||
139 | static void virt_efi_reset_system(int reset_type, | ||
140 | efi_status_t status, | ||
141 | unsigned long data_size, | ||
142 | efi_char16_t *data) | ||
143 | { | ||
144 | efi_call_virt4(reset_system, reset_type, status, | ||
145 | data_size, data); | ||
146 | } | ||
147 | |||
148 | static efi_status_t __init phys_efi_set_virtual_address_map( | ||
149 | unsigned long memory_map_size, | ||
150 | unsigned long descriptor_size, | ||
151 | u32 descriptor_version, | ||
152 | efi_memory_desc_t *virtual_map) | ||
153 | { | ||
154 | efi_status_t status; | ||
155 | |||
156 | efi_call_phys_prelog(); | ||
157 | status = efi_call_phys4(efi_phys.set_virtual_address_map, | ||
158 | memory_map_size, descriptor_size, | ||
159 | descriptor_version, virtual_map); | ||
160 | efi_call_phys_epilog(); | ||
161 | return status; | ||
162 | } | ||
163 | |||
164 | static efi_status_t __init phys_efi_get_time(efi_time_t *tm, | ||
165 | efi_time_cap_t *tc) | ||
166 | { | ||
167 | efi_status_t status; | ||
168 | |||
169 | efi_call_phys_prelog(); | ||
170 | status = efi_call_phys2(efi_phys.get_time, tm, tc); | ||
171 | efi_call_phys_epilog(); | ||
172 | return status; | ||
173 | } | ||
174 | |||
175 | int efi_set_rtc_mmss(unsigned long nowtime) | ||
176 | { | ||
177 | int real_seconds, real_minutes; | ||
178 | efi_status_t status; | ||
179 | efi_time_t eft; | ||
180 | efi_time_cap_t cap; | ||
181 | |||
182 | status = efi.get_time(&eft, &cap); | ||
183 | if (status != EFI_SUCCESS) { | ||
184 | printk(KERN_ERR "Oops: efitime: can't read time!\n"); | ||
185 | return -1; | ||
186 | } | ||
187 | |||
188 | real_seconds = nowtime % 60; | ||
189 | real_minutes = nowtime / 60; | ||
190 | if (((abs(real_minutes - eft.minute) + 15)/30) & 1) | ||
191 | real_minutes += 30; | ||
192 | real_minutes %= 60; | ||
193 | eft.minute = real_minutes; | ||
194 | eft.second = real_seconds; | ||
195 | |||
196 | status = efi.set_time(&eft); | ||
197 | if (status != EFI_SUCCESS) { | ||
198 | printk(KERN_ERR "Oops: efitime: can't write time!\n"); | ||
199 | return -1; | ||
200 | } | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | unsigned long efi_get_time(void) | ||
205 | { | ||
206 | efi_status_t status; | ||
207 | efi_time_t eft; | ||
208 | efi_time_cap_t cap; | ||
209 | |||
210 | status = efi.get_time(&eft, &cap); | ||
211 | if (status != EFI_SUCCESS) | ||
212 | printk(KERN_ERR "Oops: efitime: can't read time!\n"); | ||
213 | |||
214 | return mktime(eft.year, eft.month, eft.day, eft.hour, | ||
215 | eft.minute, eft.second); | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Tell the kernel about the EFI memory map. This might include | ||
220 | * more than the max 128 entries that can fit in the e820 legacy | ||
221 | * (zeropage) memory map. | ||
222 | */ | ||
223 | |||
224 | static void __init do_add_efi_memmap(void) | ||
225 | { | ||
226 | void *p; | ||
227 | |||
228 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
229 | efi_memory_desc_t *md = p; | ||
230 | unsigned long long start = md->phys_addr; | ||
231 | unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; | ||
232 | int e820_type; | ||
233 | |||
234 | switch (md->type) { | ||
235 | case EFI_LOADER_CODE: | ||
236 | case EFI_LOADER_DATA: | ||
237 | case EFI_BOOT_SERVICES_CODE: | ||
238 | case EFI_BOOT_SERVICES_DATA: | ||
239 | case EFI_CONVENTIONAL_MEMORY: | ||
240 | if (md->attribute & EFI_MEMORY_WB) | ||
241 | e820_type = E820_RAM; | ||
242 | else | ||
243 | e820_type = E820_RESERVED; | ||
244 | break; | ||
245 | case EFI_ACPI_RECLAIM_MEMORY: | ||
246 | e820_type = E820_ACPI; | ||
247 | break; | ||
248 | case EFI_ACPI_MEMORY_NVS: | ||
249 | e820_type = E820_NVS; | ||
250 | break; | ||
251 | case EFI_UNUSABLE_MEMORY: | ||
252 | e820_type = E820_UNUSABLE; | ||
253 | break; | ||
254 | default: | ||
255 | /* | ||
256 | * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE | ||
257 | * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO | ||
258 | * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE | ||
259 | */ | ||
260 | e820_type = E820_RESERVED; | ||
261 | break; | ||
262 | } | ||
263 | e820_add_region(start, size, e820_type); | ||
264 | } | ||
265 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | ||
266 | } | ||
267 | |||
268 | void __init efi_memblock_x86_reserve_range(void) | ||
269 | { | ||
270 | unsigned long pmap; | ||
271 | |||
272 | #ifdef CONFIG_X86_32 | ||
273 | pmap = boot_params.efi_info.efi_memmap; | ||
274 | #else | ||
275 | pmap = (boot_params.efi_info.efi_memmap | | ||
276 | ((__u64)boot_params.efi_info.efi_memmap_hi<<32)); | ||
277 | #endif | ||
278 | memmap.phys_map = (void *)pmap; | ||
279 | memmap.nr_map = boot_params.efi_info.efi_memmap_size / | ||
280 | boot_params.efi_info.efi_memdesc_size; | ||
281 | memmap.desc_version = boot_params.efi_info.efi_memdesc_version; | ||
282 | memmap.desc_size = boot_params.efi_info.efi_memdesc_size; | ||
283 | memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size, | ||
284 | "EFI memmap"); | ||
285 | } | ||
286 | |||
287 | #if EFI_DEBUG | ||
288 | static void __init print_efi_memmap(void) | ||
289 | { | ||
290 | efi_memory_desc_t *md; | ||
291 | void *p; | ||
292 | int i; | ||
293 | |||
294 | for (p = memmap.map, i = 0; | ||
295 | p < memmap.map_end; | ||
296 | p += memmap.desc_size, i++) { | ||
297 | md = p; | ||
298 | printk(KERN_INFO PFX "mem%02u: type=%u, attr=0x%llx, " | ||
299 | "range=[0x%016llx-0x%016llx) (%lluMB)\n", | ||
300 | i, md->type, md->attribute, md->phys_addr, | ||
301 | md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), | ||
302 | (md->num_pages >> (20 - EFI_PAGE_SHIFT))); | ||
303 | } | ||
304 | } | ||
305 | #endif /* EFI_DEBUG */ | ||
306 | |||
307 | void __init efi_reserve_boot_services(void) | ||
308 | { | ||
309 | void *p; | ||
310 | |||
311 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
312 | efi_memory_desc_t *md = p; | ||
313 | u64 start = md->phys_addr; | ||
314 | u64 size = md->num_pages << EFI_PAGE_SHIFT; | ||
315 | |||
316 | if (md->type != EFI_BOOT_SERVICES_CODE && | ||
317 | md->type != EFI_BOOT_SERVICES_DATA) | ||
318 | continue; | ||
319 | /* Only reserve where possible: | ||
320 | * - Not within any already allocated areas | ||
321 | * - Not over any memory area (really needed, if above?) | ||
322 | * - Not within any part of the kernel | ||
323 | * - Not the bios reserved area | ||
324 | */ | ||
325 | if ((start+size >= virt_to_phys(_text) | ||
326 | && start <= virt_to_phys(_end)) || | ||
327 | !e820_all_mapped(start, start+size, E820_RAM) || | ||
328 | memblock_x86_check_reserved_size(&start, &size, | ||
329 | 1<<EFI_PAGE_SHIFT)) { | ||
330 | /* Could not reserve, skip it */ | ||
331 | md->num_pages = 0; | ||
332 | memblock_dbg(PFX "Could not reserve boot range " | ||
333 | "[0x%010llx-0x%010llx]\n", | ||
334 | start, start+size-1); | ||
335 | } else | ||
336 | memblock_x86_reserve_range(start, start+size, | ||
337 | "EFI Boot"); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | static void __init efi_free_boot_services(void) | ||
342 | { | ||
343 | void *p; | ||
344 | |||
345 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
346 | efi_memory_desc_t *md = p; | ||
347 | unsigned long long start = md->phys_addr; | ||
348 | unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; | ||
349 | |||
350 | if (md->type != EFI_BOOT_SERVICES_CODE && | ||
351 | md->type != EFI_BOOT_SERVICES_DATA) | ||
352 | continue; | ||
353 | |||
354 | /* Could not reserve boot area */ | ||
355 | if (!size) | ||
356 | continue; | ||
357 | |||
358 | free_bootmem_late(start, size); | ||
359 | } | ||
360 | } | ||
361 | |||
/*
 * efi_init - early EFI firmware discovery.
 *
 * Maps the physical EFI system table passed by the boot loader, copies
 * it into a permanent kernel buffer, sanity-checks signature/revision,
 * records the firmware vendor string, scans the configuration tables
 * for well-known GUIDs (MPS, ACPI, SMBIOS, HCDP, UGA, ...), captures
 * the physical addresses of the runtime services needed before
 * SetVirtualAddressMap(), and finally maps the EFI memory map itself.
 *
 * All mappings here use early_ioremap()/early_iounmap() since this runs
 * before the normal ioremap machinery is available.
 */
void __init efi_init(void)
{
	efi_config_table_t *config_tables;
	efi_runtime_services_t *runtime;
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i = 0;
	void *tmp;

#ifdef CONFIG_X86_32
	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
#else
	/* 64-bit systab address is split across two 32-bit boot_params fields */
	efi_phys.systab = (efi_system_table_t *)
		(boot_params.efi_info.efi_systab |
		 ((__u64)boot_params.efi_info.efi_systab_hi<<32));
#endif

	efi.systab = early_ioremap((unsigned long)efi_phys.systab,
				   sizeof(efi_system_table_t));
	if (efi.systab == NULL)
		printk(KERN_ERR "Couldn't map the EFI system table!\n");
	/*
	 * NOTE(review): a failed mapping is only logged; the memcpy below
	 * would then dereference NULL — confirm this is intentional
	 * (presumably the mapping cannot fail this early in practice).
	 */
	memcpy(&efi_systab, efi.systab, sizeof(efi_system_table_t));
	early_iounmap(efi.systab, sizeof(efi_system_table_t));
	/* From here on use the permanent kernel copy of the system table. */
	efi.systab = &efi_systab;

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
		printk(KERN_ERR "EFI system table signature incorrect!\n");
	if ((efi.systab->hdr.revision >> 16) == 0)
		printk(KERN_ERR "Warning: EFI system table version "
		       "%d.%02d, expected 1.00 or greater!\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	/*
	 * Show what we know for posterity
	 */
	/* fw_vendor is a UCS-2 string; copy it byte-wise into "vendor". */
	c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	} else
		printk(KERN_ERR PFX "Could not map the firmware vendor!\n");
	early_iounmap(tmp, 2);

	printk(KERN_INFO "EFI v%u.%.02u by %s\n",
	       efi.systab->hdr.revision >> 16,
	       efi.systab->hdr.revision & 0xffff, vendor);

	/*
	 * Let's see what config tables the firmware passed to us.
	 */
	config_tables = early_ioremap(
		efi.systab->tables,
		efi.systab->nr_tables * sizeof(efi_config_table_t));
	if (config_tables == NULL)
		printk(KERN_ERR "Could not map EFI Configuration Table!\n");

	/* Start a single log line; each matched table appends to it. */
	printk(KERN_INFO);
	for (i = 0; i < efi.systab->nr_tables; i++) {
		if (!efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID)) {
			efi.mps = config_tables[i].table;
			printk(" MPS=0x%lx ", config_tables[i].table);
		} else if (!efi_guidcmp(config_tables[i].guid,
					ACPI_20_TABLE_GUID)) {
			efi.acpi20 = config_tables[i].table;
			printk(" ACPI 2.0=0x%lx ", config_tables[i].table);
		} else if (!efi_guidcmp(config_tables[i].guid,
					ACPI_TABLE_GUID)) {
			efi.acpi = config_tables[i].table;
			printk(" ACPI=0x%lx ", config_tables[i].table);
		} else if (!efi_guidcmp(config_tables[i].guid,
					SMBIOS_TABLE_GUID)) {
			efi.smbios = config_tables[i].table;
			printk(" SMBIOS=0x%lx ", config_tables[i].table);
#ifdef CONFIG_X86_UV
		} else if (!efi_guidcmp(config_tables[i].guid,
					UV_SYSTEM_TABLE_GUID)) {
			efi.uv_systab = config_tables[i].table;
			printk(" UVsystab=0x%lx ", config_tables[i].table);
#endif
		} else if (!efi_guidcmp(config_tables[i].guid,
					HCDP_TABLE_GUID)) {
			efi.hcdp = config_tables[i].table;
			printk(" HCDP=0x%lx ", config_tables[i].table);
		} else if (!efi_guidcmp(config_tables[i].guid,
					UGA_IO_PROTOCOL_GUID)) {
			efi.uga = config_tables[i].table;
			printk(" UGA=0x%lx ", config_tables[i].table);
		}
	}
	printk("\n");
	early_iounmap(config_tables,
		      efi.systab->nr_tables * sizeof(efi_config_table_t));

	/*
	 * Check out the runtime services table. We need to map
	 * the runtime services table so that we can grab the physical
	 * address of several of the EFI runtime functions, needed to
	 * set the firmware into virtual mode.
	 */
	runtime = early_ioremap((unsigned long)efi.systab->runtime,
				sizeof(efi_runtime_services_t));
	if (runtime != NULL) {
		/*
		 * We will only need *early* access to the following
		 * two EFI runtime services before set_virtual_address_map
		 * is invoked.
		 */
		efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
		efi_phys.set_virtual_address_map =
			(efi_set_virtual_address_map_t *)
			runtime->set_virtual_address_map;
		/*
		 * Make efi_get_time can be called before entering
		 * virtual mode.
		 */
		efi.get_time = phys_efi_get_time;
	} else
		printk(KERN_ERR "Could not map the EFI runtime service "
		       "table!\n");
	early_iounmap(runtime, sizeof(efi_runtime_services_t));

	/* Map the EFI memory map */
	memmap.map = early_ioremap((unsigned long)memmap.phys_map,
				   memmap.nr_map * memmap.desc_size);
	if (memmap.map == NULL)
		printk(KERN_ERR "Could not map the EFI memory map!\n");
	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);

	if (memmap.desc_size != sizeof(efi_memory_desc_t))
		printk(KERN_WARNING
		  "Kernel-defined memdesc doesn't match the one from EFI!\n");

	/* Optionally fold the EFI memory map into the e820 map. */
	if (add_efi_memmap)
		do_add_efi_memmap();

#ifdef CONFIG_X86_32
	/* On 32-bit, route the platform wallclock through EFI time services. */
	x86_platform.get_wallclock = efi_get_time;
	x86_platform.set_wallclock = efi_set_rtc_mmss;
#endif

#if EFI_DEBUG
	print_efi_memmap();
#endif
}
511 | |||
512 | void __init efi_set_executable(efi_memory_desc_t *md, bool executable) | ||
513 | { | ||
514 | u64 addr, npages; | ||
515 | |||
516 | addr = md->virt_addr; | ||
517 | npages = md->num_pages; | ||
518 | |||
519 | memrange_efi_to_native(&addr, &npages); | ||
520 | |||
521 | if (executable) | ||
522 | set_memory_x(addr, npages); | ||
523 | else | ||
524 | set_memory_nx(addr, npages); | ||
525 | } | ||
526 | |||
527 | static void __init runtime_code_page_mkexec(void) | ||
528 | { | ||
529 | efi_memory_desc_t *md; | ||
530 | void *p; | ||
531 | |||
532 | /* Make EFI runtime service code area executable */ | ||
533 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
534 | md = p; | ||
535 | |||
536 | if (md->type != EFI_RUNTIME_SERVICES_CODE) | ||
537 | continue; | ||
538 | |||
539 | efi_set_executable(md, true); | ||
540 | } | ||
541 | } | ||
542 | |||
/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor and update
 * that memory descriptor with the virtual address obtained from ioremap().
 * This enables the runtime services to be called without having to
 * thunk back into physical mode for every invocation.
 *
 * Note: SetVirtualAddressMap() may be called exactly once by the
 * firmware, so failure here is fatal (panic below).
 */
void __init efi_enter_virtual_mode(void)
{
	efi_memory_desc_t *md, *prev_md = NULL;
	efi_status_t status;
	unsigned long size;
	u64 end, systab, addr, npages, end_pfn;
	void *p, *va, *new_memmap = NULL;
	int count = 0;

	efi.systab = NULL;

	/* Merge contiguous regions of the same type and attribute */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		u64 prev_size;
		md = p;

		if (!prev_md) {
			prev_md = md;
			continue;
		}

		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}

		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			/*
			 * Fold this descriptor into the previous one and
			 * neutralize it so later passes ignore it.
			 */
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}

	/*
	 * Map every runtime region (plus boot-services regions, which the
	 * firmware may still touch during SetVirtualAddressMap()) and
	 * record the chosen virtual address in the descriptor.
	 */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
		    md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA)
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;

		/*
		 * Regions already covered by the kernel direct mapping can
		 * use __va(); everything else needs an explicit mapping.
		 */
		end_pfn = PFN_UP(end);
		if (end_pfn <= max_low_pfn_mapped
		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
			&& end_pfn <= max_pfn_mapped))
			va = __va(md->phys_addr);
		else
			va = efi_ioremap(md->phys_addr, size, md->type);

		md->virt_addr = (u64) (unsigned long) va;

		if (!va) {
			printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
			       (unsigned long long)md->phys_addr);
			continue;
		}

		/* Non-write-back regions must be mapped uncached. */
		if (!(md->attribute & EFI_MEMORY_WB)) {
			addr = md->virt_addr;
			npages = md->num_pages;
			memrange_efi_to_native(&addr, &npages);
			set_memory_uc(addr, npages);
		}

		/* Relocate our system-table pointer if it lives in this region. */
		systab = (u64) (unsigned long) efi_phys.systab;
		if (md->phys_addr <= systab && systab < end) {
			systab += md->virt_addr - md->phys_addr;
			efi.systab = (efi_system_table_t *) (unsigned long) systab;
		}
		/*
		 * NOTE(review): the krealloc() return value is not checked;
		 * on allocation failure the memcpy below would dereference
		 * NULL (and the previous buffer would leak) — confirm
		 * whether an explicit OOM check is wanted here.
		 */
		new_memmap = krealloc(new_memmap,
				      (count + 1) * memmap.desc_size,
				      GFP_KERNEL);
		memcpy(new_memmap + (count * memmap.desc_size), md,
		       memmap.desc_size);
		count++;
	}

	BUG_ON(!efi.systab);

	/* Hand the firmware the trimmed map; this call is one-shot. */
	status = phys_efi_set_virtual_address_map(
		memmap.desc_size * count,
		memmap.desc_size,
		memmap.desc_version,
		(efi_memory_desc_t *)__pa(new_memmap));

	if (status != EFI_SUCCESS) {
		printk(KERN_ALERT "Unable to switch EFI into virtual mode "
		       "(status=%lx)!\n", status);
		panic("EFI call to SetVirtualAddressMap() failed!");
	}

	/*
	 * Thankfully, it does seem that no runtime services other than
	 * SetVirtualAddressMap() will touch boot services code, so we can
	 * get rid of it all at this point
	 */
	efi_free_boot_services();

	/*
	 * Now that EFI is in virtual mode, update the function
	 * pointers in the runtime service table to the new virtual addresses.
	 *
	 * Call EFI services through wrapper functions.
	 */
	efi.get_time = virt_efi_get_time;
	efi.set_time = virt_efi_set_time;
	efi.get_wakeup_time = virt_efi_get_wakeup_time;
	efi.set_wakeup_time = virt_efi_set_wakeup_time;
	efi.get_variable = virt_efi_get_variable;
	efi.get_next_variable = virt_efi_get_next_variable;
	efi.set_variable = virt_efi_set_variable;
	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
	efi.reset_system = virt_efi_reset_system;
	/* Must never be called again — see one-shot note above. */
	efi.set_virtual_address_map = NULL;
	if (__supported_pte_mask & _PAGE_NX)
		runtime_code_page_mkexec();
	/* The early mapping of the memory map is no longer needed. */
	early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
	memmap.map = NULL;
	kfree(new_memmap);
}
678 | |||
679 | /* | ||
680 | * Convenience functions to obtain memory types and attributes | ||
681 | */ | ||
682 | u32 efi_mem_type(unsigned long phys_addr) | ||
683 | { | ||
684 | efi_memory_desc_t *md; | ||
685 | void *p; | ||
686 | |||
687 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
688 | md = p; | ||
689 | if ((md->phys_addr <= phys_addr) && | ||
690 | (phys_addr < (md->phys_addr + | ||
691 | (md->num_pages << EFI_PAGE_SHIFT)))) | ||
692 | return md->type; | ||
693 | } | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | u64 efi_mem_attributes(unsigned long phys_addr) | ||
698 | { | ||
699 | efi_memory_desc_t *md; | ||
700 | void *p; | ||
701 | |||
702 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
703 | md = p; | ||
704 | if ((md->phys_addr <= phys_addr) && | ||
705 | (phys_addr < (md->phys_addr + | ||
706 | (md->num_pages << EFI_PAGE_SHIFT)))) | ||
707 | return md->attribute; | ||
708 | } | ||
709 | return 0; | ||
710 | } | ||
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c new file mode 100644 index 000000000000..5cab48ee61a4 --- /dev/null +++ b/arch/x86/platform/efi/efi_32.c | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * Extensible Firmware Interface | ||
3 | * | ||
4 | * Based on Extensible Firmware Interface Specification version 1.0 | ||
5 | * | ||
6 | * Copyright (C) 1999 VA Linux Systems | ||
7 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> | ||
8 | * Copyright (C) 1999-2002 Hewlett-Packard Co. | ||
9 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
10 | * Stephane Eranian <eranian@hpl.hp.com> | ||
11 | * | ||
12 | * All EFI Runtime Services are not implemented yet as EFI only | ||
13 | * supports physical mode addressing on SoftSDV. This is to be fixed | ||
14 | * in a future version. --drummond 1999-07-20 | ||
15 | * | ||
16 | * Implemented EFI runtime services and virtual mode calls. --davidm | ||
17 | * | ||
18 | * Goutham Rao: <goutham.rao@intel.com> | ||
19 | * Skip non-WB memory and ignore empty memory ranges. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/efi.h> | ||
26 | |||
27 | #include <asm/io.h> | ||
28 | #include <asm/page.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/tlbflush.h> | ||
31 | #include <asm/efi.h> | ||
32 | |||
33 | /* | ||
34 | * To make EFI call EFI runtime service in physical addressing mode we need | ||
35 | * prelog/epilog before/after the invocation to disable interrupt, to | ||
36 | * claim EFI runtime service handler exclusively and to duplicate a memory in | ||
37 | * low memory space say 0 - 3G. | ||
38 | */ | ||
39 | |||
/* IRQ flags saved across the physical-mode EFI call. */
static unsigned long efi_rt_eflags;
/* Backup of the swapper_pg_dir entries temporarily overwritten below. */
static pgd_t efi_bak_pg_dir_pointer[2];

/*
 * efi_call_phys_prelog - prepare the CPU for a physical-mode EFI call.
 *
 * Disables interrupts, creates a temporary identity mapping of low
 * memory by aliasing the kernel's PAGE_OFFSET page-directory entries
 * down to virtual address 0, and loads a GDT addressed by its physical
 * address so the firmware can run with paging turned off.
 * Must be paired with efi_call_phys_epilog().
 */
void efi_call_phys_prelog(void)
{
	unsigned long cr4;
	unsigned long temp;
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	/*
	 * If I don't have PAE, I should just duplicate two entries in page
	 * directory. If I have PAE, I just need to duplicate one entry in
	 * page directory.
	 */
	cr4 = read_cr4_safe();

	if (cr4 & X86_CR4_PAE) {
		/* One PGD entry covers 1GB with PAE: alias entry 0 only. */
		efi_bak_pg_dir_pointer[0].pgd =
			swapper_pg_dir[pgd_index(0)].pgd;
		swapper_pg_dir[0].pgd =
			swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
	} else {
		/* Without PAE each entry covers 4MB: alias entries 0 and 1. */
		efi_bak_pg_dir_pointer[0].pgd =
			swapper_pg_dir[pgd_index(0)].pgd;
		efi_bak_pg_dir_pointer[1].pgd =
			swapper_pg_dir[pgd_index(0x400000)].pgd;
		swapper_pg_dir[pgd_index(0)].pgd =
			swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
		temp = PAGE_OFFSET + 0x400000;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
			swapper_pg_dir[pgd_index(temp)].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	/* Point the GDT register at the physical address of the GDT. */
	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
84 | |||
/*
 * efi_call_phys_epilog - undo efi_call_phys_prelog().
 *
 * Reloads the GDT by its virtual address, restores the page-directory
 * entries that were aliased to low memory, flushes the TLB and
 * re-enables interrupts.
 */
void efi_call_phys_epilog(void)
{
	unsigned long cr4;
	struct desc_ptr gdt_descr;

	/* Back to the normal, virtually-addressed GDT. */
	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	cr4 = read_cr4_safe();

	/* Restore the entries saved in the prelog (one with PAE, two without). */
	if (cr4 & X86_CR4_PAE) {
		swapper_pg_dir[pgd_index(0)].pgd =
			efi_bak_pg_dir_pointer[0].pgd;
	} else {
		swapper_pg_dir[pgd_index(0)].pgd =
			efi_bak_pg_dir_pointer[0].pgd;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
			efi_bak_pg_dir_pointer[1].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	local_irq_restore(efi_rt_eflags);
}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c new file mode 100644 index 000000000000..ac3aa54e2654 --- /dev/null +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * x86_64 specific EFI support functions | ||
3 | * Based on Extensible Firmware Interface Specification version 1.0 | ||
4 | * | ||
5 | * Copyright (C) 2005-2008 Intel Co. | ||
6 | * Fenghua Yu <fenghua.yu@intel.com> | ||
7 | * Bibo Mao <bibo.mao@intel.com> | ||
8 | * Chandramouli Narayanan <mouli@linux.intel.com> | ||
9 | * Huang Ying <ying.huang@intel.com> | ||
10 | * | ||
11 | * Code to convert EFI to E820 map has been implemented in elilo bootloader | ||
12 | * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table | ||
13 | * is setup appropriately for EFI runtime code. | ||
14 | * - mouli 06/14/2007. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/bootmem.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/efi.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/reboot.h> | ||
30 | |||
31 | #include <asm/setup.h> | ||
32 | #include <asm/page.h> | ||
33 | #include <asm/e820.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/tlbflush.h> | ||
36 | #include <asm/proto.h> | ||
37 | #include <asm/efi.h> | ||
38 | #include <asm/cacheflush.h> | ||
39 | #include <asm/fixmap.h> | ||
40 | |||
/* Backup of the PGD entry for virtual address 0 (see prelog/epilog below). */
static pgd_t save_pgd __initdata;
/* IRQ flags saved across the physical-mode EFI call. */
static unsigned long efi_flags __initdata;

/*
 * Toggle the executable permission on all EFI runtime- and
 * boot-services code regions.  A no-op when the CPU does not support
 * NX (no _PAGE_NX in __supported_pte_mask).
 */
static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;
	void *p;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}
60 | |||
/*
 * efi_call_phys_prelog - prepare for a physical-mode EFI call (x86-64).
 *
 * Makes EFI code regions executable, disables interrupts, and aliases
 * the direct-mapping PGD entry for physical address 0 to virtual
 * address 0 so the firmware can run with an identity mapping of low
 * memory.  Must be paired with efi_call_phys_epilog().
 */
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;

	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);
	vaddress = (unsigned long)__va(0x0UL);
	/* Save the PGD entry for VA 0 before overwriting it with the alias. */
	save_pgd = *pgd_offset_k(0x0UL);
	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
	__flush_tlb_all();
}
72 | |||
/*
 * efi_call_phys_epilog - undo efi_call_phys_prelog() (x86-64).
 *
 * Restores the saved PGD entry for virtual address 0, flushes the TLB,
 * re-enables interrupts, and marks EFI code regions non-executable
 * again.
 */
void __init efi_call_phys_epilog(void)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	set_pgd(pgd_offset_k(0x0UL), save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
83 | |||
/*
 * efi_ioremap - map an EFI region for kernel access (x86-64).
 *
 * MMIO regions get a regular ioremap(); all other types are added to
 * the kernel direct mapping via init_memory_mapping() and returned as
 * a __va() pointer.  If init_memory_mapping() stopped short of the
 * requested end, the remainder is mapped by a recursive call.
 *
 * NOTE(review): the recursive call's return value is discarded, and
 * __va(phys_addr) for the full range is returned regardless — confirm
 * the nested mapping cannot fail in a way callers need to see.
 */
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type);
	}

	return (void __iomem *)__va(phys_addr);
}
diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S new file mode 100644 index 000000000000..fbe66e626c09 --- /dev/null +++ b/arch/x86/platform/efi/efi_stub_32.S | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * EFI call stub for IA32. | ||
3 | * | ||
4 | * This stub allows us to make EFI calls in physical mode with interrupts | ||
5 | * turned off. | ||
6 | */ | ||
7 | |||
8 | #include <linux/linkage.h> | ||
9 | #include <asm/page_types.h> | ||
10 | |||
11 | /* | ||
12 | * efi_call_phys(void *, ...) is a function with variable parameters. | ||
13 | * All the callers of this function assure that all the parameters are 4-bytes. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save. | ||
18 | * So we'd better save all of them at the beginning of this function and restore | ||
19 | * at the end no matter how many we use, because we can not assure EFI runtime | ||
20 | * service functions will comply with gcc calling convention, too. | ||
21 | */ | ||
22 | |||
23 | .text | ||
/*
 * efi_call_phys - trampoline into an EFI service with paging disabled.
 *
 * On entry the stack holds: return address, the EFI function pointer,
 * then that function's arguments (all 4 bytes each).  The return
 * address and function pointer are stashed in the .data words below,
 * paging is turned off (the identity mapping set up by
 * efi_call_phys_prelog() keeps execution alive), the service is
 * called, and everything is undone on the way out.
 * Not reentrant: uses static storage for the saved state.
 */
ENTRY(efi_call_phys)
	/*
	 * 0. The function can only be called in Linux kernel. So CS has been
	 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
	 * the values of these registers are the same. And, the corresponding
	 * GDT entries are identical. So I will do nothing about segment reg
	 * and GDT, but change GDT base register in prelog and epilog.
	 */

	/*
	 * 1. Now I am running with EIP = <physical address> + PAGE_OFFSET.
	 * But to make it smoothly switch from virtual mode to flat mode.
	 * The mapping of lower virtual memory has been created in prelog and
	 * epilog.
	 */
	movl	$1f, %edx
	subl	$__PAGE_OFFSET, %edx
	jmp	*%edx
1:

	/*
	 * 2. Now on the top of stack is the return
	 * address in the caller of efi_call_phys(), then parameter 1,
	 * parameter 2, ..., param n. To make things easy, we save the return
	 * address of efi_call_phys in a global variable.
	 */
	popl	%edx
	movl	%edx, saved_return_addr
	/* get the function pointer into ECX*/
	popl	%ecx
	movl	%ecx, efi_rt_function_ptr
	/* Push the physical address of label 2 as the service's return addr. */
	movl	$2f, %edx
	subl	$__PAGE_OFFSET, %edx
	pushl	%edx

	/*
	 * 3. Clear PG bit in %CR0.
	 */
	movl	%cr0, %edx
	andl	$0x7fffffff, %edx
	movl	%edx, %cr0
	jmp	1f
1:

	/*
	 * 4. Adjust stack pointer.
	 */
	subl	$__PAGE_OFFSET, %esp

	/*
	 * 5. Call the physical function.
	 */
	jmp	*%ecx

2:
	/*
	 * 6. After EFI runtime service returns, control will return to
	 * following instruction. We'd better readjust stack pointer first.
	 */
	addl	$__PAGE_OFFSET, %esp

	/*
	 * 7. Restore PG bit
	 */
	movl	%cr0, %edx
	orl	$0x80000000, %edx
	movl	%edx, %cr0
	jmp	1f
1:
	/*
	 * 8. Now restore the virtual mode from flat mode by
	 * adding EIP with PAGE_OFFSET.
	 */
	movl	$1f, %edx
	jmp	*%edx
1:

	/*
	 * 9. Balance the stack. And because EAX contain the return value,
	 * we'd better not clobber it.
	 */
	leal	efi_rt_function_ptr, %edx
	movl	(%edx), %ecx
	pushl	%ecx

	/*
	 * 10. Push the saved return address onto the stack and return.
	 */
	leal	saved_return_addr, %edx
	movl	(%edx), %ecx
	pushl	%ecx
	ret
ENDPROC(efi_call_phys)
.previous

/* Static scratch words used above; see the non-reentrancy note. */
.data
saved_return_addr:
	.long 0
efi_rt_function_ptr:
	.long 0
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S new file mode 100644 index 000000000000..4c07ccab8146 --- /dev/null +++ b/arch/x86/platform/efi/efi_stub_64.S | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * Function calling ABI conversion from Linux to EFI for x86_64 | ||
3 | * | ||
4 | * Copyright (C) 2007 Intel Corp | ||
5 | * Bibo Mao <bibo.mao@intel.com> | ||
6 | * Huang Ying <ying.huang@intel.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/linkage.h> | ||
10 | |||
/*
 * SAVE_XMM: switch to a 16-byte-aligned scratch frame, remember the
 * caller's %rsp and %cr0 (TS is cleared via clts so the firmware may
 * use SSE), and preserve %xmm0-%xmm5 across the firmware call.
 */
#define SAVE_XMM			\
	mov %rsp, %rax;			\
	subq $0x70, %rsp;		\
	and $~0xf, %rsp;		\
	mov %rax, (%rsp);		\
	mov %cr0, %rax;			\
	clts;				\
	mov %rax, 0x8(%rsp);		\
	movaps %xmm0, 0x60(%rsp);	\
	movaps %xmm1, 0x50(%rsp);	\
	movaps %xmm2, 0x40(%rsp);	\
	movaps %xmm3, 0x30(%rsp);	\
	movaps %xmm4, 0x20(%rsp);	\
	movaps %xmm5, 0x10(%rsp)

/* RESTORE_XMM: undo SAVE_XMM — restore %xmm0-%xmm5, %cr0 and %rsp. */
#define RESTORE_XMM			\
	movaps 0x60(%rsp), %xmm0;	\
	movaps 0x50(%rsp), %xmm1;	\
	movaps 0x40(%rsp), %xmm2;	\
	movaps 0x30(%rsp), %xmm3;	\
	movaps 0x20(%rsp), %xmm4;	\
	movaps 0x10(%rsp), %xmm5;	\
	mov 0x8(%rsp), %rsi;		\
	mov %rsi, %cr0;			\
	mov (%rsp), %rsp

/*
 * efi_callN(fn, arg1..argN): the function pointer arrives in %rdi
 * (SysV ABI); arguments are shuffled from SysV registers
 * (rsi, rdx, rcx, r8, r9) into the EFI/MS x64 convention
 * (rcx, rdx, r8, r9, stack).  The 32-byte subq provides the shadow
 * space the callee expects.
 */
ENTRY(efi_call0)
	SAVE_XMM
	subq $32, %rsp
	call *%rdi
	addq $32, %rsp
	RESTORE_XMM
	ret
ENDPROC(efi_call0)

ENTRY(efi_call1)
	SAVE_XMM
	subq $32, %rsp
	mov  %rsi, %rcx
	call *%rdi
	addq $32, %rsp
	RESTORE_XMM
	ret
ENDPROC(efi_call1)

ENTRY(efi_call2)
	SAVE_XMM
	subq $32, %rsp
	mov  %rsi, %rcx
	call *%rdi
	addq $32, %rsp
	RESTORE_XMM
	ret
ENDPROC(efi_call2)

ENTRY(efi_call3)
	SAVE_XMM
	subq $32, %rsp
	mov  %rcx, %r8
	mov  %rsi, %rcx
	call *%rdi
	addq $32, %rsp
	RESTORE_XMM
	ret
ENDPROC(efi_call3)

ENTRY(efi_call4)
	SAVE_XMM
	subq $32, %rsp
	mov %r8, %r9
	mov %rcx, %r8
	mov %rsi, %rcx
	call *%rdi
	addq $32, %rsp
	RESTORE_XMM
	ret
ENDPROC(efi_call4)

ENTRY(efi_call5)
	SAVE_XMM
	subq $48, %rsp
	/* 5th argument goes on the stack, above the 32-byte shadow space. */
	mov %r9, 32(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	mov %rsi, %rcx
	call *%rdi
	addq $48, %rsp
	RESTORE_XMM
	ret
ENDPROC(efi_call5)

ENTRY(efi_call6)
	SAVE_XMM
	/* Fetch the 6th argument from the caller's frame (old %rsp saved
	   at (%rsp) by SAVE_XMM; the argument sits above its return addr). */
	mov (%rsp), %rax
	mov 8(%rax), %rax
	subq $48, %rsp
	mov %r9, 32(%rsp)
	mov %rax, 40(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	mov %rsi, %rcx
	call *%rdi
	addq $48, %rsp
	RESTORE_XMM
	ret
ENDPROC(efi_call6)
diff --git a/arch/x86/platform/iris/Makefile b/arch/x86/platform/iris/Makefile new file mode 100644 index 000000000000..db921983a102 --- /dev/null +++ b/arch/x86/platform/iris/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_X86_32_IRIS) += iris.o | |||
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c new file mode 100644 index 000000000000..1ba7f5ed8c9b --- /dev/null +++ b/arch/x86/platform/iris/iris.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Eurobraille/Iris power off support. | ||
3 | * | ||
4 | * Eurobraille's Iris machine is a PC with no APM or ACPI support. | ||
5 | * It is shutdown by a special I/O sequence which this module provides. | ||
6 | * | ||
7 | * Copyright (C) Shérab <Sebastien.Hinderer@ens-lyon.org> | ||
8 | * | ||
9 | * This program is free software ; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation ; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY ; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with the program ; if not, write to the Free Software | ||
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/pm.h> | ||
31 | #include <asm/io.h> | ||
32 | |||
33 | #define IRIS_GIO_BASE 0x340 | ||
34 | #define IRIS_GIO_INPUT IRIS_GIO_BASE | ||
35 | #define IRIS_GIO_OUTPUT (IRIS_GIO_BASE + 1) | ||
36 | #define IRIS_GIO_PULSE 0x80 /* First byte to send */ | ||
37 | #define IRIS_GIO_REST 0x00 /* Second byte to send */ | ||
38 | #define IRIS_GIO_NODEV 0xff /* Likely not an Iris */ | ||
39 | |||
40 | MODULE_LICENSE("GPL"); | ||
41 | MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>"); | ||
42 | MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille"); | ||
43 | MODULE_SUPPORTED_DEVICE("Eurobraille/Iris"); | ||
44 | |||
45 | static int force; | ||
46 | |||
47 | module_param(force, bool, 0); | ||
48 | MODULE_PARM_DESC(force, "Set to one to force poweroff handler installation."); | ||
49 | |||
/* Previous pm_power_off handler, restored on module unload. */
static void (*old_pm_power_off)(void);

/*
 * Power the machine off by driving the Iris GIO output port: a 0x80
 * pulse byte, an 850 ms delay, then the 0x00 rest byte.  The write
 * order and the delay are part of the hardware's shutdown sequence.
 */
static void iris_power_off(void)
{
	outb(IRIS_GIO_PULSE, IRIS_GIO_OUTPUT);
	msleep(850);
	outb(IRIS_GIO_REST, IRIS_GIO_OUTPUT);
}
58 | |||
59 | /* | ||
60 | * Before installing the power_off handler, try to make sure the OS is | ||
61 | * running on an Iris. Since Iris does not support DMI, this is done | ||
62 | * by reading its input port and seeing whether the read value is | ||
63 | * meaningful. | ||
64 | */ | ||
65 | static int iris_init(void) | ||
66 | { | ||
67 | unsigned char status; | ||
68 | if (force != 1) { | ||
69 | printk(KERN_ERR "The force parameter has not been set to 1 so the Iris poweroff handler will not be installed.\n"); | ||
70 | return -ENODEV; | ||
71 | } | ||
72 | status = inb(IRIS_GIO_INPUT); | ||
73 | if (status == IRIS_GIO_NODEV) { | ||
74 | printk(KERN_ERR "This machine does not seem to be an Iris. Power_off handler not installed.\n"); | ||
75 | return -ENODEV; | ||
76 | } | ||
77 | old_pm_power_off = pm_power_off; | ||
78 | pm_power_off = &iris_power_off; | ||
79 | printk(KERN_INFO "Iris power_off handler installed.\n"); | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
/*
 * Module unload: restore the pm_power_off handler that was active
 * before iris_init() replaced it.
 */
static void iris_exit(void)
{
	pm_power_off = old_pm_power_off;
	printk(KERN_INFO "Iris power_off handler uninstalled.\n");
}
89 | |||
90 | module_init(iris_init); | ||
91 | module_exit(iris_exit); | ||
diff --git a/arch/x86/platform/mrst/Makefile b/arch/x86/platform/mrst/Makefile new file mode 100644 index 000000000000..f61ccdd49341 --- /dev/null +++ b/arch/x86/platform/mrst/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_X86_MRST) += mrst.o | ||
2 | obj-$(CONFIG_X86_MRST) += vrtc.o | ||
3 | obj-$(CONFIG_EARLY_PRINTK_MRST) += early_printk_mrst.o | ||
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c new file mode 100644 index 000000000000..25bfdbb5b130 --- /dev/null +++ b/arch/x86/platform/mrst/early_printk_mrst.c | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * early_printk_mrst.c - early consoles for Intel MID platforms | ||
3 | * | ||
4 | * Copyright (c) 2008-2010, Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * This file implements two early consoles named mrst and hsu. | ||
14 | * mrst is based on Maxim3110 spi-uart device, it exists in both | ||
15 | * Moorestown and Medfield platforms, while hsu is based on a High | ||
16 | * Speed UART device which only exists in the Medfield platform | ||
17 | */ | ||
18 | |||
19 | #include <linux/serial_reg.h> | ||
20 | #include <linux/serial_mfd.h> | ||
21 | #include <linux/kmsg_dump.h> | ||
22 | #include <linux/console.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/io.h> | ||
27 | |||
28 | #include <asm/fixmap.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/mrst.h> | ||
31 | |||
32 | #define MRST_SPI_TIMEOUT 0x200000 | ||
33 | #define MRST_REGBASE_SPI0 0xff128000 | ||
34 | #define MRST_REGBASE_SPI1 0xff128400 | ||
35 | #define MRST_CLK_SPI0_REG 0xff11d86c | ||
36 | |||
37 | /* Bit fields in CTRLR0 */ | ||
38 | #define SPI_DFS_OFFSET 0 | ||
39 | |||
40 | #define SPI_FRF_OFFSET 4 | ||
41 | #define SPI_FRF_SPI 0x0 | ||
42 | #define SPI_FRF_SSP 0x1 | ||
43 | #define SPI_FRF_MICROWIRE 0x2 | ||
44 | #define SPI_FRF_RESV 0x3 | ||
45 | |||
46 | #define SPI_MODE_OFFSET 6 | ||
47 | #define SPI_SCPH_OFFSET 6 | ||
48 | #define SPI_SCOL_OFFSET 7 | ||
49 | #define SPI_TMOD_OFFSET 8 | ||
50 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ | ||
51 | #define SPI_TMOD_TO 0x1 /* xmit only */ | ||
52 | #define SPI_TMOD_RO 0x2 /* recv only */ | ||
53 | #define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ | ||
54 | |||
55 | #define SPI_SLVOE_OFFSET 10 | ||
56 | #define SPI_SRL_OFFSET 11 | ||
57 | #define SPI_CFS_OFFSET 12 | ||
58 | |||
59 | /* Bit fields in SR, 7 bits */ | ||
60 | #define SR_MASK 0x7f /* cover 7 bits */ | ||
61 | #define SR_BUSY (1 << 0) | ||
62 | #define SR_TF_NOT_FULL (1 << 1) | ||
63 | #define SR_TF_EMPT (1 << 2) | ||
64 | #define SR_RF_NOT_EMPT (1 << 3) | ||
65 | #define SR_RF_FULL (1 << 4) | ||
66 | #define SR_TX_ERR (1 << 5) | ||
67 | #define SR_DCOL (1 << 6) | ||
68 | |||
/*
 * Memory-mapped register file of the DesignWare SPI controller.
 * Field order mirrors the hardware register map (accessed via the
 * dw_readl/dw_writel macros below), so it must not be rearranged.
 */
struct dw_spi_reg {
	u32 ctrl0;
	u32 ctrl1;
	u32 ssienr;	/* controller enable */
	u32 mwcr;
	u32 ser;	/* slave (chip) select enable */
	u32 baudr;	/* clock divider */
	u32 txfltr;
	u32 rxfltr;
	u32 txflr;
	u32 rxflr;
	u32 sr;		/* status, see SR_* bits above */
	u32 imr;	/* interrupt mask */
	u32 isr;
	u32 risr;
	u32 txoicr;
	u32 rxoicr;
	u32 rxuicr;
	u32 msticr;
	u32 icr;
	u32 dmacr;
	u32 dmatdlr;
	u32 dmardlr;
	u32 idr;
	u32 version;

	/* Currently operates as 32 bits, though only the low 16 bits matter */
	u32 dr;
} __packed;
98 | |||
99 | #define dw_readl(dw, name) __raw_readl(&(dw)->name) | ||
100 | #define dw_writel(dw, name, val) __raw_writel((val), &(dw)->name) | ||
101 | |||
102 | /* Default use SPI0 register for mrst, we will detect Penwell and use SPI1 */ | ||
103 | static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0; | ||
104 | |||
105 | static u32 *pclk_spi0; | ||
106 | /* Always contains an accessible address, start with 0 */ | ||
107 | static struct dw_spi_reg *pspi; | ||
108 | |||
109 | static struct kmsg_dumper dw_dumper; | ||
110 | static int dumper_registered; | ||
111 | |||
112 | static void dw_kmsg_dump(struct kmsg_dumper *dumper, | ||
113 | enum kmsg_dump_reason reason, | ||
114 | const char *s1, unsigned long l1, | ||
115 | const char *s2, unsigned long l2) | ||
116 | { | ||
117 | int i; | ||
118 | |||
119 | /* When run to this, we'd better re-init the HW */ | ||
120 | mrst_early_console_init(); | ||
121 | |||
122 | for (i = 0; i < l1; i++) | ||
123 | early_mrst_console.write(&early_mrst_console, s1 + i, 1); | ||
124 | for (i = 0; i < l2; i++) | ||
125 | early_mrst_console.write(&early_mrst_console, s2 + i, 1); | ||
126 | } | ||
127 | |||
128 | /* Set the ratio rate to 115200, 8n1, IRQ disabled */ | ||
129 | static void max3110_write_config(void) | ||
130 | { | ||
131 | u16 config; | ||
132 | |||
133 | config = 0xc001; | ||
134 | dw_writel(pspi, dr, config); | ||
135 | } | ||
136 | |||
137 | /* Translate char to a eligible word and send to max3110 */ | ||
138 | static void max3110_write_data(char c) | ||
139 | { | ||
140 | u16 data; | ||
141 | |||
142 | data = 0x8000 | c; | ||
143 | dw_writel(pspi, dr, data); | ||
144 | } | ||
145 | |||
/*
 * Map and initialise the DW SPI controller driving the max3110
 * spi-uart, then register a kmsg dumper so a crash log is replayed
 * over it.  Safe to call repeatedly: the dumper is registered once.
 * Penwell routes the console uart on SPI1 instead of SPI0.
 */
void mrst_early_console_init(void)
{
	u32 ctrlr0 = 0;
	u32 spi0_cdiv;
	u32 freq; /* Frequency info only need be searched once */

	/* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
	pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
							MRST_CLK_SPI0_REG);
	spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
	freq = 100000000 / (spi0_cdiv + 1);

	if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL)
		mrst_spi_paddr = MRST_REGBASE_SPI1;

	pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
						mrst_spi_paddr);

	/* Disable SPI controller */
	dw_writel(pspi, ssienr, 0);

	/* Set control param, 8 bits, transmit only mode */
	ctrlr0 = dw_readl(pspi, ctrl0);

	ctrlr0 &= 0xfcc0;
	ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
		      | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
	dw_writel(pspi, ctrl0, ctrlr0);

	/*
	 * Change the spi0 clk to comply with 115200 bps, use 100000 to
	 * calculate the clk dividor to make the clock a little slower
	 * than real baud rate.
	 */
	dw_writel(pspi, baudr, freq/100000);

	/* Disable all INT for early phase */
	dw_writel(pspi, imr, 0x0);

	/* Set the cs to spi-uart */
	dw_writel(pspi, ser, 0x2);

	/* Enable the HW, the last step for HW init */
	dw_writel(pspi, ssienr, 0x1);

	/* Set the default configuration */
	max3110_write_config();

	/* Register the kmsg dumper */
	if (!dumper_registered) {
		dw_dumper.dump = dw_kmsg_dump;
		kmsg_dump_register(&dw_dumper);
		dumper_registered = 1;
	}
}
201 | |||
202 | /* Slave select should be called in the read/write function */ | ||
203 | static void early_mrst_spi_putc(char c) | ||
204 | { | ||
205 | unsigned int timeout; | ||
206 | u32 sr; | ||
207 | |||
208 | timeout = MRST_SPI_TIMEOUT; | ||
209 | /* Early putc needs to make sure the TX FIFO is not full */ | ||
210 | while (--timeout) { | ||
211 | sr = dw_readl(pspi, sr); | ||
212 | if (!(sr & SR_TF_NOT_FULL)) | ||
213 | cpu_relax(); | ||
214 | else | ||
215 | break; | ||
216 | } | ||
217 | |||
218 | if (!timeout) | ||
219 | pr_warning("MRST earlycon: timed out\n"); | ||
220 | else | ||
221 | max3110_write_data(c); | ||
222 | } | ||
223 | |||
224 | /* Early SPI only uses polling mode */ | ||
/*
 * Polled console write: emits at most @n characters, stopping early
 * at a NUL, and expands '\n' to "\r\n".
 */
static void early_mrst_spi_write(struct console *con, const char *str, unsigned n)
{
	unsigned pos;

	for (pos = 0; pos < n && str[pos]; pos++) {
		if (str[pos] == '\n')
			early_mrst_spi_putc('\r');
		early_mrst_spi_putc(str[pos]);
	}
}
236 | |||
/* Early console backed by the max3110 spi-uart (Moorestown/Medfield). */
struct console early_mrst_console = {
	.name = "earlymrst",
	.write = early_mrst_spi_write,
	.flags = CON_PRINTBUFFER,
	.index = -1,
};
243 | |||
244 | /* | ||
245 | * Following is the early console based on Medfield HSU (High | ||
246 | * Speed UART) device. | ||
247 | */ | ||
248 | #define HSU_PORT2_PADDR 0xffa28180 | ||
249 | |||
250 | static void __iomem *phsu; | ||
251 | |||
/*
 * Map and initialise HSU port 2 for use as an early console:
 * 115200 bps, 8n1, FIFOs enabled, pending interrupt status cleared.
 * NOTE(review): DLL=0x18 together with MUL=0x3600 presumably yields
 * 115200 from the HSU input clock -- confirm against the HSU datasheet.
 */
void hsu_early_console_init(void)
{
	u8 lcr;

	phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
							HSU_PORT2_PADDR);

	/* Disable FIFO */
	writeb(0x0, phsu + UART_FCR);

	/* Set to default 115200 bps, 8n1 */
	lcr = readb(phsu + UART_LCR);
	writeb((0x80 | lcr), phsu + UART_LCR);	/* DLAB on to reach divisor */
	writeb(0x18, phsu + UART_DLL);
	writeb(lcr,  phsu + UART_LCR);		/* DLAB back off */
	writel(0x3600, phsu + UART_MUL*4);

	writeb(0x8, phsu + UART_MCR);
	writeb(0x7, phsu + UART_FCR);
	writeb(0x3, phsu + UART_LCR);		/* 8 data bits, no parity, 1 stop */

	/* Clear IRQ status */
	readb(phsu + UART_LSR);
	readb(phsu + UART_RX);
	readb(phsu + UART_IIR);
	readb(phsu + UART_MSR);

	/* Enable FIFO */
	writeb(0x7, phsu + UART_FCR);
}
282 | |||
283 | #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | ||
284 | |||
285 | static void early_hsu_putc(char ch) | ||
286 | { | ||
287 | unsigned int timeout = 10000; /* 10ms */ | ||
288 | u8 status; | ||
289 | |||
290 | while (--timeout) { | ||
291 | status = readb(phsu + UART_LSR); | ||
292 | if (status & BOTH_EMPTY) | ||
293 | break; | ||
294 | udelay(1); | ||
295 | } | ||
296 | |||
297 | /* Only write the char when there was no timeout */ | ||
298 | if (timeout) | ||
299 | writeb(ch, phsu + UART_TX); | ||
300 | } | ||
301 | |||
/*
 * HSU console write: emits at most @n characters, stopping early at a
 * NUL, and expands '\n' to "\r\n".
 */
static void early_hsu_write(struct console *con, const char *str, unsigned n)
{
	unsigned pos;

	for (pos = 0; pos < n && str[pos]; pos++) {
		if (str[pos] == '\n')
			early_hsu_putc('\r');
		early_hsu_putc(str[pos]);
	}
}
313 | |||
/* Early console backed by the Medfield High Speed UART (port 2). */
struct console early_hsu_console = {
	.name = "earlyhsu",
	.write = early_hsu_write,
	.flags = CON_PRINTBUFFER,
	.index = -1,
};
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c new file mode 100644 index 000000000000..7000e74b3087 --- /dev/null +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -0,0 +1,811 @@ | |||
1 | /* | ||
2 | * mrst.c: Intel Moorestown platform specific setup code | ||
3 | * | ||
4 | * (C) Copyright 2008 Intel Corporation | ||
5 | * Author: Jacob Pan (jacob.jun.pan@intel.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; version 2 | ||
10 | * of the License. | ||
11 | */ | ||
12 | |||
13 | #define pr_fmt(fmt) "mrst: " fmt | ||
14 | |||
15 | #include <linux/init.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/sfi.h> | ||
18 | #include <linux/intel_pmic_gpio.h> | ||
19 | #include <linux/spi/spi.h> | ||
20 | #include <linux/i2c.h> | ||
21 | #include <linux/i2c/pca953x.h> | ||
22 | #include <linux/gpio_keys.h> | ||
23 | #include <linux/input.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/module.h> | ||
27 | |||
28 | #include <asm/setup.h> | ||
29 | #include <asm/mpspec_def.h> | ||
30 | #include <asm/hw_irq.h> | ||
31 | #include <asm/apic.h> | ||
32 | #include <asm/io_apic.h> | ||
33 | #include <asm/mrst.h> | ||
34 | #include <asm/mrst-vrtc.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/i8259.h> | ||
37 | #include <asm/intel_scu_ipc.h> | ||
38 | #include <asm/apb_timer.h> | ||
39 | #include <asm/reboot.h> | ||
40 | |||
41 | /* | ||
42 | * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock, | ||
43 | * cmdline option x86_mrst_timer can be used to override the configuration | ||
44 | * to prefer one or the other. | ||
45 | * at runtime, there are basically three timer configurations: | ||
46 | * 1. per cpu apbt clock only | ||
47 | * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only | ||
48 | * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast. | ||
49 | * | ||
50 | * by default (without cmdline option), platform code first detects cpu type | ||
51 | * to see if we are on lincroft or penwell, then set up both lapic or apbt | ||
52 | * clocks accordingly. | ||
53 | * i.e. by default, medfield uses configuration #2, moorestown uses #1. | ||
54 | * config #3 is supported but not recommended on medfield. | ||
55 | * | ||
56 | * rating and feature summary: | ||
57 | * lapic (with C3STOP) --------- 100 | ||
58 | * apbt (always-on) ------------ 110 | ||
59 | * lapic (always-on,ARAT) ------ 150 | ||
60 | */ | ||
61 | |||
62 | __cpuinitdata enum mrst_timer_options mrst_timer_options; | ||
63 | |||
64 | static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM]; | ||
65 | static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM]; | ||
66 | enum mrst_cpu_type __mrst_cpu_chip; | ||
67 | EXPORT_SYMBOL_GPL(__mrst_cpu_chip); | ||
68 | |||
69 | int sfi_mtimer_num; | ||
70 | |||
71 | struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX]; | ||
72 | EXPORT_SYMBOL_GPL(sfi_mrtc_array); | ||
73 | int sfi_mrtc_num; | ||
74 | |||
75 | /* parse all the mtimer info to a static mtimer array */ | ||
/*
 * SFI MTMR table handler: snapshot the firmware timer table into
 * sfi_mtimer_array (first call only) and register an edge-triggered,
 * active-high MP interrupt source for every timer that has an IRQ.
 * Always returns 0 (sfi_table_parse callback convention).
 */
static int __init sfi_parse_mtmr(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb;
	struct sfi_timer_table_entry *pentry;
	struct mpc_intsrc mp_irq;
	int totallen;

	sb = (struct sfi_table_simple *)table;
	if (!sfi_mtimer_num) {
		/* first invocation: copy the table out of SFI memory */
		sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
					struct sfi_timer_table_entry);
		pentry = (struct sfi_timer_table_entry *) sb->pentry;
		totallen = sfi_mtimer_num * sizeof(*pentry);
		memcpy(sfi_mtimer_array, pentry, totallen);
	}

	pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
	pentry = sfi_mtimer_array;
	/* note: totallen is reused below as a plain loop index */
	for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
		pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz,"
			" irq = %d\n", totallen, (u32)pentry->phys_addr,
			pentry->freq_hz, pentry->irq);
		if (!pentry->irq)
			continue;
		mp_irq.type = MP_INTSRC;
		mp_irq.irqtype = mp_INT;
		/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
		mp_irq.irqflag = 5;
		mp_irq.srcbus = MP_BUS_ISA;
		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
		mp_irq.dstapic = MP_APIC_ALL;
		mp_irq.dstirq = pentry->irq;
		mp_save_irq(&mp_irq);
	}

	return 0;
}
113 | |||
114 | struct sfi_timer_table_entry *sfi_get_mtmr(int hint) | ||
115 | { | ||
116 | int i; | ||
117 | if (hint < sfi_mtimer_num) { | ||
118 | if (!sfi_mtimer_usage[hint]) { | ||
119 | pr_debug("hint taken for timer %d irq %d\n",\ | ||
120 | hint, sfi_mtimer_array[hint].irq); | ||
121 | sfi_mtimer_usage[hint] = 1; | ||
122 | return &sfi_mtimer_array[hint]; | ||
123 | } | ||
124 | } | ||
125 | /* take the first timer available */ | ||
126 | for (i = 0; i < sfi_mtimer_num;) { | ||
127 | if (!sfi_mtimer_usage[i]) { | ||
128 | sfi_mtimer_usage[i] = 1; | ||
129 | return &sfi_mtimer_array[i]; | ||
130 | } | ||
131 | i++; | ||
132 | } | ||
133 | return NULL; | ||
134 | } | ||
135 | |||
136 | void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr) | ||
137 | { | ||
138 | int i; | ||
139 | for (i = 0; i < sfi_mtimer_num;) { | ||
140 | if (mtmr->irq == sfi_mtimer_array[i].irq) { | ||
141 | sfi_mtimer_usage[i] = 0; | ||
142 | return; | ||
143 | } | ||
144 | i++; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | /* parse all the mrtc info to a global mrtc array */ | ||
/*
 * SFI MRTC table handler: snapshot the firmware RTC table into
 * sfi_mrtc_array (first call only) and register a level-triggered,
 * active-low MP interrupt source for each RTC.  Always returns 0.
 */
int __init sfi_parse_mrtc(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb;
	struct sfi_rtc_table_entry *pentry;
	struct mpc_intsrc mp_irq;

	int totallen;

	sb = (struct sfi_table_simple *)table;
	if (!sfi_mrtc_num) {
		/* first invocation: copy the table out of SFI memory */
		sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
					struct sfi_rtc_table_entry);
		pentry = (struct sfi_rtc_table_entry *)sb->pentry;
		totallen = sfi_mrtc_num * sizeof(*pentry);
		memcpy(sfi_mrtc_array, pentry, totallen);
	}

	pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
	pentry = sfi_mrtc_array;
	/* note: totallen is reused below as a plain loop index */
	for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
		pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
			totallen, (u32)pentry->phys_addr, pentry->irq);
		mp_irq.type = MP_INTSRC;
		mp_irq.irqtype = mp_INT;
		mp_irq.irqflag = 0xf;	/* level trigger and active low */
		mp_irq.srcbus = MP_BUS_ISA;
		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
		mp_irq.dstapic = MP_APIC_ALL;
		mp_irq.dstirq = pentry->irq;
		mp_save_irq(&mp_irq);
	}
	return 0;
}
182 | |||
183 | static unsigned long __init mrst_calibrate_tsc(void) | ||
184 | { | ||
185 | unsigned long flags, fast_calibrate; | ||
186 | |||
187 | local_irq_save(flags); | ||
188 | fast_calibrate = apbt_quick_calibrate(); | ||
189 | local_irq_restore(flags); | ||
190 | |||
191 | if (fast_calibrate) | ||
192 | return fast_calibrate; | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
/*
 * Timer init hook: select the clockevent configuration described at
 * the top of this file.  Note the asymmetry: the default case with an
 * always-on lapic (ARAT) RETURNS early -- no APB timer needed -- while
 * every other path BREAKs and falls through to bring up an APB timer.
 */
static void __init mrst_time_init(void)
{
	sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
	switch (mrst_timer_options) {
	case MRST_TIMER_APBT_ONLY:
		break;
	case MRST_TIMER_LAPIC_APBT:
		x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
		x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
		break;
	default:
		/* no ARAT: lapic stops in deep C-states, keep the APB timer */
		if (!boot_cpu_has(X86_FEATURE_ARAT))
			break;
		x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
		x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
		return;
	}
	/* we need at least one APB timer */
	pre_init_apic_IRQ0();
	apbt_time_init();
}
218 | |||
219 | static void __cpuinit mrst_arch_setup(void) | ||
220 | { | ||
221 | if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) | ||
222 | __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL; | ||
223 | else if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x26) | ||
224 | __mrst_cpu_chip = MRST_CPU_CHIP_LINCROFT; | ||
225 | else { | ||
226 | pr_err("Unknown Moorestown CPU (%d:%d), default to Lincroft\n", | ||
227 | boot_cpu_data.x86, boot_cpu_data.x86_model); | ||
228 | __mrst_cpu_chip = MRST_CPU_CHIP_LINCROFT; | ||
229 | } | ||
230 | pr_debug("Moorestown CPU %s identified\n", | ||
231 | (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) ? | ||
232 | "Lincroft" : "Penwell"); | ||
233 | } | ||
234 | |||
235 | /* MID systems don't have i8042 controller */ | ||
static int mrst_i8042_detect(void)
{
	/* report "no i8042 present" unconditionally on MID hardware */
	return 0;
}
240 | |||
241 | /* Reboot and power off are handled by the SCU on a MID device */ | ||
static void mrst_power_off(void)
{
	/* SCU IPC command 0xf1, arg 1 = power off (arg 0 = reset below).
	 * NOTE(review): command encoding per SCU firmware -- confirm. */
	intel_scu_ipc_simple_command(0xf1, 1);
}
246 | |||
static void mrst_reboot(void)
{
	/* SCU IPC command 0xf1, arg 0 = reset the platform */
	intel_scu_ipc_simple_command(0xf1, 0);
}
251 | |||
252 | /* | ||
253 | * Moorestown specific x86_init function overrides and early setup | ||
254 | * calls. | ||
255 | */ | ||
/*
 * Install every Moorestown-specific x86_init/x86_platform hook and
 * disable the legacy PC facilities (PIC, BIOS MP tables, ROM probing)
 * that MID hardware does not have.  Runs very early from head code.
 */
void __init x86_mrst_early_setup(void)
{
	x86_init.resources.probe_roms = x86_init_noop;
	x86_init.resources.reserve_resources = x86_init_noop;

	x86_init.timers.timer_init = mrst_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;

	x86_init.irqs.pre_vector_init = x86_init_noop;

	x86_init.oem.arch_setup = mrst_arch_setup;

	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;

	x86_platform.calibrate_tsc = mrst_calibrate_tsc;
	x86_platform.i8042_detect = mrst_i8042_detect;
	x86_init.timers.wallclock_init = mrst_rtc_init;
	x86_init.pci.init = pci_mrst_init;
	x86_init.pci.fixup_irqs = x86_init_noop;

	/* no 8259 PIC on MID */
	legacy_pic = &null_legacy_pic;

	/* Moorestown specific power_off/restart method */
	pm_power_off = mrst_power_off;
	machine_ops.emergency_restart  = mrst_reboot;

	/* Avoid searching for BIOS MP tables */
	x86_init.mpparse.find_smp_config = x86_init_noop;
	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
	set_bit(MP_BUS_ISA, mp_bus_not_pci);
}
287 | |||
288 | /* | ||
289 | * if user does not want to use per CPU apb timer, just give it a lower rating | ||
290 | * than local apic timer and skip the late per cpu timer init. | ||
291 | */ | ||
292 | static inline int __init setup_x86_mrst_timer(char *arg) | ||
293 | { | ||
294 | if (!arg) | ||
295 | return -EINVAL; | ||
296 | |||
297 | if (strcmp("apbt_only", arg) == 0) | ||
298 | mrst_timer_options = MRST_TIMER_APBT_ONLY; | ||
299 | else if (strcmp("lapic_and_apbt", arg) == 0) | ||
300 | mrst_timer_options = MRST_TIMER_LAPIC_APBT; | ||
301 | else { | ||
302 | pr_warning("X86 MRST timer option %s not recognised" | ||
303 | " use x86_mrst_timer=apbt_only or lapic_and_apbt\n", | ||
304 | arg); | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | return 0; | ||
308 | } | ||
309 | __setup("x86_mrst_timer=", setup_x86_mrst_timer); | ||
310 | |||
311 | /* | ||
312 | * Parsing GPIO table first, since the DEVS table will need this table | ||
313 | * to map the pin name to the actual pin. | ||
314 | */ | ||
315 | static struct sfi_gpio_table_entry *gpio_table; | ||
316 | static int gpio_num_entry; | ||
317 | |||
318 | static int __init sfi_parse_gpio(struct sfi_table_header *table) | ||
319 | { | ||
320 | struct sfi_table_simple *sb; | ||
321 | struct sfi_gpio_table_entry *pentry; | ||
322 | int num, i; | ||
323 | |||
324 | if (gpio_table) | ||
325 | return 0; | ||
326 | sb = (struct sfi_table_simple *)table; | ||
327 | num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry); | ||
328 | pentry = (struct sfi_gpio_table_entry *)sb->pentry; | ||
329 | |||
330 | gpio_table = (struct sfi_gpio_table_entry *) | ||
331 | kmalloc(num * sizeof(*pentry), GFP_KERNEL); | ||
332 | if (!gpio_table) | ||
333 | return -1; | ||
334 | memcpy(gpio_table, pentry, num * sizeof(*pentry)); | ||
335 | gpio_num_entry = num; | ||
336 | |||
337 | pr_debug("GPIO pin info:\n"); | ||
338 | for (i = 0; i < num; i++, pentry++) | ||
339 | pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s," | ||
340 | " pin = %d\n", i, | ||
341 | pentry->controller_name, | ||
342 | pentry->pin_name, | ||
343 | pentry->pin_no); | ||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | static int get_gpio_by_name(const char *name) | ||
348 | { | ||
349 | struct sfi_gpio_table_entry *pentry = gpio_table; | ||
350 | int i; | ||
351 | |||
352 | if (!pentry) | ||
353 | return -1; | ||
354 | for (i = 0; i < gpio_num_entry; i++, pentry++) { | ||
355 | if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN)) | ||
356 | return pentry->pin_no; | ||
357 | } | ||
358 | return -1; | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * Here defines the array of devices platform data that IAFW would export | ||
363 | * through SFI "DEVS" table, we use name and type to match the device and | ||
364 | * its platform data. | ||
365 | */ | ||
struct devs_id {
	char name[SFI_NAME_LEN + 1];	/* SFI device name to match */
	u8 type;			/* SFI_DEV_TYPE_* bus type */
	u8 delay;	/* nonzero: defer registration until SCU is up */
	void *(*get_platform_data)(void *info);	/* builds platform data */
};
372 | |||
373 | /* the offset for the mapping of global gpio pin to irq */ | ||
374 | #define MRST_IRQ_OFFSET 0x100 | ||
375 | |||
376 | static void __init *pmic_gpio_platform_data(void *info) | ||
377 | { | ||
378 | static struct intel_pmic_gpio_platform_data pmic_gpio_pdata; | ||
379 | int gpio_base = get_gpio_by_name("pmic_gpio_base"); | ||
380 | |||
381 | if (gpio_base == -1) | ||
382 | gpio_base = 64; | ||
383 | pmic_gpio_pdata.gpio_base = gpio_base; | ||
384 | pmic_gpio_pdata.irq_base = gpio_base + MRST_IRQ_OFFSET; | ||
385 | pmic_gpio_pdata.gpiointr = 0xffffeff8; | ||
386 | |||
387 | return &pmic_gpio_pdata; | ||
388 | } | ||
389 | |||
390 | static void __init *max3111_platform_data(void *info) | ||
391 | { | ||
392 | struct spi_board_info *spi_info = info; | ||
393 | int intr = get_gpio_by_name("max3111_int"); | ||
394 | |||
395 | if (intr == -1) | ||
396 | return NULL; | ||
397 | spi_info->irq = intr + MRST_IRQ_OFFSET; | ||
398 | return NULL; | ||
399 | } | ||
400 | |||
401 | /* we have multiple max7315 on the board ... */ | ||
402 | #define MAX7315_NUM 2 | ||
static void __init *max7315_platform_data(void *info)
{
	static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
	static int nr;	/* instances handed out so far (persists across calls) */
	struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
	struct i2c_board_info *i2c_info = info;
	int gpio_base, intr;
	char base_pin_name[SFI_NAME_LEN + 1];
	char intr_pin_name[SFI_NAME_LEN + 1];

	if (nr == MAX7315_NUM) {
		pr_err("too many max7315s, we only support %d\n",
				MAX7315_NUM);
		return NULL;
	}
	/* we have several max7315 on the board, we only need load several
	 * instances of the same pca953x driver to cover them
	 */
	strcpy(i2c_info->type, "max7315");
	/*
	 * Pin naming convention: the first instance uses the bare names
	 * "max7315_base"/"max7315_int"; later ones use "max7315_%d_..."
	 * where %d is nr AFTER the post-increment (so the second device
	 * looks up "max7315_2_base", matching "i2c_max7315_2" above).
	 */
	if (nr++) {
		sprintf(base_pin_name, "max7315_%d_base", nr);
		sprintf(intr_pin_name, "max7315_%d_int", nr);
	} else {
		strcpy(base_pin_name, "max7315_base");
		strcpy(intr_pin_name, "max7315_int");
	}

	gpio_base = get_gpio_by_name(base_pin_name);
	intr = get_gpio_by_name(intr_pin_name);

	if (gpio_base == -1)
		return NULL;
	max7315->gpio_base = gpio_base;
	if (intr != -1) {
		i2c_info->irq = intr + MRST_IRQ_OFFSET;
		max7315->irq_base = gpio_base + MRST_IRQ_OFFSET;
	} else {
		/* no interrupt pin wired for this instance */
		i2c_info->irq = -1;
		max7315->irq_base = -1;
	}
	return max7315;
}
445 | |||
446 | static void __init *emc1403_platform_data(void *info) | ||
447 | { | ||
448 | static short intr2nd_pdata; | ||
449 | struct i2c_board_info *i2c_info = info; | ||
450 | int intr = get_gpio_by_name("thermal_int"); | ||
451 | int intr2nd = get_gpio_by_name("thermal_alert"); | ||
452 | |||
453 | if (intr == -1 || intr2nd == -1) | ||
454 | return NULL; | ||
455 | |||
456 | i2c_info->irq = intr + MRST_IRQ_OFFSET; | ||
457 | intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET; | ||
458 | |||
459 | return &intr2nd_pdata; | ||
460 | } | ||
461 | |||
462 | static void __init *lis331dl_platform_data(void *info) | ||
463 | { | ||
464 | static short intr2nd_pdata; | ||
465 | struct i2c_board_info *i2c_info = info; | ||
466 | int intr = get_gpio_by_name("accel_int"); | ||
467 | int intr2nd = get_gpio_by_name("accel_2"); | ||
468 | |||
469 | if (intr == -1 || intr2nd == -1) | ||
470 | return NULL; | ||
471 | |||
472 | i2c_info->irq = intr + MRST_IRQ_OFFSET; | ||
473 | intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET; | ||
474 | |||
475 | return &intr2nd_pdata; | ||
476 | } | ||
477 | |||
/* Stub for devices that need no platform data at all. */
static void __init *no_platform_data(void *info)
{
	return NULL;
}
482 | |||
/* Name/type -> platform-data table; terminated by the empty entry. */
static const struct devs_id __initconst device_ids[] = {
	{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
	{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
	{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
	{"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
	{"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
	{"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
	{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
	{"msic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
	{},
};
494 | |||
495 | #define MAX_IPCDEVS 24 | ||
496 | static struct platform_device *ipc_devs[MAX_IPCDEVS]; | ||
497 | static int ipc_next_dev; | ||
498 | |||
499 | #define MAX_SCU_SPI 24 | ||
500 | static struct spi_board_info *spi_devs[MAX_SCU_SPI]; | ||
501 | static int spi_next_dev; | ||
502 | |||
503 | #define MAX_SCU_I2C 24 | ||
504 | static struct i2c_board_info *i2c_devs[MAX_SCU_I2C]; | ||
505 | static int i2c_bus[MAX_SCU_I2C]; | ||
506 | static int i2c_next_dev; | ||
507 | |||
508 | static void __init intel_scu_device_register(struct platform_device *pdev) | ||
509 | { | ||
510 | if(ipc_next_dev == MAX_IPCDEVS) | ||
511 | pr_err("too many SCU IPC devices"); | ||
512 | else | ||
513 | ipc_devs[ipc_next_dev++] = pdev; | ||
514 | } | ||
515 | |||
516 | static void __init intel_scu_spi_device_register(struct spi_board_info *sdev) | ||
517 | { | ||
518 | struct spi_board_info *new_dev; | ||
519 | |||
520 | if (spi_next_dev == MAX_SCU_SPI) { | ||
521 | pr_err("too many SCU SPI devices"); | ||
522 | return; | ||
523 | } | ||
524 | |||
525 | new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL); | ||
526 | if (!new_dev) { | ||
527 | pr_err("failed to alloc mem for delayed spi dev %s\n", | ||
528 | sdev->modalias); | ||
529 | return; | ||
530 | } | ||
531 | memcpy(new_dev, sdev, sizeof(*sdev)); | ||
532 | |||
533 | spi_devs[spi_next_dev++] = new_dev; | ||
534 | } | ||
535 | |||
536 | static void __init intel_scu_i2c_device_register(int bus, | ||
537 | struct i2c_board_info *idev) | ||
538 | { | ||
539 | struct i2c_board_info *new_dev; | ||
540 | |||
541 | if (i2c_next_dev == MAX_SCU_I2C) { | ||
542 | pr_err("too many SCU I2C devices"); | ||
543 | return; | ||
544 | } | ||
545 | |||
546 | new_dev = kzalloc(sizeof(*idev), GFP_KERNEL); | ||
547 | if (!new_dev) { | ||
548 | pr_err("failed to alloc mem for delayed i2c dev %s\n", | ||
549 | idev->type); | ||
550 | return; | ||
551 | } | ||
552 | memcpy(new_dev, idev, sizeof(*idev)); | ||
553 | |||
554 | i2c_bus[i2c_next_dev] = bus; | ||
555 | i2c_devs[i2c_next_dev++] = new_dev; | ||
556 | } | ||
557 | |||
/*
 * Called by the IPC driver once the SCU is up: register every queued
 * platform, SPI and I2C device.  An I2C device whose adapter already
 * exists is instantiated directly; otherwise its board info is handed
 * to the i2c core for when the adapter appears.
 */
void intel_scu_devices_create(void)
{
	int i;

	for (i = 0; i < ipc_next_dev; i++)
		platform_device_add(ipc_devs[i]);

	for (i = 0; i < spi_next_dev; i++)
		spi_register_board_info(spi_devs[i], 1);

	for (i = 0; i < i2c_next_dev; i++) {
		struct i2c_adapter *adapter;
		struct i2c_client *client;

		adapter = i2c_get_adapter(i2c_bus[i]);
		if (adapter) {
			client = i2c_new_device(adapter, i2c_devs[i]);
			if (!client)
				pr_err("can't create i2c device %s\n",
					i2c_devs[i]->type);
		} else
			i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
	}
}
EXPORT_SYMBOL_GPL(intel_scu_devices_create);
584 | |||
585 | /* Called by IPC driver */ | ||
586 | void intel_scu_devices_destroy(void) | ||
587 | { | ||
588 | int i; | ||
589 | |||
590 | for (i = 0; i < ipc_next_dev; i++) | ||
591 | platform_device_del(ipc_devs[i]); | ||
592 | } | ||
593 | EXPORT_SYMBOL_GPL(intel_scu_devices_destroy); | ||
594 | |||
/*
 * Attach an IRQ resource to a freshly allocated platform device.
 * The static buffer is reused on every call, which is safe only
 * because __init code runs single-threaded; note that
 * platform_device_add_resources() copies the resource anyway.
 */
static void __init install_irq_resource(struct platform_device *pdev, int irq)
{
	/* Single threaded */
	static struct resource __initdata res = {
		.name = "IRQ",
		.flags = IORESOURCE_IRQ,
	};
	res.start = irq;
	platform_device_add_resources(pdev, &res, 1);
}
605 | |||
/*
 * Look up an SFI IPC device in the device_ids table, fetch its platform
 * data (NULL if no table entry matches) and hand it to the SCU-deferred
 * registration path.
 */
static void __init sfi_handle_ipc_dev(struct platform_device *pdev)
{
	const struct devs_id *dev = device_ids;
	void *pdata = NULL;

	while (dev->name[0]) {
		if (dev->type == SFI_DEV_TYPE_IPC &&
			!strncmp(dev->name, pdev->name, SFI_NAME_LEN)) {
			pdata = dev->get_platform_data(pdev);
			break;
		}
		dev++;
	}
	pdev->dev.platform_data = pdata;
	/* IPC devices always wait for the SCU (unlike SPI/I2C below) */
	intel_scu_device_register(pdev);
}
622 | |||
/*
 * Look up an SFI SPI device in the device_ids table and register it,
 * either immediately or deferred until the SCU is up.
 */
static void __init sfi_handle_spi_dev(struct spi_board_info *spi_info)
{
	const struct devs_id *dev = device_ids;
	void *pdata = NULL;

	while (dev->name[0]) {
		if (dev->type == SFI_DEV_TYPE_SPI &&
			!strncmp(dev->name, spi_info->modalias, SFI_NAME_LEN)) {
			pdata = dev->get_platform_data(spi_info);
			break;
		}
		dev++;
	}
	spi_info->platform_data = pdata;
	/*
	 * If no entry matched, 'dev' is the zero-filled sentinel whose
	 * .delay is 0, so unknown devices register immediately.
	 */
	if (dev->delay)
		intel_scu_spi_device_register(spi_info);
	else
		spi_register_board_info(spi_info, 1);
}
642 | |||
643 | static void __init sfi_handle_i2c_dev(int bus, struct i2c_board_info *i2c_info) | ||
644 | { | ||
645 | const struct devs_id *dev = device_ids; | ||
646 | void *pdata = NULL; | ||
647 | |||
648 | while (dev->name[0]) { | ||
649 | if (dev->type == SFI_DEV_TYPE_I2C && | ||
650 | !strncmp(dev->name, i2c_info->type, SFI_NAME_LEN)) { | ||
651 | pdata = dev->get_platform_data(i2c_info); | ||
652 | break; | ||
653 | } | ||
654 | dev++; | ||
655 | } | ||
656 | i2c_info->platform_data = pdata; | ||
657 | |||
658 | if (dev->delay) | ||
659 | intel_scu_i2c_device_register(bus, i2c_info); | ||
660 | else | ||
661 | i2c_register_board_info(bus, i2c_info, 1); | ||
662 | } | ||
663 | |||
664 | |||
/*
 * SFI DEVS table parser: walks every firmware-described device entry
 * and dispatches it to the IPC/SPI/I2C handler above.  UART and HSI
 * entries are currently ignored.
 */
static int __init sfi_parse_devs(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb;
	struct sfi_device_table_entry *pentry;
	struct spi_board_info spi_info;
	struct i2c_board_info i2c_info;
	struct platform_device *pdev;
	int num, i, bus;
	int ioapic;
	struct io_apic_irq_attr irq_attr;

	sb = (struct sfi_table_simple *)table;
	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
	pentry = (struct sfi_device_table_entry *)sb->pentry;

	for (i = 0; i < num; i++, pentry++) {
		/* 0xff means "no interrupt" in the SFI entry */
		if (pentry->irq != (u8)0xff) { /* native RTE case */
			/* these SPI2 devices are not exposed to system as PCI
			 * devices, but they have separate RTE entry in IOAPIC
			 * so we have to enable them one by one here
			 */
			ioapic = mp_find_ioapic(pentry->irq);
			irq_attr.ioapic = ioapic;
			irq_attr.ioapic_pin = pentry->irq;
			irq_attr.trigger = 1;	/* level */
			irq_attr.polarity = 1;	/* active low */
			io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
		}
		switch (pentry->type) {
		case SFI_DEV_TYPE_IPC:
			/* ID as IRQ is a hack that will go away */
			pdev = platform_device_alloc(pentry->name, pentry->irq);
			if (pdev == NULL) {
				pr_err("out of memory for SFI platform device '%s'.\n",
							pentry->name);
				continue;
			}
			install_irq_resource(pdev, pentry->irq);
			pr_debug("info[%2d]: IPC bus, name = %16.16s, "
				"irq = 0x%2x\n", i, pentry->name, pentry->irq);
			sfi_handle_ipc_dev(pdev);
			break;
		case SFI_DEV_TYPE_SPI:
			memset(&spi_info, 0, sizeof(spi_info));
			/* NOTE(review): strncpy may leave modalias unterminated
			 * if the SFI name fills all SFI_NAME_LEN bytes — relies
			 * on modalias being larger; confirm against headers. */
			strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
			spi_info.irq = pentry->irq;
			spi_info.bus_num = pentry->host_num;
			spi_info.chip_select = pentry->addr;
			spi_info.max_speed_hz = pentry->max_freq;
			pr_debug("info[%2d]: SPI bus = %d, name = %16.16s, "
				"irq = 0x%2x, max_freq = %d, cs = %d\n", i,
				spi_info.bus_num,
				spi_info.modalias,
				spi_info.irq,
				spi_info.max_speed_hz,
				spi_info.chip_select);
			sfi_handle_spi_dev(&spi_info);
			break;
		case SFI_DEV_TYPE_I2C:
			memset(&i2c_info, 0, sizeof(i2c_info));
			bus = pentry->host_num;
			strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
			i2c_info.irq = pentry->irq;
			i2c_info.addr = pentry->addr;
			pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
				"irq = 0x%2x, addr = 0x%x\n", i, bus,
				i2c_info.type,
				i2c_info.irq,
				i2c_info.addr);
			sfi_handle_i2c_dev(bus, &i2c_info);
			break;
		case SFI_DEV_TYPE_UART:
		case SFI_DEV_TYPE_HSI:
		default:
			;	/* not handled yet */
		}
	}
	return 0;
}
744 | |||
/*
 * Parse the SFI GPIO and DEVS firmware tables and create the devices
 * they describe.  Runs at arch_initcall time, before driver initcalls.
 */
static int __init mrst_platform_init(void)
{
	sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
	sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
	return 0;
}
arch_initcall(mrst_platform_init);
752 | |||
/*
 * we will search these buttons in SFI GPIO table (by name)
 * and register them dynamically. Please add all possible
 * buttons here, we will shrink them if no GPIO found.
 *
 * Fields appear to map to struct gpio_keys_button in declaration
 * order: code, gpio (-1 = filled in by pb_keys_init), active_low,
 * desc, type, wakeup, debounce — TODO confirm against the header.
 */
static struct gpio_keys_button gpio_button[] = {
	{KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000},
	{KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20},
	{KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20},
	{SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
	{KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
	{KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
	{KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
	{KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
	{SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
	{SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20},
};

static struct gpio_keys_platform_data mrst_gpio_keys = {
	.buttons	= gpio_button,
	.rep		= 1,
	.nbuttons	= -1, /* will fill it after search */
};

/* gpio-keys platform device carrying the table above */
static struct platform_device pb_device = {
	.name		= "gpio-keys",
	.id		= -1,
	.dev		= {
		.platform_data	= &mrst_gpio_keys,
	},
};
784 | |||
785 | /* | ||
786 | * Shrink the non-existent buttons, register the gpio button | ||
787 | * device if there is some | ||
788 | */ | ||
789 | static int __init pb_keys_init(void) | ||
790 | { | ||
791 | struct gpio_keys_button *gb = gpio_button; | ||
792 | int i, num, good = 0; | ||
793 | |||
794 | num = sizeof(gpio_button) / sizeof(struct gpio_keys_button); | ||
795 | for (i = 0; i < num; i++) { | ||
796 | gb[i].gpio = get_gpio_by_name(gb[i].desc); | ||
797 | if (gb[i].gpio == -1) | ||
798 | continue; | ||
799 | |||
800 | if (i != good) | ||
801 | gb[good] = gb[i]; | ||
802 | good++; | ||
803 | } | ||
804 | |||
805 | if (good) { | ||
806 | mrst_gpio_keys.nbuttons = good; | ||
807 | return platform_device_register(&pb_device); | ||
808 | } | ||
809 | return 0; | ||
810 | } | ||
811 | late_initcall(pb_keys_init); | ||
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c new file mode 100644 index 000000000000..73d70d65e76e --- /dev/null +++ b/arch/x86/platform/mrst/vrtc.c | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * vrtc.c: Driver for virtual RTC device on Intel MID platform | ||
3 | * | ||
4 | * (C) Copyright 2009 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | * | ||
11 | * Note: | ||
12 | * VRTC is emulated by system controller firmware, the real HW | ||
13 | * RTC is located in the PMIC device. SCU FW shadows PMIC RTC | ||
14 | * in a memory mapped IO space that is visible to the host IA | ||
15 | * processor. | ||
16 | * | ||
17 | * This driver is based on RTC CMOS driver. | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/sfi.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | |||
25 | #include <asm/mrst.h> | ||
26 | #include <asm/mrst-vrtc.h> | ||
27 | #include <asm/time.h> | ||
28 | #include <asm/fixmap.h> | ||
29 | |||
30 | static unsigned char __iomem *vrtc_virt_base; | ||
31 | |||
/*
 * Read one vRTC register.  Returns 0xff for an out-of-range register
 * or when the vRTC MMIO window has not been mapped yet.  Registers are
 * spaced 4 bytes apart in the SCU's MMIO window, hence the << 2.
 */
unsigned char vrtc_cmos_read(unsigned char reg)
{
	unsigned char retval;

	/* vRTC's registers range from 0x0 to 0xD */
	if (reg > 0xd || !vrtc_virt_base)
		return 0xff;

	lock_cmos_prefix(reg);
	retval = __raw_readb(vrtc_virt_base + (reg << 2));
	lock_cmos_suffix(reg);
	return retval;
}
EXPORT_SYMBOL_GPL(vrtc_cmos_read);
46 | |||
47 | void vrtc_cmos_write(unsigned char val, unsigned char reg) | ||
48 | { | ||
49 | if (reg > 0xd || !vrtc_virt_base) | ||
50 | return; | ||
51 | |||
52 | lock_cmos_prefix(reg); | ||
53 | __raw_writeb(val, vrtc_virt_base + (reg << 2)); | ||
54 | lock_cmos_suffix(reg); | ||
55 | } | ||
56 | EXPORT_SYMBOL_GPL(vrtc_cmos_write); | ||
57 | |||
/*
 * Read the wallclock time from the vRTC and convert it to seconds
 * since the epoch.  Spins until any update in progress completes.
 * NOTE(review): the individual register reads are not done under one
 * lock, so a rollover between reads could in principle be observed —
 * presumably acceptable here; confirm against the RTC-CMOS driver.
 */
unsigned long vrtc_get_time(void)
{
	u8 sec, min, hour, mday, mon;
	u32 year;

	/* wait out an in-progress firmware update of the shadow RTC */
	while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
		cpu_relax();

	sec = vrtc_cmos_read(RTC_SECONDS);
	min = vrtc_cmos_read(RTC_MINUTES);
	hour = vrtc_cmos_read(RTC_HOURS);
	mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
	mon = vrtc_cmos_read(RTC_MONTH);
	year = vrtc_cmos_read(RTC_YEAR);

	/* vRTC YEAR reg contains the offset to 1960 */
	year += 1960;

	printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
		"mon: %d year: %d\n", sec, min, hour, mday, mon, year);

	return mktime(year, mon, mday, hour, min, sec);
}
81 | |||
/* Only care about the minutes and seconds */
int vrtc_set_mmss(unsigned long nowtime)
{
	int real_sec, real_min;
	int vrtc_min;

	vrtc_min = vrtc_cmos_read(RTC_MINUTES);

	real_sec = nowtime % 60;
	real_min = nowtime / 60;
	/*
	 * If the system minute is more than ~15 minutes away from the
	 * RTC minute, assume we straddled a half-hour timezone boundary
	 * and shift by 30 minutes (same trick as mach_set_rtc_mmss).
	 */
	if (((abs(real_min - vrtc_min) + 15)/30) & 1)
		real_min += 30;
	real_min %= 60;

	vrtc_cmos_write(real_sec, RTC_SECONDS);
	vrtc_cmos_write(real_min, RTC_MINUTES);
	return 0;	/* always reports success */
}
100 | |||
/*
 * Locate the vRTC via the SFI MRTC table, map its MMIO window through
 * a fixmap, and hook it up as the platform wallclock source.  Bails
 * out quietly (leaving the default wallclock ops) if no vRTC exists.
 */
void __init mrst_rtc_init(void)
{
	unsigned long vrtc_paddr;

	sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);

	vrtc_paddr = sfi_mrtc_array[0].phys_addr;
	if (!sfi_mrtc_num || !vrtc_paddr)
		return;

	vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC,
								vrtc_paddr);
	x86_platform.get_wallclock = vrtc_get_time;
	x86_platform.set_wallclock = vrtc_set_mmss;
}
116 | |||
117 | /* | ||
118 | * The Moorestown platform has a memory mapped virtual RTC device that emulates | ||
119 | * the programming interface of the RTC. | ||
120 | */ | ||
121 | |||
/* MEM/IRQ resources for the rtc_mrst driver; start/end values are
 * filled in from the SFI MRTC table by mrst_device_create(). */
static struct resource vrtc_resources[] = {
	[0] = {
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.flags	= IORESOURCE_IRQ,
	}
};

static struct platform_device vrtc_device = {
	.name		= "rtc_mrst",
	.id		= -1,
	.resource	= vrtc_resources,
	.num_resources	= ARRAY_SIZE(vrtc_resources),
};
137 | |||
138 | /* Register the RTC device if appropriate */ | ||
139 | static int __init mrst_device_create(void) | ||
140 | { | ||
141 | /* No Moorestown, no device */ | ||
142 | if (!mrst_identify_cpu()) | ||
143 | return -ENODEV; | ||
144 | /* No timer, no device */ | ||
145 | if (!sfi_mrtc_num) | ||
146 | return -ENODEV; | ||
147 | |||
148 | /* iomem resource */ | ||
149 | vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr; | ||
150 | vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr + | ||
151 | MRST_VRTC_MAP_SZ; | ||
152 | /* irq resource */ | ||
153 | vrtc_resources[1].start = sfi_mrtc_array[0].irq; | ||
154 | vrtc_resources[1].end = sfi_mrtc_array[0].irq; | ||
155 | |||
156 | return platform_device_register(&vrtc_device); | ||
157 | } | ||
158 | |||
159 | module_init(mrst_device_create); | ||
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile new file mode 100644 index 000000000000..81c5e2165c24 --- /dev/null +++ b/arch/x86/platform/olpc/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_OLPC) += olpc.o olpc_ofw.o olpc_dt.o | ||
2 | obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o | ||
diff --git a/arch/x86/platform/olpc/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c new file mode 100644 index 000000000000..ab81fb271760 --- /dev/null +++ b/arch/x86/platform/olpc/olpc-xo1.c | |||
@@ -0,0 +1,146 @@ | |||
1 | /* | ||
2 | * Support for features of the OLPC XO-1 laptop | ||
3 | * | ||
4 | * Copyright (C) 2010 Andres Salomon <dilinger@queued.net> | ||
5 | * Copyright (C) 2010 One Laptop per Child | ||
6 | * Copyright (C) 2006 Red Hat, Inc. | ||
7 | * Copyright (C) 2006 Advanced Micro Devices, Inc. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/pm.h> | ||
18 | #include <linux/mfd/core.h> | ||
19 | |||
20 | #include <asm/io.h> | ||
21 | #include <asm/olpc.h> | ||
22 | |||
23 | #define DRV_NAME "olpc-xo1" | ||
24 | |||
25 | /* PMC registers (PMS block) */ | ||
26 | #define PM_SCLK 0x10 | ||
27 | #define PM_IN_SLPCTL 0x20 | ||
28 | #define PM_WKXD 0x34 | ||
29 | #define PM_WKD 0x30 | ||
30 | #define PM_SSC 0x54 | ||
31 | |||
32 | /* PM registers (ACPI block) */ | ||
33 | #define PM1_CNT 0x08 | ||
34 | #define PM_GPE0_STS 0x18 | ||
35 | |||
36 | static unsigned long acpi_base; | ||
37 | static unsigned long pms_base; | ||
38 | |||
/*
 * Poweroff handler installed into pm_power_off once both the PMS and
 * ACPI I/O bases are known: program the CS5536 sleep controls, clear
 * pending wake/status bits, then assert SLP_EN to cut power.
 */
static void xo1_power_off(void)
{
	printk(KERN_INFO "OLPC XO-1 power off sequence...\n");

	/* Enable all of these controls with 0 delay */
	outl(0x40000000, pms_base + PM_SCLK);
	outl(0x40000000, pms_base + PM_IN_SLPCTL);
	outl(0x40000000, pms_base + PM_WKXD);
	outl(0x40000000, pms_base + PM_WKD);

	/* Clear status bits (possibly unnecessary) */
	outl(0x0002ffff, pms_base  + PM_SSC);
	outl(0xffffffff, acpi_base + PM_GPE0_STS);

	/* Write SLP_EN bit to start the machinery */
	outl(0x00002000, acpi_base + PM1_CNT);
}
56 | |||
/*
 * Shared probe for the two MFD cells this module binds (cs5535-pms and
 * olpc-xo1-pm-acpi).  Records each cell's I/O base; once both have
 * probed, the poweroff hook can be installed.
 */
static int __devinit olpc_xo1_probe(struct platform_device *pdev)
{
	struct resource *res;
	int err;

	/* don't run on non-XOs */
	if (!machine_is_olpc())
		return -ENODEV;

	err = mfd_cell_enable(pdev);
	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't fetch device resource info\n");
		return -EIO;
	}
	/* which cell probed is distinguished by its device name */
	if (strcmp(pdev->name, "cs5535-pms") == 0)
		pms_base = res->start;
	else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
		acpi_base = res->start;

	/* If we have both addresses, we can override the poweroff hook */
	if (pms_base && acpi_base) {
		pm_power_off = xo1_power_off;
		printk(KERN_INFO "OLPC XO-1 support registered\n");
	}

	return 0;
}
88 | |||
89 | static int __devexit olpc_xo1_remove(struct platform_device *pdev) | ||
90 | { | ||
91 | mfd_cell_disable(pdev); | ||
92 | |||
93 | if (strcmp(pdev->name, "cs5535-pms") == 0) | ||
94 | pms_base = 0; | ||
95 | else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0) | ||
96 | acpi_base = 0; | ||
97 | |||
98 | pm_power_off = NULL; | ||
99 | return 0; | ||
100 | } | ||
101 | |||
/* Both drivers share one probe/remove pair; olpc_xo1_probe tells the
 * two cells apart by pdev->name. */
static struct platform_driver cs5535_pms_drv = {
	.driver = {
		.name = "cs5535-pms",
		.owner = THIS_MODULE,
	},
	.probe = olpc_xo1_probe,
	.remove = __devexit_p(olpc_xo1_remove),
};

static struct platform_driver cs5535_acpi_drv = {
	.driver = {
		.name = "olpc-xo1-pm-acpi",
		.owner = THIS_MODULE,
	},
	.probe = olpc_xo1_probe,
	.remove = __devexit_p(olpc_xo1_remove),
};
119 | |||
/*
 * Register both platform drivers; if the second registration fails,
 * unwind the first so the module loads all-or-nothing.
 */
static int __init olpc_xo1_init(void)
{
	int r;

	r = platform_driver_register(&cs5535_pms_drv);
	if (r)
		return r;

	r = platform_driver_register(&cs5535_acpi_drv);
	if (r)
		platform_driver_unregister(&cs5535_pms_drv);

	return r;
}
134 | |||
static void __exit olpc_xo1_exit(void)
{
	/* unregister in reverse order of registration */
	platform_driver_unregister(&cs5535_acpi_drv);
	platform_driver_unregister(&cs5535_pms_drv);
}

MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cs5535-pms");

module_init(olpc_xo1_init);
module_exit(olpc_xo1_exit);
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c new file mode 100644 index 000000000000..0060fd59ea00 --- /dev/null +++ b/arch/x86/platform/olpc/olpc.c | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * Support for the OLPC DCON and OLPC EC access | ||
3 | * | ||
4 | * Copyright © 2006 Advanced Micro Devices, Inc. | ||
5 | * Copyright © 2007-2008 Andres Salomon <dilinger@debian.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/of.h> | ||
22 | |||
23 | #include <asm/geode.h> | ||
24 | #include <asm/setup.h> | ||
25 | #include <asm/olpc.h> | ||
26 | #include <asm/olpc_ofw.h> | ||
27 | |||
28 | struct olpc_platform_t olpc_platform_info; | ||
29 | EXPORT_SYMBOL_GPL(olpc_platform_info); | ||
30 | |||
31 | static DEFINE_SPINLOCK(ec_lock); | ||
32 | |||
33 | /* what the timeout *should* be (in ms) */ | ||
34 | #define EC_BASE_TIMEOUT 20 | ||
35 | |||
36 | /* the timeout that bugs in the EC might force us to actually use */ | ||
37 | static int ec_timeout = EC_BASE_TIMEOUT; | ||
38 | |||
/*
 * Parse the "olpc_ec_timeout=" kernel parameter, overriding the EC
 * command timeout (in ms).  Invalid values fall back to the default.
 * Returns 1 so the option is not passed on to init.
 */
static int __init olpc_ec_timeout_set(char *str)
{
	if (get_option(&str, &ec_timeout) != 1) {
		ec_timeout = EC_BASE_TIMEOUT;
		printk(KERN_ERR "olpc-ec: invalid argument to "
				"'olpc_ec_timeout=', ignoring!\n");
	}
	printk(KERN_DEBUG "olpc-ec: using %d ms delay for EC commands.\n",
			ec_timeout);
	return 1;
}
__setup("olpc_ec_timeout=", olpc_ec_timeout_set);
51 | |||
/*
 * These {i,o}bf_status functions return whether the buffers are full or not.
 * Both read the EC status port: bit 1 = input buffer full (IBF),
 * bit 0 = output buffer full (OBF).
 */

static inline unsigned int ibf_status(unsigned int port)
{
	return !!(inb(port) & 0x02);
}

static inline unsigned int obf_status(unsigned int port)
{
	return inb(port) & 0x01;
}
65 | |||
/* Wrapper records the call site's line number for the warning below */
#define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d))
/*
 * Busy-wait (1 ms steps, up to ec_timeout ms) for the IBF flag on
 * 'port' to reach 'desired'.  Returns 0 on success, 1 on timeout.
 */
static int __wait_on_ibf(unsigned int line, unsigned int port, int desired)
{
	unsigned int timeo;
	int state = ibf_status(port);

	for (timeo = ec_timeout; state != desired && timeo; timeo--) {
		mdelay(1);
		state = ibf_status(port);
	}

	/* warn only when a raised timeout was actually needed */
	if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
			timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
		printk(KERN_WARNING "olpc-ec: %d: waited %u ms for IBF!\n",
				line, ec_timeout - timeo);
	}

	return !(state == desired);
}
85 | |||
86 | #define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d)) | ||
87 | static int __wait_on_obf(unsigned int line, unsigned int port, int desired) | ||
88 | { | ||
89 | unsigned int timeo; | ||
90 | int state = obf_status(port); | ||
91 | |||
92 | for (timeo = ec_timeout; state != desired && timeo; timeo--) { | ||
93 | mdelay(1); | ||
94 | state = obf_status(port); | ||
95 | } | ||
96 | |||
97 | if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) && | ||
98 | timeo < (ec_timeout - EC_BASE_TIMEOUT)) { | ||
99 | printk(KERN_WARNING "olpc-ec: %d: waited %u ms for OBF!\n", | ||
100 | line, ec_timeout - timeo); | ||
101 | } | ||
102 | |||
103 | return !(state == desired); | ||
104 | } | ||
105 | |||
/*
 * This allows the kernel to run Embedded Controller commands. The EC is
 * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the
 * available EC commands are here:
 * <http://wiki.laptop.org/go/Ec_specification>. Unfortunately, while
 * OpenFirmware's source is available, the EC's is not.
 *
 * Port 0x6c is the EC command/status port, 0x68 the data port.
 * Runs with interrupts disabled under ec_lock.  Returns 0 on success
 * or -EIO when any handshake with the EC times out.
 */
int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
		unsigned char *outbuf,  size_t outlen)
{
	unsigned long flags;
	int ret = -EIO;
	int i;
	int restarts = 0;

	spin_lock_irqsave(&ec_lock, flags);

	/* Clear OBF: drain up to 10 stale bytes from the data port */
	for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++)
		inb(0x68);
	if (i == 10) {
		printk(KERN_ERR "olpc-ec: timeout while attempting to "
				"clear OBF flag!\n");
		goto err;
	}

	if (wait_on_ibf(0x6c, 0)) {
		printk(KERN_ERR "olpc-ec: timeout waiting for EC to "
				"quiesce!\n");
		goto err;
	}

restart:
	/*
	 * Note that if we time out during any IBF checks, that's a failure;
	 * we have to return.  There's no way for the kernel to clear that.
	 *
	 * If we time out during an OBF check, we can restart the command;
	 * reissuing it will clear the OBF flag, and we should be alright.
	 * The OBF flag will sometimes misbehave due to what we believe
	 * is a hardware quirk..
	 */
	pr_devel("olpc-ec:  running cmd 0x%x\n", cmd);
	outb(cmd, 0x6c);

	if (wait_on_ibf(0x6c, 0)) {
		printk(KERN_ERR "olpc-ec: timeout waiting for EC to read "
				"command!\n");
		goto err;
	}

	if (inbuf && inlen) {
		/* write data to EC */
		for (i = 0; i < inlen; i++) {
			if (wait_on_ibf(0x6c, 0)) {
				printk(KERN_ERR "olpc-ec: timeout waiting for"
						" EC accept data!\n");
				goto err;
			}
			pr_devel("olpc-ec:  sending cmd arg 0x%x\n", inbuf[i]);
			outb(inbuf[i], 0x68);
		}
	}
	if (outbuf && outlen) {
		/* read data from EC */
		for (i = 0; i < outlen; i++) {
			if (wait_on_obf(0x6c, 1)) {
				printk(KERN_ERR "olpc-ec: timeout waiting for"
						" EC to provide data!\n");
				/* OBF quirk: retry whole command up to 10x */
				if (restarts++ < 10)
					goto restart;
				goto err;
			}
			outbuf[i] = inb(0x68);
			pr_devel("olpc-ec:  received 0x%x\n", outbuf[i]);
		}
	}

	ret = 0;
err:
	spin_unlock_irqrestore(&ec_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(olpc_ec_cmd);
190 | |||
191 | static bool __init check_ofw_architecture(struct device_node *root) | ||
192 | { | ||
193 | const char *olpc_arch; | ||
194 | int propsize; | ||
195 | |||
196 | olpc_arch = of_get_property(root, "architecture", &propsize); | ||
197 | return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0; | ||
198 | } | ||
199 | |||
200 | static u32 __init get_board_revision(struct device_node *root) | ||
201 | { | ||
202 | int propsize; | ||
203 | const __be32 *rev; | ||
204 | |||
205 | rev = of_get_property(root, "board-revision-int", &propsize); | ||
206 | if (propsize != 4) | ||
207 | return 0; | ||
208 | |||
209 | return be32_to_cpu(*rev); | ||
210 | } | ||
211 | |||
/*
 * Decide whether we are on OLPC hardware by inspecting the OFW device
 * tree root; on success record the board revision and set the PRESENT
 * flag in olpc_platform_info.
 */
static bool __init platform_detect(void)
{
	struct device_node *root = of_find_node_by_path("/");
	bool success;

	if (!root)
		return false;

	success = check_ofw_architecture(root);
	if (success) {
		olpc_platform_info.boardrev = get_board_revision(root);
		olpc_platform_info.flags |= OLPC_F_PRESENT;
	}

	/* drop the reference taken by of_find_node_by_path() */
	of_node_put(root);
	return success;
}
229 | |||
/*
 * Register the XO-1-only platform devices (rfkill and the olpc-xo1 PM
 * glue).  If the second registration fails the first device is left
 * registered; presumably acceptable since failure here aborts init.
 */
static int __init add_xo1_platform_devices(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("xo1-rfkill", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}
244 | |||
/*
 * Main OLPC platform init: detect OLPC hardware via OFW, query the EC
 * firmware revision, select PCI emulation for XO-1, and register the
 * XO-1 platform devices where applicable.
 */
static int __init olpc_init(void)
{
	int r = 0;

	if (!olpc_ofw_present() || !platform_detect())
		return 0;

	spin_lock_init(&ec_lock);

	/* assume B1 and above models always have a DCON */
	if (olpc_board_at_least(olpc_board(0xb1)))
		olpc_platform_info.flags |= OLPC_F_DCON;

	/* get the EC revision */
	olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0,
			(unsigned char *) &olpc_platform_info.ecver, 1);

#ifdef CONFIG_PCI_OLPC
	/* If the VSA exists let it emulate PCI, if not emulate in kernel.
	 * XO-1 only. */
	if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) &&
			!cs5535_has_vsa2())
		x86_init.pci.arch_init = pci_olpc_init;
#endif

	printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
			((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
			olpc_platform_info.boardrev >> 4,
			olpc_platform_info.ecver);

	if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */
		r = add_xo1_platform_devices();
		if (r)
			return r;
	}

	return 0;
}

postcore_initcall(olpc_init);
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c new file mode 100644 index 000000000000..d39f63d017d2 --- /dev/null +++ b/arch/x86/platform/olpc/olpc_dt.c | |||
@@ -0,0 +1,201 @@ | |||
1 | /* | ||
2 | * OLPC-specific OFW device tree support code. | ||
3 | * | ||
4 | * Paul Mackerras August 1996. | ||
5 | * Copyright (C) 1996-2005 Paul Mackerras. | ||
6 | * | ||
7 | * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. | ||
8 | * {engebret|bergner}@us.ibm.com | ||
9 | * | ||
10 | * Adapted for sparc by David S. Miller davem@davemloft.net | ||
11 | * Adapted for x86/OLPC by Andres Salomon <dilinger@queued.net> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/bootmem.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/of_platform.h> | ||
23 | #include <linux/of_pdt.h> | ||
24 | #include <asm/olpc.h> | ||
25 | #include <asm/olpc_ofw.h> | ||
26 | |||
/*
 * OFW "peer" service: return the next sibling of 'node', or 0 when
 * there is none or on error.  A phandle of -1 is OFW's invalid marker.
 */
static phandle __init olpc_dt_getsibling(phandle node)
{
	const void *args[] = { (void *)node };
	void *res[] = { &node };	/* result overwrites 'node' */

	if ((s32)node == -1)
		return 0;

	if (olpc_ofw("peer", args, res) || (s32)node == -1)
		return 0;

	return node;
}
40 | |||
41 | static phandle __init olpc_dt_getchild(phandle node) | ||
42 | { | ||
43 | const void *args[] = { (void *)node }; | ||
44 | void *res[] = { &node }; | ||
45 | |||
46 | if ((s32)node == -1) | ||
47 | return 0; | ||
48 | |||
49 | if (olpc_ofw("child", args, res) || (s32)node == -1) { | ||
50 | pr_err("PROM: %s: fetching child failed!\n", __func__); | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | return node; | ||
55 | } | ||
56 | |||
/*
 * OFW "getproplen" service: return the byte length of property 'prop'
 * on 'node', or -1 when the node is invalid or the call fails.
 */
static int __init olpc_dt_getproplen(phandle node, const char *prop)
{
	const void *args[] = { (void *)node, prop };
	int len;
	void *res[] = { &len };

	if ((s32)node == -1)
		return -1;

	if (olpc_ofw("getproplen", args, res)) {
		pr_err("PROM: %s: getproplen failed!\n", __func__);
		return -1;
	}

	return len;
}
73 | |||
74 | static int __init olpc_dt_getproperty(phandle node, const char *prop, | ||
75 | char *buf, int bufsize) | ||
76 | { | ||
77 | int plen; | ||
78 | |||
79 | plen = olpc_dt_getproplen(node, prop); | ||
80 | if (plen > bufsize || plen < 1) { | ||
81 | return -1; | ||
82 | } else { | ||
83 | const void *args[] = { (void *)node, prop, buf, (void *)plen }; | ||
84 | void *res[] = { &plen }; | ||
85 | |||
86 | if (olpc_ofw("getprop", args, res)) { | ||
87 | pr_err("PROM: %s: getprop failed!\n", __func__); | ||
88 | return -1; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | return plen; | ||
93 | } | ||
94 | |||
/*
 * OFW "nextprop" service: write the name of the property following
 * 'prev' on 'node' into buf.  Returns 0 on success, -1 on failure or
 * when there are no more properties (buf is left as "").
 */
static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf)
{
	const void *args[] = { (void *)node, prev, buf };
	int success;
	void *res[] = { &success };

	buf[0] = '\0';

	if ((s32)node == -1)
		return -1;

	/* OFW reports success == 1 when another property exists */
	if (olpc_ofw("nextprop", args, res) || success != 1)
		return -1;

	return 0;
}
111 | |||
/*
 * OFW "package-to-path" service: write the full device-tree path of
 * 'node' into buf (at most buflen bytes), storing the path length in
 * *len.  Returns 0 on success, -1 on failure.
 */
static int __init olpc_dt_pkg2path(phandle node, char *buf,
		const int buflen, int *len)
{
	const void *args[] = { (void *)node, buf, (void *)buflen };
	void *res[] = { len };

	if ((s32)node == -1)
		return -1;

	if (olpc_ofw("package-to-path", args, res) || *len < 1)
		return -1;

	return 0;
}
126 | |||
/* running total of bootmem handed out, reported after tree build */
static unsigned int prom_early_allocated __initdata;

/*
 * Simple bump allocator over bootmem for device-tree construction:
 * grabs page-sized (or larger) chunks and carves zeroed sub-ranges
 * out of them.  Memory is never freed — it lives for the boot.
 */
void * __init prom_early_alloc(unsigned long size)
{
	static u8 *mem;
	static size_t free_mem;
	void *res;

	if (free_mem < size) {
		const size_t chunk_size = max(PAGE_SIZE, size);

		/*
		 * To mimimize the number of allocations, grab at least
		 * PAGE_SIZE of memory (that's an arbitrary choice that's
		 * fast enough on the platforms we care about while minimizing
		 * wasted bootmem) and hand off chunks of it to callers.
		 */
		res = alloc_bootmem(chunk_size);
		BUG_ON(!res);
		prom_early_allocated += chunk_size;
		memset(res, 0, chunk_size);
		free_mem = chunk_size;
		mem = res;
		/* note: any remainder of the previous chunk is abandoned */
	}

	/* allocate from the local cache */
	free_mem -= size;
	res = mem;
	mem += size;
	return res;
}
158 | |||
/* Vtable plugging the OFW accessors above into the generic
 * of_pdt (PROM device tree) builder. */
static struct of_pdt_ops prom_olpc_ops __initdata = {
	.nextprop = olpc_dt_nextprop,
	.getproplen = olpc_dt_getproplen,
	.getproperty = olpc_dt_getproperty,
	.getchild = olpc_dt_getchild,
	.getsibling = olpc_dt_getsibling,
	.pkg2path = olpc_dt_pkg2path,
};
167 | |||
/*
 * Build the in-kernel device tree by walking OFW, starting from the
 * root node (the "peer" of phandle 0).  No-op when OFW is absent.
 */
void __init olpc_dt_build_devicetree(void)
{
	phandle root;

	if (!olpc_ofw_is_installed())
		return;

	root = olpc_dt_getsibling(0);
	if (!root) {
		pr_err("PROM: unable to get root node from OFW!\n");
		return;
	}
	of_pdt_build_devicetree(root, &prom_olpc_ops);

	pr_info("PROM DT: Built device tree with %u bytes of memory.\n",
			prom_early_allocated);
}
185 | |||
/* A list of DT node/bus matches that we want to expose as platform devices */
static struct of_device_id __initdata of_ids[] = {
	{ .compatible = "olpc,xo1-battery" },
	{ .compatible = "olpc,xo1-dcon" },
	{ .compatible = "olpc,xo1-rtc" },
	{},	/* sentinel */
};

/* Turn matching device-tree nodes into platform devices (OLPC only) */
static int __init olpc_create_platform_devices(void)
{
	if (machine_is_olpc())
		return of_platform_bus_probe(NULL, of_ids, NULL);
	else
		return 0;
}
device_initcall(olpc_create_platform_devices);
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c new file mode 100644 index 000000000000..e7604f62870d --- /dev/null +++ b/arch/x86/platform/olpc/olpc_ofw.c | |||
@@ -0,0 +1,117 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <asm/page.h> | ||
5 | #include <asm/setup.h> | ||
6 | #include <asm/io.h> | ||
7 | #include <asm/pgtable.h> | ||
8 | #include <asm/olpc_ofw.h> | ||
9 | |||
/* address of OFW callback interface; will be NULL if OFW isn't found */
static int (*olpc_ofw_cif)(int *);

/* page dir entry containing OFW's pgdir table; filled in by head_32.S */
u32 olpc_ofw_pgd __initdata;

/* serializes calls through the OFW callback interface (see __olpc_ofw) */
static DEFINE_SPINLOCK(ofw_lock);

/* max combined number of CIF input args and results (BUG_ON in __olpc_ofw) */
#define MAXARGS 10
19 | |||
/*
 * Copy OFW's page-directory entry (located via olpc_ofw_pgd, which
 * head_32.S filled in) into the kernel's swapper_pg_dir, so the
 * firmware's mappings remain reachable when calling into the CIF.
 * On remap failure OFW is disabled by clearing olpc_ofw_cif.
 */
void __init setup_olpc_ofw_pgd(void)
{
	pgd_t *base, *ofw_pde;

	if (!olpc_ofw_cif)
		return;

	/* fetch OFW's PDE */
	base = early_ioremap(olpc_ofw_pgd, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
	if (!base) {
		printk(KERN_ERR "failed to remap OFW's pgd - disabling OFW!\n");
		olpc_ofw_cif = NULL;
		return;
	}
	ofw_pde = &base[OLPC_OFW_PDE_NR];

	/* install OFW's PDE permanently into the kernel's pgtable */
	set_pgd(&swapper_pg_dir[OLPC_OFW_PDE_NR], *ofw_pde);
	/* implicit optimization barrier here due to uninline function return */

	early_iounmap(base, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
}
42 | |||
/*
 * Call an Open Firmware client-interface service.
 *
 * @name:    OFW service name string
 * @nr_args: number of input arguments in @args
 * @args:    input argument values, passed to OFW as 32-bit words
 * @nr_res:  number of result slots in @res
 * @res:     array of pointers to int, filled with OFW's results on success
 *
 * Returns the CIF's status (0 on success) or -EIO when no CIF was
 * detected at boot.  The call is serialized by ofw_lock.
 * NOTE: pointers are packed into 32-bit ints — this code is 32-bit only.
 */
int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res,
		void **res)
{
	/* layout: [0]=name, [1]=nr_args, [2]=nr_res, then args, then results */
	int ofw_args[MAXARGS + 3];
	unsigned long flags;
	int ret, i, *p;

	BUG_ON(nr_args + nr_res > MAXARGS);

	if (!olpc_ofw_cif)
		return -EIO;

	ofw_args[0] = (int)name;
	ofw_args[1] = nr_args;
	ofw_args[2] = nr_res;

	p = &ofw_args[3];
	for (i = 0; i < nr_args; i++, p++)
		*p = (int)args[i];

	/* call into ofw */
	spin_lock_irqsave(&ofw_lock, flags);
	ret = olpc_ofw_cif(ofw_args);
	spin_unlock_irqrestore(&ofw_lock, flags);

	/* on success, results were written just past the inputs; p already
	 * points there after the copy-in loop above */
	if (!ret) {
		for (i = 0; i < nr_res; i++, p++)
			*((int *)res[i]) = *p;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__olpc_ofw);
76 | |||
77 | bool olpc_ofw_present(void) | ||
78 | { | ||
79 | return olpc_ofw_cif != NULL; | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(olpc_ofw_present); | ||
82 | |||
/* OFW cif _should_ be above this address */
#define OFW_MIN 0xff000000

/* OFW starts on a 1MB boundary */
#define OFW_BOUND (1<<20)

/*
 * Check the boot_params header for the OFW signature and, if present,
 * record the CIF entry point and reserve the top of the virtual address
 * space that the firmware occupies.  A CIF below OFW_MIN is treated as
 * bogus and OFW is disabled.
 */
void __init olpc_ofw_detect(void)
{
	struct olpc_ofw_header *hdr = &boot_params.olpc_ofw_header;
	unsigned long start;

	/* ensure OFW booted us by checking for "OFW " string */
	if (hdr->ofw_magic != OLPC_OFW_SIG)
		return;

	olpc_ofw_cif = (int (*)(int *))hdr->cif_handler;

	if ((unsigned long)olpc_ofw_cif < OFW_MIN) {
		printk(KERN_ERR "OFW detected, but cif has invalid address 0x%lx - disabling.\n",
				(unsigned long)olpc_ofw_cif);
		olpc_ofw_cif = NULL;
		return;
	}

	/* determine where OFW starts in memory */
	start = round_down((unsigned long)olpc_ofw_cif, OFW_BOUND);
	printk(KERN_INFO "OFW detected in memory, cif @ 0x%lx (reserving top %ldMB)\n",
			(unsigned long)olpc_ofw_cif, (-start) >> 20);
	reserve_top_address(-start);
}
113 | |||
114 | bool __init olpc_ofw_is_installed(void) | ||
115 | { | ||
116 | return olpc_ofw_cif != NULL; | ||
117 | } | ||
diff --git a/arch/x86/platform/scx200/Makefile b/arch/x86/platform/scx200/Makefile new file mode 100644 index 000000000000..762b4c7f4314 --- /dev/null +++ b/arch/x86/platform/scx200/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_SCx200) += scx200.o | ||
2 | scx200-y += scx200_32.o | ||
diff --git a/arch/x86/platform/scx200/scx200_32.c b/arch/x86/platform/scx200/scx200_32.c new file mode 100644 index 000000000000..7e004acbe526 --- /dev/null +++ b/arch/x86/platform/scx200/scx200_32.c | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> | ||
3 | * | ||
4 | * National Semiconductor SCx200 support. | ||
5 | */ | ||
6 | |||
7 | #include <linux/module.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/pci.h> | ||
13 | |||
14 | #include <linux/scx200.h> | ||
15 | #include <linux/scx200_gpio.h> | ||
16 | |||
/* Verify that the configuration block really is there */
#define scx200_cb_probe(base) (inw((base) + SCx200_CBA) == (base))

/* prefix for all log messages from this driver */
#define NAME "scx200"

MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi SCx200 Driver");
MODULE_LICENSE("GPL");

/* I/O base of the GPIO block; stays 0 until a bridge device is probed */
unsigned scx200_gpio_base = 0;
/* last values driven on the two GPIO banks (see scx200_init_shadow) */
unsigned long scx200_gpio_shadow[2];

/* I/O base of the Configuration Block; stays 0 until probed */
unsigned scx200_cb_base = 0;

static struct pci_device_id scx200_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_XBUS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_XBUS) },
	{ },
};
MODULE_DEVICE_TABLE(pci,scx200_tbl);

static int __devinit scx200_probe(struct pci_dev *, const struct pci_device_id *);

static struct pci_driver scx200_pci_driver = {
	.name = "scx200",
	.id_table = scx200_tbl,
	.probe = scx200_probe,
};

/* protects the index/data register pair used by scx200_gpio_configure() */
static DEFINE_MUTEX(scx200_gpio_config_lock);
49 | |||
/*
 * Seed the GPIO shadow registers from the hardware so later updates
 * start from the values currently driven on the pins.
 */
static void __devinit scx200_init_shadow(void)
{
	int bank;

	/* read the current values driven on the GPIO signals */
	for (bank = 0; bank < 2; ++bank)
		scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
}
58 | |||
/*
 * PCI probe: for bridge devices, claim the GPIO I/O region and record
 * its base; for XBUS devices, locate the Configuration Block (first at
 * the fixed address, then via the CBA scratch config register).
 * Returns 0 on success, -EBUSY/-ENODEV on failure.
 */
static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned base;

	if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE ||
	    pdev->device == PCI_DEVICE_ID_NS_SC1100_BRIDGE) {
		base = pci_resource_start(pdev, 0);
		printk(KERN_INFO NAME ": GPIO base 0x%x\n", base);

		if (!request_region(base, SCx200_GPIO_SIZE, "NatSemi SCx200 GPIO")) {
			printk(KERN_ERR NAME ": can't allocate I/O for GPIOs\n");
			return -EBUSY;
		}

		scx200_gpio_base = base;
		scx200_init_shadow();

	} else {
		/* find the base of the Configuration Block */
		if (scx200_cb_probe(SCx200_CB_BASE_FIXED)) {
			scx200_cb_base = SCx200_CB_BASE_FIXED;
		} else {
			/* fall back to the address BIOS left in the scratch reg */
			pci_read_config_dword(pdev, SCx200_CBA_SCRATCH, &base);
			if (scx200_cb_probe(base)) {
				scx200_cb_base = base;
			} else {
				printk(KERN_WARNING NAME ": Configuration Block not found\n");
				return -ENODEV;
			}
		}
		printk(KERN_INFO NAME ": Configuration Block base 0x%x\n", scx200_cb_base);
	}

	return 0;
}
94 | |||
/*
 * Read-modify-write the configuration of GPIO @index:
 * new_config = (old & @mask) | @bits.  Returns the old configuration.
 * The hardware uses a select/data register pair (gpio_base+0x20/+0x24),
 * so the whole sequence is serialized by scx200_gpio_config_lock.
 */
u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits)
{
	u32 config, new_config;

	mutex_lock(&scx200_gpio_config_lock);

	/* select the pin, then read its current configuration */
	outl(index, scx200_gpio_base + 0x20);
	config = inl(scx200_gpio_base + 0x24);

	new_config = (config & mask) | bits;
	outl(new_config, scx200_gpio_base + 0x24);

	mutex_unlock(&scx200_gpio_config_lock);

	return config;
}
111 | |||
112 | static int __init scx200_init(void) | ||
113 | { | ||
114 | printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n"); | ||
115 | |||
116 | return pci_register_driver(&scx200_pci_driver); | ||
117 | } | ||
118 | |||
119 | static void __exit scx200_cleanup(void) | ||
120 | { | ||
121 | pci_unregister_driver(&scx200_pci_driver); | ||
122 | release_region(scx200_gpio_base, SCx200_GPIO_SIZE); | ||
123 | } | ||
124 | |||
125 | module_init(scx200_init); | ||
126 | module_exit(scx200_cleanup); | ||
127 | |||
128 | EXPORT_SYMBOL(scx200_gpio_base); | ||
129 | EXPORT_SYMBOL(scx200_gpio_shadow); | ||
130 | EXPORT_SYMBOL(scx200_gpio_configure); | ||
131 | EXPORT_SYMBOL(scx200_cb_base); | ||
diff --git a/arch/x86/platform/sfi/Makefile b/arch/x86/platform/sfi/Makefile new file mode 100644 index 000000000000..cc5db1168a5e --- /dev/null +++ b/arch/x86/platform/sfi/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_SFI) += sfi.o | |||
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c new file mode 100644 index 000000000000..7785b72ecc3a --- /dev/null +++ b/arch/x86/platform/sfi/sfi.c | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * sfi.c - x86 architecture SFI support. | ||
3 | * | ||
4 | * Copyright (c) 2009, Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #define KMSG_COMPONENT "SFI" | ||
22 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
23 | |||
24 | #include <linux/acpi.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/sfi.h> | ||
27 | #include <linux/io.h> | ||
28 | |||
29 | #include <asm/io_apic.h> | ||
30 | #include <asm/mpspec.h> | ||
31 | #include <asm/setup.h> | ||
32 | #include <asm/apic.h> | ||
33 | |||
34 | #ifdef CONFIG_X86_LOCAL_APIC | ||
35 | static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | ||
36 | |||
37 | /* All CPUs enumerated by SFI must be present and enabled */ | ||
/*
 * Register one local APIC id found in the SFI CPUS table with the
 * generic MP code.  Ids at or above MAX_LOCAL_APIC are rejected
 * (the condition below is "MAX_LOCAL_APIC - id <= 0", i.e. id >= max).
 */
static void __cpuinit mp_sfi_register_lapic(u8 id)
{
	if (MAX_LOCAL_APIC - id <= 0) {
		pr_warning("Processor #%d invalid (max %d)\n",
			id, MAX_LOCAL_APIC);
		return;
	}

	pr_info("registering lapic[%d]\n", id);

	generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR)));
}
50 | |||
/*
 * SFI table handler for the CPUS table: register every enumerated
 * local APIC and mark the MP configuration as found.  Always returns 0.
 */
static int __init sfi_parse_cpus(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb;
	struct sfi_cpu_table_entry *pentry;
	int i;
	int cpu_num;

	sb = (struct sfi_table_simple *)table;
	cpu_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
	pentry = (struct sfi_cpu_table_entry *)sb->pentry;

	for (i = 0; i < cpu_num; i++) {
		mp_sfi_register_lapic(pentry->apic_id);
		pentry++;
	}

	smp_found_config = 1;
	return 0;
}
70 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
71 | |||
72 | #ifdef CONFIG_X86_IO_APIC | ||
73 | |||
/*
 * SFI table handler for the APIC table: register every enumerated
 * IO-APIC, assigning GSIs starting at gsi_top.  PIC mode is forced off
 * (and warned about) since it must not be set when an IOAPIC table
 * exists.  Always returns 0.
 */
static int __init sfi_parse_ioapic(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb;
	struct sfi_apic_table_entry *pentry;
	int i, num;

	sb = (struct sfi_table_simple *)table;
	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_apic_table_entry);
	pentry = (struct sfi_apic_table_entry *)sb->pentry;

	for (i = 0; i < num; i++) {
		mp_register_ioapic(i, pentry->phys_addr, gsi_top);
		pentry++;
	}

	WARN(pic_mode, KERN_WARNING
		"SFI: pic_mod shouldn't be 1 when IOAPIC table is present\n");
	pic_mode = 0;
	return 0;
}
94 | #endif /* CONFIG_X86_IO_APIC */ | ||
95 | |||
96 | /* | ||
97 | * sfi_platform_init(): register lapics & io-apics | ||
98 | */ | ||
int __init sfi_platform_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	/* default lapic base; sfi_lapic_addr is APIC_DEFAULT_PHYS_BASE */
	register_lapic_address(sfi_lapic_addr);
	sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_cpus);
#endif
#ifdef CONFIG_X86_IO_APIC
	sfi_table_parse(SFI_SIG_APIC, NULL, NULL, sfi_parse_ioapic);
#endif
	/* always reports success, even if no tables were found */
	return 0;
}
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile new file mode 100644 index 000000000000..6c40995fefb8 --- /dev/null +++ b/arch/x86/platform/uv/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o | |||
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c new file mode 100644 index 000000000000..8bc57baaa9ad --- /dev/null +++ b/arch/x86/platform/uv/bios_uv.c | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | * BIOS run time interface routines. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. | ||
19 | * Copyright (c) Russ Anderson <rja@sgi.com> | ||
20 | */ | ||
21 | |||
22 | #include <linux/efi.h> | ||
23 | #include <asm/efi.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <asm/uv/bios.h> | ||
26 | #include <asm/uv/uv_hub.h> | ||
27 | |||
28 | static struct uv_systab uv_systab; | ||
29 | |||
/*
 * Invoke a UV BIOS runtime service through the cached systab function
 * pointer (filled in by uv_bios_init).  Returns the BIOS status, or
 * BIOS_STATUS_UNIMPLEMENTED when no UV systab was found.
 * No locking here — see the *_irqsave/*_reentrant wrappers below.
 */
s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
{
	struct uv_systab *tab = &uv_systab;
	s64 ret;

	if (!tab->function)
		/*
		 * BIOS does not support UV systab
		 */
		return BIOS_STATUS_UNIMPLEMENTED;

	ret = efi_call6((void *)__va(tab->function), (u64)which,
			a1, a2, a3, a4, a5);
	return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_call);
46 | |||
47 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | ||
48 | u64 a4, u64 a5) | ||
49 | { | ||
50 | unsigned long bios_flags; | ||
51 | s64 ret; | ||
52 | |||
53 | local_irq_save(bios_flags); | ||
54 | ret = uv_bios_call(which, a1, a2, a3, a4, a5); | ||
55 | local_irq_restore(bios_flags); | ||
56 | |||
57 | return ret; | ||
58 | } | ||
59 | |||
60 | s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | ||
61 | u64 a4, u64 a5) | ||
62 | { | ||
63 | s64 ret; | ||
64 | |||
65 | preempt_disable(); | ||
66 | ret = uv_bios_call(which, a1, a2, a3, a4, a5); | ||
67 | preempt_enable(); | ||
68 | |||
69 | return ret; | ||
70 | } | ||
71 | |||
72 | |||
/* SN platform identity values, exported for other UV/SGI drivers.
 * NOTE(review): presumably filled in by a caller of
 * uv_bios_get_sn_info() during platform setup — the assignment is not
 * in this file; confirm against the UV setup code. */
long sn_partition_id;
EXPORT_SYMBOL_GPL(sn_partition_id);
long sn_coherency_id;
EXPORT_SYMBOL_GPL(sn_coherency_id);
long sn_region_size;
EXPORT_SYMBOL_GPL(sn_region_size);
long system_serial_number;
EXPORT_SYMBOL_GPL(system_serial_number);
int uv_type;
EXPORT_SYMBOL_GPL(uv_type);
83 | |||
84 | |||
/*
 * Query the BIOS for SN platform info.  The BIOS returns two 64-bit
 * values: v0 is a packed partition_info_u (hub version, partition id,
 * coherence id, region size) and v1 is the system serial number.
 * Each output pointer may be NULL if the caller doesn't want it.
 * Returns the BIOS status (BIOS_STATUS_SUCCESS on success).
 */
s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
		long *region, long *ssn)
{
	s64 ret;
	u64 v0, v1;
	union partition_info_u part;

	ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
				(u64)(&v0), (u64)(&v1), 0, 0);
	if (ret != BIOS_STATUS_SUCCESS)
		return ret;

	/* unpack v0 into its named fields */
	part.val = v0;
	if (uvtype)
		*uvtype = part.hub_version;
	if (partid)
		*partid = part.partition_id;
	if (coher)
		*coher = part.coherence_id;
	if (region)
		*region = part.region_size;
	if (ssn)
		*ssn = v1;
	return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_get_sn_info);
111 | |||
/*
 * Allocate a GRU message-queue watchlist for the memory at @addr.
 * On success returns the watchlist number (written by the BIOS into
 * the local 'watchlist') and stores the interrupt MMR offset through
 * @intr_mmr_offset; on failure returns the BIOS's negative status.
 * NOTE(review): the BIOS status is truncated to int before the sign
 * check — assumes BIOS error codes fit in 32 bits; confirm.
 */
int
uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
			   unsigned long *intr_mmr_offset)
{
	u64 watchlist;
	s64 ret;

	/*
	 * bios returns watchlist number or negative error number.
	 */
	ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
			mq_size, (u64)intr_mmr_offset,
			(u64)&watchlist, 0);
	if (ret < BIOS_STATUS_SUCCESS)
		return ret;

	return watchlist;
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);
131 | |||
132 | int | ||
133 | uv_bios_mq_watchlist_free(int blade, int watchlist_num) | ||
134 | { | ||
135 | return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE, | ||
136 | blade, watchlist_num, 0, 0, 0); | ||
137 | } | ||
138 | EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free); | ||
139 | |||
140 | s64 | ||
141 | uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms) | ||
142 | { | ||
143 | return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len, | ||
144 | perms, 0, 0); | ||
145 | } | ||
146 | EXPORT_SYMBOL_GPL(uv_bios_change_memprotect); | ||
147 | |||
148 | s64 | ||
149 | uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len) | ||
150 | { | ||
151 | s64 ret; | ||
152 | |||
153 | ret = uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie, | ||
154 | (u64)addr, buf, (u64)len, 0); | ||
155 | return ret; | ||
156 | } | ||
157 | EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa); | ||
158 | |||
159 | s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second) | ||
160 | { | ||
161 | return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type, | ||
162 | (u64)ticks_per_second, 0, 0, 0); | ||
163 | } | ||
164 | EXPORT_SYMBOL_GPL(uv_bios_freq_base); | ||
165 | |||
166 | /* | ||
167 | * uv_bios_set_legacy_vga_target - Set Legacy VGA I/O Target | ||
168 | * @decode: true to enable target, false to disable target | ||
169 | * @domain: PCI domain number | ||
170 | * @bus: PCI bus number | ||
171 | * | ||
172 | * Returns: | ||
173 | * 0: Success | ||
174 | * -EINVAL: Invalid domain or bus number | ||
175 | * -ENOSYS: Capability not available | ||
176 | * -EBUSY: Legacy VGA I/O cannot be retargeted at this time | ||
177 | */ | ||
178 | int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus) | ||
179 | { | ||
180 | return uv_bios_call(UV_BIOS_SET_LEGACY_VGA_TARGET, | ||
181 | (u64)decode, (u64)domain, (u64)bus, 0, 0); | ||
182 | } | ||
183 | EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target); | ||
184 | |||
185 | |||
#ifdef CONFIG_EFI
/*
 * Locate the UV system table via EFI, validate its signature, and cache
 * a copy in uv_systab for later uv_bios_call() use.
 *
 * Fix: check the ioremap() result before dereferencing it — on mapping
 * failure the original would strncmp()/memcpy() through a NULL pointer.
 */
void uv_bios_init(void)
{
	struct uv_systab *tab;

	if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
	    (efi.uv_systab == (unsigned long)NULL)) {
		printk(KERN_CRIT "No EFI UV System Table.\n");
		uv_systab.function = (unsigned long)NULL;
		return;
	}

	tab = (struct uv_systab *)ioremap(efi.uv_systab,
					sizeof(struct uv_systab));
	if (!tab) {
		printk(KERN_ERR "unable to map UV system table!\n");
		uv_systab.function = (unsigned long)NULL;
		return;
	}
	if (strncmp(tab->signature, "UVST", 4) != 0)
		printk(KERN_ERR "bad signature in UV system table!");

	/*
	 * Copy table to permanent spot for later use.
	 */
	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
	iounmap(tab);

	printk(KERN_INFO "EFI UV System Table Revision %d\n",
			uv_systab.revision);
}
#else /* !CONFIG_EFI */

void uv_bios_init(void) { }
#endif
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c new file mode 100644 index 000000000000..68e467f69fec --- /dev/null +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -0,0 +1,1857 @@ | |||
1 | /* | ||
2 | * SGI UltraViolet TLB flush routines. | ||
3 | * | ||
4 | * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI. | ||
5 | * | ||
6 | * This code is released under the GNU General Public License version 2 or | ||
7 | * later. | ||
8 | */ | ||
9 | #include <linux/seq_file.h> | ||
10 | #include <linux/proc_fs.h> | ||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/delay.h> | ||
15 | |||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/uv/uv.h> | ||
18 | #include <asm/uv/uv_mmrs.h> | ||
19 | #include <asm/uv/uv_hub.h> | ||
20 | #include <asm/uv/uv_bau.h> | ||
21 | #include <asm/apic.h> | ||
22 | #include <asm/idle.h> | ||
23 | #include <asm/tsc.h> | ||
24 | #include <asm/irq_vectors.h> | ||
25 | #include <asm/timer.h> | ||
26 | |||
27 | /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */ | ||
28 | static int timeout_base_ns[] = { | ||
29 | 20, | ||
30 | 160, | ||
31 | 1280, | ||
32 | 10240, | ||
33 | 81920, | ||
34 | 655360, | ||
35 | 5242880, | ||
36 | 167772160 | ||
37 | }; | ||
38 | |||
39 | static int timeout_us; | ||
40 | static int nobau; | ||
41 | static int baudisabled; | ||
42 | static spinlock_t disable_lock; | ||
43 | static cycles_t congested_cycles; | ||
44 | |||
45 | /* tunables: */ | ||
46 | static int max_concurr = MAX_BAU_CONCURRENT; | ||
47 | static int max_concurr_const = MAX_BAU_CONCURRENT; | ||
48 | static int plugged_delay = PLUGGED_DELAY; | ||
49 | static int plugsb4reset = PLUGSB4RESET; | ||
50 | static int timeoutsb4reset = TIMEOUTSB4RESET; | ||
51 | static int ipi_reset_limit = IPI_RESET_LIMIT; | ||
52 | static int complete_threshold = COMPLETE_THRESHOLD; | ||
53 | static int congested_respns_us = CONGESTED_RESPONSE_US; | ||
54 | static int congested_reps = CONGESTED_REPS; | ||
55 | static int congested_period = CONGESTED_PERIOD; | ||
56 | |||
57 | static struct tunables tunables[] = { | ||
58 | {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */ | ||
59 | {&plugged_delay, PLUGGED_DELAY}, | ||
60 | {&plugsb4reset, PLUGSB4RESET}, | ||
61 | {&timeoutsb4reset, TIMEOUTSB4RESET}, | ||
62 | {&ipi_reset_limit, IPI_RESET_LIMIT}, | ||
63 | {&complete_threshold, COMPLETE_THRESHOLD}, | ||
64 | {&congested_respns_us, CONGESTED_RESPONSE_US}, | ||
65 | {&congested_reps, CONGESTED_REPS}, | ||
66 | {&congested_period, CONGESTED_PERIOD} | ||
67 | }; | ||
68 | |||
69 | static struct dentry *tunables_dir; | ||
70 | static struct dentry *tunables_file; | ||
71 | |||
72 | /* these correspond to the statistics printed by ptc_seq_show() */ | ||
73 | static char *stat_description[] = { | ||
74 | "sent: number of shootdown messages sent", | ||
75 | "stime: time spent sending messages", | ||
76 | "numuvhubs: number of hubs targeted with shootdown", | ||
77 | "numuvhubs16: number times 16 or more hubs targeted", | ||
78 | "numuvhubs8: number times 8 or more hubs targeted", | ||
79 | "numuvhubs4: number times 4 or more hubs targeted", | ||
80 | "numuvhubs2: number times 2 or more hubs targeted", | ||
81 | "numuvhubs1: number times 1 hub targeted", | ||
82 | "numcpus: number of cpus targeted with shootdown", | ||
83 | "dto: number of destination timeouts", | ||
84 | "retries: destination timeout retries sent", | ||
85 | "rok: : destination timeouts successfully retried", | ||
86 | "resetp: ipi-style resource resets for plugs", | ||
87 | "resett: ipi-style resource resets for timeouts", | ||
88 | "giveup: fall-backs to ipi-style shootdowns", | ||
89 | "sto: number of source timeouts", | ||
90 | "bz: number of stay-busy's", | ||
91 | "throt: number times spun in throttle", | ||
92 | "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE", | ||
93 | "recv: shootdown messages received", | ||
94 | "rtime: time spent processing messages", | ||
95 | "all: shootdown all-tlb messages", | ||
96 | "one: shootdown one-tlb messages", | ||
97 | "mult: interrupts that found multiple messages", | ||
98 | "none: interrupts that found no messages", | ||
99 | "retry: number of retry messages processed", | ||
100 | "canc: number messages canceled by retries", | ||
101 | "nocan: number retries that found nothing to cancel", | ||
102 | "reset: number of ipi-style reset requests processed", | ||
103 | "rcan: number messages canceled by reset requests", | ||
104 | "disable: number times use of the BAU was disabled", | ||
105 | "enable: number times use of the BAU was re-enabled" | ||
106 | }; | ||
107 | |||
/* "nobau" boot parameter: disable use of the BAU entirely (arg ignored). */
static int __init
setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);
115 | |||
116 | /* base pnode in this partition */ | ||
117 | static int uv_base_pnode __read_mostly; | ||
118 | /* position of pnode (which is nasid>>1): */ | ||
119 | static int uv_nshift __read_mostly; | ||
120 | static unsigned long uv_mmask __read_mostly; | ||
121 | |||
122 | static DEFINE_PER_CPU(struct ptc_stats, ptcstats); | ||
123 | static DEFINE_PER_CPU(struct bau_control, bau_control); | ||
124 | static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); | ||
125 | |||
126 | /* | ||
127 | * Determine the first node on a uvhub. 'Nodes' are used for kernel | ||
128 | * memory allocation. | ||
129 | */ | ||
130 | static int __init uvhub_to_first_node(int uvhub) | ||
131 | { | ||
132 | int node, b; | ||
133 | |||
134 | for_each_online_node(node) { | ||
135 | b = uv_node_to_blade_id(node); | ||
136 | if (uvhub == b) | ||
137 | return node; | ||
138 | } | ||
139 | return -1; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Determine the apicid of the first cpu on a uvhub. | ||
144 | */ | ||
145 | static int __init uvhub_to_first_apicid(int uvhub) | ||
146 | { | ||
147 | int cpu; | ||
148 | |||
149 | for_each_present_cpu(cpu) | ||
150 | if (uvhub == uv_cpu_to_blade_id(cpu)) | ||
151 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
152 | return -1; | ||
153 | } | ||
154 | |||
155 | /* | ||
156 | * Free a software acknowledge hardware resource by clearing its Pending | ||
157 | * bit. This will return a reply to the sender. | ||
158 | * If the message has timed out, a reply has already been sent by the | ||
159 | * hardware but the resource has not been released. In that case our | ||
160 | * clear of the Timeout bit (as well) will free the resource. No reply will | ||
161 | * be sent (the hardware will only do one reply per message). | ||
162 | */ | ||
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		/* clear both the Timeout and Pending bits for this vector
		 * (Timeout bits live NPENDING positions above Pending) */
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		write_mmr_sw_ack(dw);
	}
	/* mark the queue entry consumed so other cpus skip it */
	msg->replied_to = 1;
	msg->swack_vec = 0;
}
176 | |||
177 | /* | ||
178 | * Process the receipt of a RETRY message | ||
179 | */ | ||
/*
 * Scan the payload queue from msg+1 (wrapping) up to the retry message
 * itself, cancelling any earlier message from the same sender whose
 * swack resource has timed out, and releasing that resource in the MMR.
 * Only the uvhub master calls this (see bau_process_message).
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		/* the queue is circular: wrap at queue_last */
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = read_mmr_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				write_mmr_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}
232 | |||
233 | /* | ||
234 | * Do all the things a cpu should do for a TLB shootdown message. | ||
235 | * Other cpu's may come here at the same time for this message. | ||
236 | */ | ||
static void bau_process_message(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	/* atomic add-and-return: last cpu on the socket sees the full count */
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			reply_to_message(mdp, bcp);
		}
	}

	return;
}
297 | |||
298 | /* | ||
299 | * Determine the first cpu on a uvhub. | ||
300 | */ | ||
301 | static int uvhub_to_first_cpu(int uvhub) | ||
302 | { | ||
303 | int cpu; | ||
304 | for_each_present_cpu(cpu) | ||
305 | if (uvhub == uv_cpu_to_blade_id(cpu)) | ||
306 | return cpu; | ||
307 | return -1; | ||
308 | } | ||
309 | |||
/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
	int i;
	/* runs in IPI context on the target cpu; see reset_with_ipi() */
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = read_mmr_sw_ack();
			msg_res = msg->swack_vec;
			/* clear both the timeout and the pending halves */
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				write_mmr_sw_ack(mr);
			}
		}
	}
	return;
}
364 | |||
365 | /* | ||
366 | * Use IPI to get all target uvhubs to release resources held by | ||
367 | * a given sending cpu number. | ||
368 | */ | ||
369 | static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender) | ||
370 | { | ||
371 | int uvhub; | ||
372 | int maskbits; | ||
373 | cpumask_t mask; | ||
374 | struct reset_args reset_args; | ||
375 | |||
376 | reset_args.sender = sender; | ||
377 | cpus_clear(mask); | ||
378 | /* find a single cpu for each uvhub in this distribution mask */ | ||
379 | maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE; | ||
380 | for (uvhub = 0; uvhub < maskbits; uvhub++) { | ||
381 | int cpu; | ||
382 | if (!bau_uvhub_isset(uvhub, distribution)) | ||
383 | continue; | ||
384 | /* find a cpu for this uvhub */ | ||
385 | cpu = uvhub_to_first_cpu(uvhub); | ||
386 | cpu_set(cpu, mask); | ||
387 | } | ||
388 | |||
389 | /* IPI all cpus; preemption is already disabled */ | ||
390 | smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1); | ||
391 | return; | ||
392 | } | ||
393 | |||
394 | static inline unsigned long cycles_2_us(unsigned long long cyc) | ||
395 | { | ||
396 | unsigned long long ns; | ||
397 | unsigned long us; | ||
398 | int cpu = smp_processor_id(); | ||
399 | |||
400 | ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR; | ||
401 | us = ns / 1000; | ||
402 | return us; | ||
403 | } | ||
404 | |||
/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	/* atomically raise the hub's quiesce count */
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
414 | |||
/*
 * mark this quiet-requestor as done
 * (drops the count raised by quiesce_local_uvhub())
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
422 | |||
423 | static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift) | ||
424 | { | ||
425 | unsigned long descriptor_status; | ||
426 | |||
427 | descriptor_status = uv_read_local_mmr(mmr_offset); | ||
428 | descriptor_status >>= right_shift; | ||
429 | descriptor_status &= UV_ACT_STATUS_MASK; | ||
430 | return descriptor_status; | ||
431 | } | ||
432 | |||
/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 *
 * @bau_desc:    the sent activation descriptor (unused on UV1)
 * @mmr_offset:  which ACTIVATION_STATUS register holds this cpu's status
 * @right_shift: bit position of this cpu's status in that register
 * @bcp:         this cpu's bau_control
 * @try:         attempt number (unused on UV1)
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	/* count consecutive completions toward raising max_concurr */
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
485 | |||
486 | /* | ||
487 | * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register. | ||
488 | */ | ||
489 | static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu) | ||
490 | { | ||
491 | unsigned long descriptor_status; | ||
492 | unsigned long descriptor_status2; | ||
493 | |||
494 | descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK); | ||
495 | descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL; | ||
496 | descriptor_status = (descriptor_status << 1) | descriptor_status2; | ||
497 | return descriptor_status; | ||
498 | } | ||
499 | |||
/*
 * Wait for completion of a broadcast software ack message on UV2.
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 *
 * Like uv1_wait_completion() but the status is 3 bits wide (see
 * uv2_read_status()) and several UV2 error states map to GIVEUP.
 */
static int uv2_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	int cpu = bcp->uvhub_cpu;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
		    (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
	}
	/* count consecutive completions toward raising max_concurr */
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
550 | |||
551 | /* | ||
552 | * There are 2 status registers; each and array[32] of 2 bits. Set up for | ||
553 | * which register to read and position in that register based on cpu in | ||
554 | * current hub. | ||
555 | */ | ||
556 | static int wait_completion(struct bau_desc *bau_desc, | ||
557 | struct bau_control *bcp, long try) | ||
558 | { | ||
559 | int right_shift; | ||
560 | unsigned long mmr_offset; | ||
561 | int cpu = bcp->uvhub_cpu; | ||
562 | |||
563 | if (cpu < UV_CPUS_PER_AS) { | ||
564 | mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; | ||
565 | right_shift = cpu * UV_ACT_STATUS_SIZE; | ||
566 | } else { | ||
567 | mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1; | ||
568 | right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE); | ||
569 | } | ||
570 | |||
571 | if (is_uv1_hub()) | ||
572 | return uv1_wait_completion(bau_desc, mmr_offset, right_shift, | ||
573 | bcp, try); | ||
574 | else | ||
575 | return uv2_wait_completion(bau_desc, mmr_offset, right_shift, | ||
576 | bcp, try); | ||
577 | } | ||
578 | |||
/*
 * Convert seconds to a cycle count using this cpu's cyc2ns scale
 * factor.  Assumes sec is small enough that sec * 10^9 fits in an
 * unsigned long (64 bits on all UV systems).
 */
static inline cycles_t sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
588 | |||
/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 *
 * Back off briefly; after plugsb4reset consecutive plugged attempts,
 * quiesce the hub and use IPIs (reset_with_ipi) to free the swack
 * resources this cpu holds on the destination hubs.
 */
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		/* stop new broadcasts while resources are being reset */
		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}
616 | |||
/*
 * A destination timed out.  Throttle the hub to one outstanding
 * descriptor; after timeoutsb4reset consecutive timeouts, quiesce the
 * hub and free stuck swack resources via IPI (as destination_plugged).
 */
static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	/* throttle the hub until completions become clean again */
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		/* stop new broadcasts while resources are being reset */
		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}
638 | |||
/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 *
 * Disable the BAU on all cpus and make this cpu responsible for
 * re-enabling it after cong_period seconds (see check_enable()).
 */
static void disable_for_congestion(struct bau_control *bcp,
					struct ptc_stats *stat)
{
	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);

	/* re-check the average under the lock; another cpu may have won */
	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		int tcpu;
		struct bau_control *tbcp;
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles();
		bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}

	spin_unlock(&disable_lock);
}
668 | |||
669 | static void count_max_concurr(int stat, struct bau_control *bcp, | ||
670 | struct bau_control *hmaster) | ||
671 | { | ||
672 | bcp->plugged_tries = 0; | ||
673 | bcp->timeout_tries = 0; | ||
674 | if (stat != FLUSH_COMPLETE) | ||
675 | return; | ||
676 | if (bcp->conseccompletes <= bcp->complete_threshold) | ||
677 | return; | ||
678 | if (hmaster->max_concurr >= hmaster->max_concurr_const) | ||
679 | return; | ||
680 | hmaster->max_concurr++; | ||
681 | } | ||
682 | |||
683 | static void record_send_stats(cycles_t time1, cycles_t time2, | ||
684 | struct bau_control *bcp, struct ptc_stats *stat, | ||
685 | int completion_status, int try) | ||
686 | { | ||
687 | cycles_t elapsed; | ||
688 | |||
689 | if (time2 > time1) { | ||
690 | elapsed = time2 - time1; | ||
691 | stat->s_time += elapsed; | ||
692 | |||
693 | if ((completion_status == FLUSH_COMPLETE) && (try == 1)) { | ||
694 | bcp->period_requests++; | ||
695 | bcp->period_time += elapsed; | ||
696 | if ((elapsed > congested_cycles) && | ||
697 | (bcp->period_requests > bcp->cong_reps)) | ||
698 | disable_for_congestion(bcp, stat); | ||
699 | } | ||
700 | } else | ||
701 | stat->s_requestor--; | ||
702 | |||
703 | if (completion_status == FLUSH_COMPLETE && try > 1) | ||
704 | stat->s_retriesok++; | ||
705 | else if (completion_status == FLUSH_GIVEUP) | ||
706 | stat->s_giveup++; | ||
707 | } | ||
708 | |||
/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 *
 * Spin until this hub's active descriptor count can be raised without
 * exceeding max_concurr; atomic_inc_unless_ge() does the
 * check-and-increment under the hub lock.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}
726 | |||
727 | /* | ||
728 | * Handle the completion status of a message send. | ||
729 | */ | ||
730 | static void handle_cmplt(int completion_status, struct bau_desc *bau_desc, | ||
731 | struct bau_control *bcp, struct bau_control *hmaster, | ||
732 | struct ptc_stats *stat) | ||
733 | { | ||
734 | if (completion_status == FLUSH_RETRY_PLUGGED) | ||
735 | destination_plugged(bau_desc, bcp, hmaster, stat); | ||
736 | else if (completion_status == FLUSH_RETRY_TIMEOUT) | ||
737 | destination_timeout(bau_desc, bcp, hmaster, stat); | ||
738 | } | ||
739 | |||
/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			struct cpumask *flush_mask, struct bau_control *bcp)
{
	int seq_number = 0;
	int completion_stat = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;

	/* UV1 hardware limits the number of concurrent descriptors */
	if (is_uv1_hub())
		uv1_throttle(hmaster, stat);

	/* don't start while another cpu is resetting hub resources */
	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	/* send, and resend on PLUGGED/TIMEOUT until complete or give up */
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		bau_desc->header.sequence = seq_number;
		/* writing this MMR activates the descriptor (the send) */
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = wait_completion(bau_desc, bcp, try);

		/* on PLUGGED/TIMEOUT this may reset hub resources via IPI */
		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		/* too many IPI-based resets; fall back to kernel IPIs */
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	/* release the concurrency slot taken by uv1_throttle() */
	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		return 1;
	return 0;
}
813 | |||
814 | /* | ||
815 | * The BAU is disabled. When the disabled time period has expired, the cpu | ||
816 | * that disabled it must re-enable it. | ||
817 | * Return 0 if it is re-enabled for all cpus. | ||
818 | */ | ||
819 | static int check_enable(struct bau_control *bcp, struct ptc_stats *stat) | ||
820 | { | ||
821 | int tcpu; | ||
822 | struct bau_control *tbcp; | ||
823 | |||
824 | if (bcp->set_bau_off) { | ||
825 | if (get_cycles() >= bcp->set_bau_on_time) { | ||
826 | stat->s_bau_reenabled++; | ||
827 | baudisabled = 0; | ||
828 | for_each_present_cpu(tcpu) { | ||
829 | tbcp = &per_cpu(bau_control, tcpu); | ||
830 | tbcp->baudisabled = 0; | ||
831 | tbcp->period_requests = 0; | ||
832 | tbcp->period_time = 0; | ||
833 | } | ||
834 | return 0; | ||
835 | } | ||
836 | } | ||
837 | return -1; | ||
838 | } | ||
839 | |||
/*
 * Roll the targeting counts for one broadcast into the cpu's stats.
 * NOTE(review): the 'hubs' parameter is overwritten below before any
 * use; its incoming value is ignored - confirm against callers before
 * relying on it.
 */
static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	/* histogram of hub counts per broadcast */
	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}
869 | |||
870 | /* | ||
871 | * Translate a cpu mask to the uvhub distribution mask in the BAU | ||
872 | * activation descriptor. | ||
873 | */ | ||
874 | static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp, | ||
875 | struct bau_desc *bau_desc, int *localsp, int *remotesp) | ||
876 | { | ||
877 | int cpu; | ||
878 | int pnode; | ||
879 | int cnt = 0; | ||
880 | struct hub_and_pnode *hpp; | ||
881 | |||
882 | for_each_cpu(cpu, flush_mask) { | ||
883 | /* | ||
884 | * The distribution vector is a bit map of pnodes, relative | ||
885 | * to the partition base pnode (and the partition base nasid | ||
886 | * in the header). | ||
887 | * Translate cpu to pnode and hub using a local memory array. | ||
888 | */ | ||
889 | hpp = &bcp->socket_master->thp[cpu]; | ||
890 | pnode = hpp->pnode - bcp->partition_base_pnode; | ||
891 | bau_uvhub_set(pnode, &bau_desc->distribution); | ||
892 | cnt++; | ||
893 | if (hpp->uvhub == bcp->uvhub) | ||
894 | (*localsp)++; | ||
895 | else | ||
896 | (*remotesp)++; | ||
897 | } | ||
898 | if (!cnt) | ||
899 | return 1; | ||
900 | return 0; | ||
901 | } | ||
902 | |||
/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm, unsigned long va,
				unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		/* nonzero return: disable period not over; stay on IPIs */
		if (check_enable(bcp, stat))
			return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	/* this cpu's descriptor within the hub's descriptor block */
	bau_desc = bcp->descriptor_base;
	bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	/* empty mask after removing this cpu: nothing remote to flush */
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;
	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}
983 | |||
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptable cpus see this
 * interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_pq_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();

	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

	/* walk the circular payload queue; a zero swack_vec ends it */
	msg = bcp->bau_msg_head;
	while (msg->swack_vec) {
		count++;

		msgdesc.msg_slot = msg - msgdesc.queue_first;
		msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
		msgdesc.msg = msg;
		bau_process_message(&msgdesc, bcp);

		/* advance, wrapping around the end of the queue */
		msg++;
		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;

	ack_APIC_irq();
}
1037 | |||
/*
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void __init enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		/* skip hubs with no cpus */
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image = read_mmr_misc_control(pnode);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~(1L << SOFTACK_MSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * UV1:
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= (1L << SOFTACK_MSHIFT);
		if (is_uv2_hub()) {
			/* UV2 needs the legacy and extended mode bits too */
			mmr_image |= (1L << UV2_LEG_SHFT);
			mmr_image |= (1L << UV2_EXT_SHFT);
		}
		write_mmr_misc_control(pnode, mmr_image);
	}
}
1087 | |||
1088 | static void *ptc_seq_start(struct seq_file *file, loff_t *offset) | ||
1089 | { | ||
1090 | if (*offset < num_possible_cpus()) | ||
1091 | return offset; | ||
1092 | return NULL; | ||
1093 | } | ||
1094 | |||
1095 | static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset) | ||
1096 | { | ||
1097 | (*offset)++; | ||
1098 | if (*offset < num_possible_cpus()) | ||
1099 | return offset; | ||
1100 | return NULL; | ||
1101 | } | ||
1102 | |||
/* seq_file iterator teardown: the per-cpu stats need no cleanup */
static void ptc_seq_stop(struct seq_file *file, void *data)
{
}
1106 | |||
/*
 * Convert microseconds to a cycle count using this cpu's cyc2ns
 * scale factor (inverse of cycles_2_us()).
 */
static inline unsigned long long usec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
1116 | |||
/*
 * Display the statistics thru /proc/sgi_uv/ptc_statistics
 * 'data' points to the cpu number
 * Note: see the descriptions in stat_description[].
 */
static int ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;
	/* emit the column-header line once, before the first cpu's row */
	if (!cpu) {
		seq_printf(file,
		"# cpu sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
			"numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
		seq_printf(file,
			"resetp resett giveup sto bz throt swack recv rtime ");
		seq_printf(file,
			"all one mult none retry canc nocan reset rcan ");
		seq_printf(file,
			"disable enable\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		/* source side statistics */
		seq_printf(file,
			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled);
	}
	return 0;
}
1176 | |||
1177 | /* | ||
1178 | * Display the tunables thru debugfs | ||
1179 | */ | ||
1180 | static ssize_t tunables_read(struct file *file, char __user *userbuf, | ||
1181 | size_t count, loff_t *ppos) | ||
1182 | { | ||
1183 | char *buf; | ||
1184 | int ret; | ||
1185 | |||
1186 | buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n", | ||
1187 | "max_concur plugged_delay plugsb4reset", | ||
1188 | "timeoutsb4reset ipi_reset_limit complete_threshold", | ||
1189 | "congested_response_us congested_reps congested_period", | ||
1190 | max_concurr, plugged_delay, plugsb4reset, | ||
1191 | timeoutsb4reset, ipi_reset_limit, complete_threshold, | ||
1192 | congested_respns_us, congested_reps, congested_period); | ||
1193 | |||
1194 | if (!buf) | ||
1195 | return -ENOMEM; | ||
1196 | |||
1197 | ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); | ||
1198 | kfree(buf); | ||
1199 | return ret; | ||
1200 | } | ||
1201 | |||
1202 | /* | ||
1203 | * handle a write to /proc/sgi_uv/ptc_statistics | ||
1204 | * -1: reset the statistics | ||
1205 | * 0: display meaning of the statistics | ||
1206 | */ | ||
1207 | static ssize_t ptc_proc_write(struct file *file, const char __user *user, | ||
1208 | size_t count, loff_t *data) | ||
1209 | { | ||
1210 | int cpu; | ||
1211 | int i; | ||
1212 | int elements; | ||
1213 | long input_arg; | ||
1214 | char optstr[64]; | ||
1215 | struct ptc_stats *stat; | ||
1216 | |||
1217 | if (count == 0 || count > sizeof(optstr)) | ||
1218 | return -EINVAL; | ||
1219 | if (copy_from_user(optstr, user, count)) | ||
1220 | return -EFAULT; | ||
1221 | optstr[count - 1] = '\0'; | ||
1222 | |||
1223 | if (strict_strtol(optstr, 10, &input_arg) < 0) { | ||
1224 | printk(KERN_DEBUG "%s is invalid\n", optstr); | ||
1225 | return -EINVAL; | ||
1226 | } | ||
1227 | |||
1228 | if (input_arg == 0) { | ||
1229 | elements = sizeof(stat_description)/sizeof(*stat_description); | ||
1230 | printk(KERN_DEBUG "# cpu: cpu number\n"); | ||
1231 | printk(KERN_DEBUG "Sender statistics:\n"); | ||
1232 | for (i = 0; i < elements; i++) | ||
1233 | printk(KERN_DEBUG "%s\n", stat_description[i]); | ||
1234 | } else if (input_arg == -1) { | ||
1235 | for_each_present_cpu(cpu) { | ||
1236 | stat = &per_cpu(ptcstats, cpu); | ||
1237 | memset(stat, 0, sizeof(struct ptc_stats)); | ||
1238 | } | ||
1239 | } | ||
1240 | |||
1241 | return count; | ||
1242 | } | ||
1243 | |||
/*
 * Minimal decimal parser: accumulate leading digits of 'name';
 * stops at the first non-digit.  No sign or overflow handling.
 */
static int local_atoi(const char *name)
{
	int val = 0;

	while (*name >= '0' && *name <= '9')
		val = 10 * val + (*name++ - '0');
	return val;
}
1258 | |||
1259 | /* | ||
1260 | * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables. | ||
1261 | * Zero values reset them to defaults. | ||
1262 | */ | ||
1263 | static int parse_tunables_write(struct bau_control *bcp, char *instr, | ||
1264 | int count) | ||
1265 | { | ||
1266 | char *p; | ||
1267 | char *q; | ||
1268 | int cnt = 0; | ||
1269 | int val; | ||
1270 | int e = sizeof(tunables) / sizeof(*tunables); | ||
1271 | |||
1272 | p = instr + strspn(instr, WHITESPACE); | ||
1273 | q = p; | ||
1274 | for (; *p; p = q + strspn(q, WHITESPACE)) { | ||
1275 | q = p + strcspn(p, WHITESPACE); | ||
1276 | cnt++; | ||
1277 | if (q == p) | ||
1278 | break; | ||
1279 | } | ||
1280 | if (cnt != e) { | ||
1281 | printk(KERN_INFO "bau tunable error: should be %d values\n", e); | ||
1282 | return -EINVAL; | ||
1283 | } | ||
1284 | |||
1285 | p = instr + strspn(instr, WHITESPACE); | ||
1286 | q = p; | ||
1287 | for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) { | ||
1288 | q = p + strcspn(p, WHITESPACE); | ||
1289 | val = local_atoi(p); | ||
1290 | switch (cnt) { | ||
1291 | case 0: | ||
1292 | if (val == 0) { | ||
1293 | max_concurr = MAX_BAU_CONCURRENT; | ||
1294 | max_concurr_const = MAX_BAU_CONCURRENT; | ||
1295 | continue; | ||
1296 | } | ||
1297 | if (val < 1 || val > bcp->cpus_in_uvhub) { | ||
1298 | printk(KERN_DEBUG | ||
1299 | "Error: BAU max concurrent %d is invalid\n", | ||
1300 | val); | ||
1301 | return -EINVAL; | ||
1302 | } | ||
1303 | max_concurr = val; | ||
1304 | max_concurr_const = val; | ||
1305 | continue; | ||
1306 | default: | ||
1307 | if (val == 0) | ||
1308 | *tunables[cnt].tunp = tunables[cnt].deflt; | ||
1309 | else | ||
1310 | *tunables[cnt].tunp = val; | ||
1311 | continue; | ||
1312 | } | ||
1313 | if (q == p) | ||
1314 | break; | ||
1315 | } | ||
1316 | return 0; | ||
1317 | } | ||
1318 | |||
/*
 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int ret;
	char instr[100];
	struct bau_control *bcp;

	/* leave room for the terminating NUL */
	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';

	/*
	 * any cpu's bau_control is good enough for range validation;
	 * NOTE(review): smp_processor_id() here looks like it can run in a
	 * preemptible context -- confirm this is benign (get_cpu/put_cpu
	 * would silence CONFIG_DEBUG_PREEMPT).
	 */
	bcp = &per_cpu(bau_control, smp_processor_id());

	ret = parse_tunables_write(bcp, instr, count);
	if (ret)
		return ret;

	/* propagate the updated global tunables to every cpu's bau_control */
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->cong_period = congested_period;
	}
	return count;
}
1358 | |||
/* seq_file iterator for /proc/sgi_uv/ptc_statistics (one record per cpu) */
static const struct seq_operations uv_ptc_seq_ops = {
	.start		= ptc_seq_start,
	.next		= ptc_seq_next,
	.stop		= ptc_seq_stop,
	.show		= ptc_seq_show
};
1365 | |||
/* open /proc/sgi_uv/ptc_statistics as a standard seq_file */
static int ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}
1370 | |||
/* bau_tunables keeps no per-open state; reads rebuild the buffer each time */
static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}
1375 | |||
/* file_operations for /proc/sgi_uv/ptc_statistics */
static const struct file_operations proc_uv_ptc_operations = {
	.open		= ptc_proc_open,
	.read		= seq_read,
	.write		= ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
1383 | |||
/* file_operations for /sys/kernel/debug/sgi_uv/bau_tunables */
static const struct file_operations tunables_fops = {
	.open		= tunables_open,
	.read		= tunables_read,
	.write		= tunables_write,
	.llseek		= default_llseek,
};
1390 | |||
1391 | static int __init uv_ptc_init(void) | ||
1392 | { | ||
1393 | struct proc_dir_entry *proc_uv_ptc; | ||
1394 | |||
1395 | if (!is_uv_system()) | ||
1396 | return 0; | ||
1397 | |||
1398 | proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL, | ||
1399 | &proc_uv_ptc_operations); | ||
1400 | if (!proc_uv_ptc) { | ||
1401 | printk(KERN_ERR "unable to create %s proc entry\n", | ||
1402 | UV_PTC_BASENAME); | ||
1403 | return -EINVAL; | ||
1404 | } | ||
1405 | |||
1406 | tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL); | ||
1407 | if (!tunables_dir) { | ||
1408 | printk(KERN_ERR "unable to create debugfs directory %s\n", | ||
1409 | UV_BAU_TUNABLES_DIR); | ||
1410 | return -EINVAL; | ||
1411 | } | ||
1412 | tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600, | ||
1413 | tunables_dir, NULL, &tunables_fops); | ||
1414 | if (!tunables_file) { | ||
1415 | printk(KERN_ERR "unable to create debugfs file %s\n", | ||
1416 | UV_BAU_TUNABLES_FILE); | ||
1417 | return -EINVAL; | ||
1418 | } | ||
1419 | return 0; | ||
1420 | } | ||
1421 | |||
/*
 * Initialize the sending side's sending buffers.
 */
static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	size_t dsize;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * per cpu; and one per cpu on the uvhub (ADP_SZ)
	 */
	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
	BUG_ON(!bau_desc);	/* boot-time allocation; no recovery path */

	pa = uv_gpa(bau_desc); /* need the real nasid*/
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	/* the 14-bit pnode */
	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		bd2->header.swack_flag = 1;
		/*
		 * The base_dest_nasid set in the message header is the nasid
		 * of the first uvhub in the partition. The bit map will
		 * indicate destination pnode numbers relative to that base.
		 * They may not be consecutive if nasid striding is being used.
		 */
		bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
		bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
		bd2->header.command = UV_NET_ENDPOINT_INTD;
		bd2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 * fairness chaining multilevel count replied_to
		 */
	}
	/* every cpu on this uvhub shares the same descriptor page */
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}
1481 | |||
/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void pq_init(int node, int pnode)
{
	int cpu;
	size_t plsize;
	char *cp;
	void *vp;
	unsigned long pn;
	unsigned long first;
	unsigned long pn_first;
	unsigned long last;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	/* one extra entry of slack for the 32-byte realignment below */
	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	pqp = (struct bau_pq_entry *)vp;
	BUG_ON(!pqp);

	/*
	 * round pqp up to the next 32-byte boundary; the unaligned base
	 * pointer (vp) is not kept -- this queue is never freed
	 */
	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pn = uv_gpa(pqp) >> uv_nshift;
	first = uv_physnodeaddr(pqp);
	pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
	last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
	/* tell the hub hardware where the payload queue lives */
	write_mmr_payload_first(pnode, pn_first);
	write_mmr_payload_tail(pnode, first);
	write_mmr_payload_last(pnode, last);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}
1532 | |||
/*
 * Initialization of each UV hub's structures
 */
static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	/* sending side (descriptors), then receiving side (payload queue) */
	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);
	/*
	 * The below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
1555 | |||
/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	if (is_uv1_hub()) {
		/* UV1: base[urgency index] * two multipliers, in nanoseconds */
		mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		base = timeout_base_ns[index];
		ts_ns = base * mult1 * mult2;
		ret = ts_ns / 1000;	/* nanoseconds -> microseconds */
	} else {
		/* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
			mult1 = 80;
		else
			mult1 = 10;
		base = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;	/* units are 10 or 80 us */
	}
	return ret;
}
1593 | |||
/* Seed every cpu's bau_control with the global/default tunable values. */
static void __init init_per_cpu_tunables(void)
{
	int cpu;
	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = usec_2_cycles(2*timeout_us);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->cong_period = congested_period;
	}
}
1617 | |||
1618 | /* | ||
1619 | * Scan all cpus to collect blade and socket summaries. | ||
1620 | */ | ||
1621 | static int __init get_cpu_topology(int base_pnode, | ||
1622 | struct uvhub_desc *uvhub_descs, | ||
1623 | unsigned char *uvhub_mask) | ||
1624 | { | ||
1625 | int cpu; | ||
1626 | int pnode; | ||
1627 | int uvhub; | ||
1628 | int socket; | ||
1629 | struct bau_control *bcp; | ||
1630 | struct uvhub_desc *bdp; | ||
1631 | struct socket_desc *sdp; | ||
1632 | |||
1633 | for_each_present_cpu(cpu) { | ||
1634 | bcp = &per_cpu(bau_control, cpu); | ||
1635 | |||
1636 | memset(bcp, 0, sizeof(struct bau_control)); | ||
1637 | |||
1638 | pnode = uv_cpu_hub_info(cpu)->pnode; | ||
1639 | if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) { | ||
1640 | printk(KERN_EMERG | ||
1641 | "cpu %d pnode %d-%d beyond %d; BAU disabled\n", | ||
1642 | cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE); | ||
1643 | return 1; | ||
1644 | } | ||
1645 | |||
1646 | bcp->osnode = cpu_to_node(cpu); | ||
1647 | bcp->partition_base_pnode = base_pnode; | ||
1648 | |||
1649 | uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; | ||
1650 | *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); | ||
1651 | bdp = &uvhub_descs[uvhub]; | ||
1652 | |||
1653 | bdp->num_cpus++; | ||
1654 | bdp->uvhub = uvhub; | ||
1655 | bdp->pnode = pnode; | ||
1656 | |||
1657 | /* kludge: 'assuming' one node per socket, and assuming that | ||
1658 | disabling a socket just leaves a gap in node numbers */ | ||
1659 | socket = bcp->osnode & 1; | ||
1660 | bdp->socket_mask |= (1 << socket); | ||
1661 | sdp = &bdp->socket[socket]; | ||
1662 | sdp->cpu_number[sdp->num_cpus] = cpu; | ||
1663 | sdp->num_cpus++; | ||
1664 | if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) { | ||
1665 | printk(KERN_EMERG "%d cpus per socket invalid\n", | ||
1666 | sdp->num_cpus); | ||
1667 | return 1; | ||
1668 | } | ||
1669 | } | ||
1670 | return 0; | ||
1671 | } | ||
1672 | |||
1673 | /* | ||
1674 | * Each socket is to get a local array of pnodes/hubs. | ||
1675 | */ | ||
1676 | static void make_per_cpu_thp(struct bau_control *smaster) | ||
1677 | { | ||
1678 | int cpu; | ||
1679 | size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus(); | ||
1680 | |||
1681 | smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode); | ||
1682 | memset(smaster->thp, 0, hpsz); | ||
1683 | for_each_present_cpu(cpu) { | ||
1684 | smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode; | ||
1685 | smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; | ||
1686 | } | ||
1687 | } | ||
1688 | |||
/*
 * Initialize all the per_cpu information for the cpu's on a given socket,
 * given what has been gathered into the socket_desc struct.
 * And reports the chosen hub and socket masters back to the caller.
 */
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
			struct bau_control **smasterp,
			struct bau_control **hmasterp)
{
	int i;
	int cpu;
	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);
		bcp->cpu = cpu;
		/*
		 * the socket's first cpu becomes the socket master; the
		 * first socket master seen on the hub becomes hub master
		 */
		if (i == 0) {
			*smasterp = bcp;
			if (!(*hmasterp))
				*hmasterp = bcp;
		}
		bcp->cpus_in_uvhub = bdp->num_cpus;
		bcp->cpus_in_socket = sdp->num_cpus;
		bcp->socket_master = *smasterp;
		bcp->uvhub = bdp->uvhub;
		bcp->uvhub_master = *hmasterp;
		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
				bcp->uvhub_cpu);
			return 1;
		}
	}
	return 0;
}
1725 | |||
1726 | /* | ||
1727 | * Summarize the blade and socket topology into the per_cpu structures. | ||
1728 | */ | ||
1729 | static int __init summarize_uvhub_sockets(int nuvhubs, | ||
1730 | struct uvhub_desc *uvhub_descs, | ||
1731 | unsigned char *uvhub_mask) | ||
1732 | { | ||
1733 | int socket; | ||
1734 | int uvhub; | ||
1735 | unsigned short socket_mask; | ||
1736 | |||
1737 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) { | ||
1738 | struct uvhub_desc *bdp; | ||
1739 | struct bau_control *smaster = NULL; | ||
1740 | struct bau_control *hmaster = NULL; | ||
1741 | |||
1742 | if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8)))) | ||
1743 | continue; | ||
1744 | |||
1745 | bdp = &uvhub_descs[uvhub]; | ||
1746 | socket_mask = bdp->socket_mask; | ||
1747 | socket = 0; | ||
1748 | while (socket_mask) { | ||
1749 | struct socket_desc *sdp; | ||
1750 | if ((socket_mask & 1)) { | ||
1751 | sdp = &bdp->socket[socket]; | ||
1752 | if (scan_sock(sdp, bdp, &smaster, &hmaster)) | ||
1753 | return 1; | ||
1754 | } | ||
1755 | socket++; | ||
1756 | socket_mask = (socket_mask >> 1); | ||
1757 | make_per_cpu_thp(smaster); | ||
1758 | } | ||
1759 | } | ||
1760 | return 0; | ||
1761 | } | ||
1762 | |||
1763 | /* | ||
1764 | * initialize the bau_control structure for each cpu | ||
1765 | */ | ||
1766 | static int __init init_per_cpu(int nuvhubs, int base_part_pnode) | ||
1767 | { | ||
1768 | unsigned char *uvhub_mask; | ||
1769 | void *vp; | ||
1770 | struct uvhub_desc *uvhub_descs; | ||
1771 | |||
1772 | timeout_us = calculate_destination_timeout(); | ||
1773 | |||
1774 | vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); | ||
1775 | uvhub_descs = (struct uvhub_desc *)vp; | ||
1776 | memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); | ||
1777 | uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); | ||
1778 | |||
1779 | if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask)) | ||
1780 | return 1; | ||
1781 | |||
1782 | if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask)) | ||
1783 | return 1; | ||
1784 | |||
1785 | kfree(uvhub_descs); | ||
1786 | kfree(uvhub_mask); | ||
1787 | init_per_cpu_tunables(); | ||
1788 | return 0; | ||
1789 | } | ||
1790 | |||
/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int cpus;
	int vector;
	cpumask_var_t *mask;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;

	/* per-cpu scratch mask used by the BAU flush path */
	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
	}

	uv_nshift = uv_hub_info->m_val;
	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = usec_2_cycles(congested_respns_us);

	/* lowest pnode that has cpus; destinations are addressed relative to it */
	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);
	}

	/* on per-cpu setup failure, just run without the BAU */
	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
		nobau = 1;
		return 0;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);

	enable_timeouts();
	alloc_intr_gate(vector, uv_bau_message_intr1);

	/* finally activate the BAU on every populated hub */
	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			unsigned long val;
			unsigned long mmr;
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			val = 1L << 63;
			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */
			write_mmr_data_broadcast(pnode, mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c new file mode 100644 index 000000000000..374a05d8ad22 --- /dev/null +++ b/arch/x86/platform/uv/uv_irq.c | |||
@@ -0,0 +1,285 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI UV IRQ functions | ||
7 | * | ||
8 | * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/rbtree.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/irq.h> | ||
15 | |||
16 | #include <asm/apic.h> | ||
17 | #include <asm/uv/uv_irq.h> | ||
18 | #include <asm/uv/uv_hub.h> | ||
19 | |||
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode{
	struct rb_node list;	/* node in uv_irq_root, ordered by irq */
	unsigned long offset;	/* MMR offset on the sourcing hub */
	int pnode;		/* pnode of the sourcing hub */
	int irq;		/* lookup key */
};

/*
 * protects uv_irq_root; relies on zero-initialization --
 * NOTE(review): confirm this is equivalent to DEFINE_SPINLOCK here
 */
static spinlock_t uv_irq_lock;
static struct rb_root uv_irq_root;	/* irq -> (offset, pnode) map */

static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
32 | |||
/* no-op irq_mask/irq_unmask hook: UV-CORE irqs need no masking here */
static void uv_noop(struct irq_data *data) { }
34 | |||
/* irq_eoi hook: acknowledge the interrupt at the local APIC */
static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}
39 | |||
/* irq_chip for UV hub-sourced (MMR-driven) interrupts */
static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};
47 | |||
/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 * Returns 0 on success (including in-place update of an existing entry),
 * -ENOMEM if the node cannot be allocated.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	/* allocate outside the lock, on the blade's memory node */
	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists: update it and drop the new node */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}
96 | |||
97 | /* Retrieve offset and pnode information from the rb tree for a specific irq */ | ||
98 | int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode) | ||
99 | { | ||
100 | struct uv_irq_2_mmr_pnode *e; | ||
101 | struct rb_node *n; | ||
102 | unsigned long irqflags; | ||
103 | |||
104 | spin_lock_irqsave(&uv_irq_lock, irqflags); | ||
105 | n = uv_irq_root.rb_node; | ||
106 | while (n) { | ||
107 | e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); | ||
108 | |||
109 | if (e->irq == irq) { | ||
110 | *offset = e->offset; | ||
111 | *pnode = e->pnode; | ||
112 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | if (irq < e->irq) | ||
117 | n = n->rb_left; | ||
118 | else | ||
119 | n = n->rb_right; | ||
120 | } | ||
121 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
122 | return -1; | ||
123 | } | ||
124 | |||
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 * Returns the irq number on success, or a negative errno from vector
 * assignment.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset, int limit)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode, err;

	/* the route entry is written to the MMR as one 64-bit word */
	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	/* UV_AFFINITY_CPU pins the irq; otherwise allow process-context moves */
	if (limit == UV_AFFINITY_CPU)
		irq_set_status_flags(irq, IRQ_NO_BALANCING);
	else
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	/* build the route entry: vector and destination, unmasked, rest zero */
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector = cfg->vector;
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode = apic->irq_dest_mode;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);

	/* program the hub MMR so its interrupts target this cpu/vector */
	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}
172 | |||
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	/* write a route entry with only the mask bit set */
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
191 | |||
/*
 * irq_set_affinity handler for UV-CORE irqs: pick a new destination from
 * 'mask', rebuild the route entry and rewrite the hub MMR recorded for
 * this irq.  Returns 0 on success, -1 when the mask cannot be applied or
 * the irq has no recorded MMR.
 */
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;
	unsigned long mmr_value, mmr_offset;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	/* assigns a vector for the new mask and fills in 'dest' */
	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector = cfg->vector;
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode = apic->irq_dest_mode;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}
227 | |||
228 | /* | ||
229 | * Set up a mapping of an available irq and vector, and enable the specified | ||
230 | * MMR that defines the MSI that is to be sent to the specified CPU when an | ||
231 | * interrupt is raised. | ||
232 | */ | ||
233 | int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, | ||
234 | unsigned long mmr_offset, int limit) | ||
235 | { | ||
236 | int irq, ret; | ||
237 | |||
238 | irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade)); | ||
239 | |||
240 | if (irq <= 0) | ||
241 | return -EBUSY; | ||
242 | |||
243 | ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, | ||
244 | limit); | ||
245 | if (ret == irq) | ||
246 | uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); | ||
247 | else | ||
248 | destroy_irq(irq); | ||
249 | |||
250 | return ret; | ||
251 | } | ||
252 | EXPORT_SYMBOL_GPL(uv_setup_irq); | ||
253 | |||
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	/* find this irq's entry, mask its MMR, and drop it from the tree */
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	/* release the irq number allocated by uv_setup_irq() */
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
diff --git a/arch/x86/platform/uv/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c new file mode 100644 index 000000000000..309c70fb7759 --- /dev/null +++ b/arch/x86/platform/uv/uv_sysfs.c | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * This file supports the /sys/firmware/sgi_uv interfaces for SGI UV. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
19 | * Copyright (c) Russ Anderson | ||
20 | */ | ||
21 | |||
22 | #include <linux/sysdev.h> | ||
23 | #include <asm/uv/bios.h> | ||
24 | #include <asm/uv/uv.h> | ||
25 | |||
26 | struct kobject *sgi_uv_kobj; | ||
27 | |||
28 | static ssize_t partition_id_show(struct kobject *kobj, | ||
29 | struct kobj_attribute *attr, char *buf) | ||
30 | { | ||
31 | return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id); | ||
32 | } | ||
33 | |||
34 | static ssize_t coherence_id_show(struct kobject *kobj, | ||
35 | struct kobj_attribute *attr, char *buf) | ||
36 | { | ||
37 | return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id()); | ||
38 | } | ||
39 | |||
/* Read-only sysfs attributes exposed under /sys/firmware/sgi_uv/. */
static struct kobj_attribute partition_id_attr =
	__ATTR(partition_id, S_IRUGO, partition_id_show, NULL);

static struct kobj_attribute coherence_id_attr =
	__ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL);
45 | |||
46 | |||
47 | static int __init sgi_uv_sysfs_init(void) | ||
48 | { | ||
49 | unsigned long ret; | ||
50 | |||
51 | if (!is_uv_system()) | ||
52 | return -ENODEV; | ||
53 | |||
54 | if (!sgi_uv_kobj) | ||
55 | sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj); | ||
56 | if (!sgi_uv_kobj) { | ||
57 | printk(KERN_WARNING "kobject_create_and_add sgi_uv failed\n"); | ||
58 | return -EINVAL; | ||
59 | } | ||
60 | |||
61 | ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr); | ||
62 | if (ret) { | ||
63 | printk(KERN_WARNING "sysfs_create_file partition_id failed\n"); | ||
64 | return ret; | ||
65 | } | ||
66 | |||
67 | ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr); | ||
68 | if (ret) { | ||
69 | printk(KERN_WARNING "sysfs_create_file coherence_id failed\n"); | ||
70 | return ret; | ||
71 | } | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | device_initcall(sgi_uv_sysfs_init); | ||
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c new file mode 100644 index 000000000000..9f29a01ee1b3 --- /dev/null +++ b/arch/x86/platform/uv/uv_time.c | |||
@@ -0,0 +1,429 @@ | |||
1 | /* | ||
2 | * SGI RTC clock/timer routines. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved. | ||
19 | * Copyright (c) Dimitri Sivanich | ||
20 | */ | ||
21 | #include <linux/clockchips.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #include <asm/uv/uv_mmrs.h> | ||
25 | #include <asm/uv/uv_hub.h> | ||
26 | #include <asm/uv/bios.h> | ||
27 | #include <asm/uv/uv.h> | ||
28 | #include <asm/apic.h> | ||
29 | #include <asm/cpu.h> | ||
30 | |||
#define RTC_NAME		"sgi_rtc"

/* Forward declarations: the ops structs below reference these handlers. */
static cycle_t uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
static void uv_rtc_timer_setup(enum clock_event_mode,
			       struct clock_event_device *);

/* Clocksource backed by the UV hub RTC MMR. */
static struct clocksource clocksource_uv = {
	.name		= RTC_NAME,
	.rating		= 400,
	.read		= uv_read_rtc,
	.mask		= (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Template clockevent device; copied into each cpu's cpu_ced at register. */
static struct clock_event_device clock_event_device_uv = {
	.name		= RTC_NAME,
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 20,	/* mult computed against this in uv_rtc_setup_clock() */
	.rating		= 400,
	.irq		= -1,	/* delivered via x86_platform_ipi, not a real irq */
	.set_next_event	= uv_rtc_next_event,
	.set_mode	= uv_rtc_timer_setup,
	.event_handler	= NULL,
};

static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);

/* There is one of these allocated per node */
struct uv_rtc_timer_head {
	spinlock_t	lock;
	/* next cpu waiting for timer, local node relative: */
	int		next_cpu;
	/* number of cpus on this node: */
	int		ncpus;
	/* sized to ncpus entries at allocation time (see uv_rtc_allocate_timers) */
	struct {
		int	lcpu;		/* systemwide logical cpu number */
		u64	expires;	/* next timer expiration for this cpu */
	} cpu[1];
};

/*
 * Access to uv_rtc_timer_head via blade id.
 */
static struct uv_rtc_timer_head		**blade_info __read_mostly;

/* Set by the "uvrtcevt" boot parameter; clockevents stay off otherwise. */
static int				uv_rtc_evt_enable;
78 | |||
79 | /* | ||
80 | * Hardware interface routines | ||
81 | */ | ||
82 | |||
83 | /* Send IPIs to another node */ | ||
84 | static void uv_rtc_send_IPI(int cpu) | ||
85 | { | ||
86 | unsigned long apicid, val; | ||
87 | int pnode; | ||
88 | |||
89 | apicid = cpu_physical_id(cpu); | ||
90 | pnode = uv_apicid_to_pnode(apicid); | ||
91 | apicid |= uv_apicid_hibits; | ||
92 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | ||
93 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | | ||
94 | (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); | ||
95 | |||
96 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | ||
97 | } | ||
98 | |||
/*
 * Check for an RTC interrupt pending on @pnode.
 * The RTC1 event lives in a different EVENT_OCCURRED register on UV1 vs UV2
 * hubs, hence the hub-revision split.  Returns non-zero if pending.
 */
static int uv_intr_pending(int pnode)
{
	if (is_uv1_hub())
		return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
			UV1H_EVENT_OCCURRED0_RTC1_MASK;
	else
		return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
			UV2H_EVENT_OCCURRED2_RTC_1_MASK;
}
109 | |||
/* Setup interrupt and return non-zero if early expiration occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
	u64 val;
	unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
	int pnode = uv_cpu_to_pnode(cpu);

	/* Mask the RTC1 interrupt while it is being reprogrammed. */
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
		UVH_RTC1_INT_CONFIG_M_MASK);
	/* Park the comparator so it cannot match during setup. */
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);

	/* Clear any stale RTC1 event (register differs on UV1 vs UV2 hubs). */
	if (is_uv1_hub())
		uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
				UV1H_EVENT_OCCURRED0_RTC1_MASK);
	else
		uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
				UV2H_EVENT_OCCURRED2_RTC_1_MASK);

	val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
		((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);

	/* Set configuration */
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
	/* Initialize comparator value */
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);

	if (uv_read_rtc(NULL) <= expires)
		return 0;

	/* Deadline already passed: "early" unless the irq is pending anyway. */
	return !uv_intr_pending(pnode);
}
141 | |||
142 | /* | ||
143 | * Per-cpu timer tracking routines | ||
144 | */ | ||
145 | |||
146 | static __init void uv_rtc_deallocate_timers(void) | ||
147 | { | ||
148 | int bid; | ||
149 | |||
150 | for_each_possible_blade(bid) { | ||
151 | kfree(blade_info[bid]); | ||
152 | } | ||
153 | kfree(blade_info); | ||
154 | } | ||
155 | |||
156 | /* Allocate per-node list of cpu timer expiration times. */ | ||
157 | static __init int uv_rtc_allocate_timers(void) | ||
158 | { | ||
159 | int cpu; | ||
160 | |||
161 | blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL); | ||
162 | if (!blade_info) | ||
163 | return -ENOMEM; | ||
164 | memset(blade_info, 0, uv_possible_blades * sizeof(void *)); | ||
165 | |||
166 | for_each_present_cpu(cpu) { | ||
167 | int nid = cpu_to_node(cpu); | ||
168 | int bid = uv_cpu_to_blade_id(cpu); | ||
169 | int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id; | ||
170 | struct uv_rtc_timer_head *head = blade_info[bid]; | ||
171 | |||
172 | if (!head) { | ||
173 | head = kmalloc_node(sizeof(struct uv_rtc_timer_head) + | ||
174 | (uv_blade_nr_possible_cpus(bid) * | ||
175 | 2 * sizeof(u64)), | ||
176 | GFP_KERNEL, nid); | ||
177 | if (!head) { | ||
178 | uv_rtc_deallocate_timers(); | ||
179 | return -ENOMEM; | ||
180 | } | ||
181 | spin_lock_init(&head->lock); | ||
182 | head->ncpus = uv_blade_nr_possible_cpus(bid); | ||
183 | head->next_cpu = -1; | ||
184 | blade_info[bid] = head; | ||
185 | } | ||
186 | |||
187 | head->cpu[bcpu].lcpu = cpu; | ||
188 | head->cpu[bcpu].expires = ULLONG_MAX; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
/*
 * Find and set the next expiring timer.
 * Caller must hold head->lock.  Scans the blade's per-cpu expiration slots
 * for the earliest deadline; arms the hardware for that cpu, or masks the
 * RTC1 interrupt entirely if no cpu has a timer pending.
 */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
	u64 lowest = ULLONG_MAX;
	int c, bcpu = -1;

	head->next_cpu = -1;
	for (c = 0; c < head->ncpus; c++) {
		u64 exp = head->cpu[c].expires;
		if (exp < lowest) {
			bcpu = c;
			lowest = exp;
		}
	}
	if (bcpu >= 0) {
		head->next_cpu = bcpu;
		c = head->cpu[bcpu].lcpu;
		if (uv_setup_intr(c, lowest))
			/* If we didn't set it up in time, trigger */
			uv_rtc_send_IPI(c);
	} else {
		/* No timers armed on this blade: mask the RTC1 interrupt. */
		uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
			UVH_RTC1_INT_CONFIG_M_MASK);
	}
}
219 | |||
220 | /* | ||
221 | * Set expiration time for current cpu. | ||
222 | * | ||
223 | * Returns 1 if we missed the expiration time. | ||
224 | */ | ||
225 | static int uv_rtc_set_timer(int cpu, u64 expires) | ||
226 | { | ||
227 | int pnode = uv_cpu_to_pnode(cpu); | ||
228 | int bid = uv_cpu_to_blade_id(cpu); | ||
229 | struct uv_rtc_timer_head *head = blade_info[bid]; | ||
230 | int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id; | ||
231 | u64 *t = &head->cpu[bcpu].expires; | ||
232 | unsigned long flags; | ||
233 | int next_cpu; | ||
234 | |||
235 | spin_lock_irqsave(&head->lock, flags); | ||
236 | |||
237 | next_cpu = head->next_cpu; | ||
238 | *t = expires; | ||
239 | |||
240 | /* Will this one be next to go off? */ | ||
241 | if (next_cpu < 0 || bcpu == next_cpu || | ||
242 | expires < head->cpu[next_cpu].expires) { | ||
243 | head->next_cpu = bcpu; | ||
244 | if (uv_setup_intr(cpu, expires)) { | ||
245 | *t = ULLONG_MAX; | ||
246 | uv_rtc_find_next_timer(head, pnode); | ||
247 | spin_unlock_irqrestore(&head->lock, flags); | ||
248 | return -ETIME; | ||
249 | } | ||
250 | } | ||
251 | |||
252 | spin_unlock_irqrestore(&head->lock, flags); | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Unset expiration time for current cpu. | ||
258 | * | ||
259 | * Returns 1 if this timer was pending. | ||
260 | */ | ||
261 | static int uv_rtc_unset_timer(int cpu, int force) | ||
262 | { | ||
263 | int pnode = uv_cpu_to_pnode(cpu); | ||
264 | int bid = uv_cpu_to_blade_id(cpu); | ||
265 | struct uv_rtc_timer_head *head = blade_info[bid]; | ||
266 | int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id; | ||
267 | u64 *t = &head->cpu[bcpu].expires; | ||
268 | unsigned long flags; | ||
269 | int rc = 0; | ||
270 | |||
271 | spin_lock_irqsave(&head->lock, flags); | ||
272 | |||
273 | if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) | ||
274 | rc = 1; | ||
275 | |||
276 | if (rc) { | ||
277 | *t = ULLONG_MAX; | ||
278 | /* Was the hardware setup for this timer? */ | ||
279 | if (head->next_cpu == bcpu) | ||
280 | uv_rtc_find_next_timer(head, pnode); | ||
281 | } | ||
282 | |||
283 | spin_unlock_irqrestore(&head->lock, flags); | ||
284 | |||
285 | return rc; | ||
286 | } | ||
287 | |||
288 | |||
289 | /* | ||
290 | * Kernel interface routines. | ||
291 | */ | ||
292 | |||
293 | /* | ||
294 | * Read the RTC. | ||
295 | * | ||
296 | * Starting with HUB rev 2.0, the UV RTC register is replicated across all | ||
297 | * cachelines of it's own page. This allows faster simultaneous reads | ||
298 | * from a given socket. | ||
299 | */ | ||
300 | static cycle_t uv_read_rtc(struct clocksource *cs) | ||
301 | { | ||
302 | unsigned long offset; | ||
303 | |||
304 | if (uv_get_min_hub_revision_id() == 1) | ||
305 | offset = 0; | ||
306 | else | ||
307 | offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; | ||
308 | |||
309 | return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * Program the next event, relative to now | ||
314 | */ | ||
315 | static int uv_rtc_next_event(unsigned long delta, | ||
316 | struct clock_event_device *ced) | ||
317 | { | ||
318 | int ced_cpu = cpumask_first(ced->cpumask); | ||
319 | |||
320 | return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL)); | ||
321 | } | ||
322 | |||
323 | /* | ||
324 | * Setup the RTC timer in oneshot mode | ||
325 | */ | ||
326 | static void uv_rtc_timer_setup(enum clock_event_mode mode, | ||
327 | struct clock_event_device *evt) | ||
328 | { | ||
329 | int ced_cpu = cpumask_first(evt->cpumask); | ||
330 | |||
331 | switch (mode) { | ||
332 | case CLOCK_EVT_MODE_PERIODIC: | ||
333 | case CLOCK_EVT_MODE_ONESHOT: | ||
334 | case CLOCK_EVT_MODE_RESUME: | ||
335 | /* Nothing to do here yet */ | ||
336 | break; | ||
337 | case CLOCK_EVT_MODE_UNUSED: | ||
338 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
339 | uv_rtc_unset_timer(ced_cpu, 1); | ||
340 | break; | ||
341 | } | ||
342 | } | ||
343 | |||
344 | static void uv_rtc_interrupt(void) | ||
345 | { | ||
346 | int cpu = smp_processor_id(); | ||
347 | struct clock_event_device *ced = &per_cpu(cpu_ced, cpu); | ||
348 | |||
349 | if (!ced || !ced->event_handler) | ||
350 | return; | ||
351 | |||
352 | if (uv_rtc_unset_timer(cpu, 0) != 1) | ||
353 | return; | ||
354 | |||
355 | ced->event_handler(ced); | ||
356 | } | ||
357 | |||
/* Boot parameter "uvrtcevt": opt in to RTC-backed clockevents. */
static int __init uv_enable_evt_rtc(char *str)
{
	uv_rtc_evt_enable = 1;

	return 1;
}
__setup("uvrtcevt", uv_enable_evt_rtc);
365 | |||
/*
 * Per-cpu work item: clone the template clockevent device for the cpu we
 * are running on and register it.  Run on every cpu via
 * schedule_on_each_cpu() from uv_rtc_setup_clock().
 */
static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
	struct clock_event_device *ced = &__get_cpu_var(cpu_ced);

	*ced = clock_event_device_uv;
	ced->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(ced);
}
374 | |||
/*
 * Register the UV RTC as a clocksource and, when "uvrtcevt" was given and
 * the platform-IPI slot is free, as a per-cpu clockevent source.
 * Returns 0 on success or a negative errno; on clockevent failure the
 * already-registered clocksource is rolled back.
 */
static __init int uv_rtc_setup_clock(void)
{
	int rc;

	if (!is_uv_system())
		return -ENODEV;

	/* If single blade, prefer tsc */
	if (uv_num_possible_blades() == 1)
		clocksource_uv.rating = 250;

	rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
	if (rc)
		printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
	else
		printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
			sn_rtc_cycles_per_second/(unsigned long)1E6);

	/* Clockevents are opt-in and need the platform-IPI vector for ourselves. */
	if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
		return rc;

	/* Setup and register clockevents */
	rc = uv_rtc_allocate_timers();
	if (rc)
		goto error;

	x86_platform_ipi_callback = uv_rtc_interrupt;

	/* Derive mult from the RTC frequency against the template's shift. */
	clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
				NSEC_PER_SEC, clock_event_device_uv.shift);

	/* Min delta: one RTC cycle expressed in nanoseconds. */
	clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
						sn_rtc_cycles_per_second;

	clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
				(NSEC_PER_SEC / sn_rtc_cycles_per_second);

	rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
	if (rc) {
		/* Undo the IPI hook and timer allocation before bailing. */
		x86_platform_ipi_callback = NULL;
		uv_rtc_deallocate_timers();
		goto error;
	}

	printk(KERN_INFO "UV RTC clockevents registered\n");

	return 0;

error:
	clocksource_unregister(&clocksource_uv);
	printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);

	return rc;
}
arch_initcall(uv_rtc_setup_clock);
diff --git a/arch/x86/platform/visws/Makefile b/arch/x86/platform/visws/Makefile new file mode 100644 index 000000000000..91bc17ab2fd5 --- /dev/null +++ b/arch/x86/platform/visws/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_X86_VISWS) += visws_quirks.o | |||
diff --git a/arch/x86/platform/visws/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c new file mode 100644 index 000000000000..c7abf13a213f --- /dev/null +++ b/arch/x86/platform/visws/visws_quirks.c | |||
@@ -0,0 +1,608 @@ | |||
1 | /* | ||
2 | * SGI Visual Workstation support and quirks, unmaintained. | ||
3 | * | ||
4 | * Split out from setup.c by davej@suse.de | ||
5 | * | ||
6 | * Copyright (C) 1999 Bent Hagemark, Ingo Molnar | ||
7 | * | ||
8 | * SGI Visual Workstation interrupt controller | ||
9 | * | ||
10 | * The Cobalt system ASIC in the Visual Workstation contains a "Cobalt" APIC | ||
11 | * which serves as the main interrupt controller in the system. Non-legacy | ||
12 | * hardware in the system uses this controller directly. Legacy devices | ||
13 | * are connected to the PIIX4 which in turn has its 8259(s) connected to | ||
14 | * a of the Cobalt APIC entry. | ||
15 | * | ||
16 | * 09/02/2000 - Updated for 2.4 by jbarnes@sgi.com | ||
17 | * | ||
18 | * 25/11/2002 - Updated for 2.5 by Andrey Panin <pazke@orbita1.ru> | ||
19 | */ | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/smp.h> | ||
24 | |||
25 | #include <asm/visws/cobalt.h> | ||
26 | #include <asm/visws/piix4.h> | ||
27 | #include <asm/io_apic.h> | ||
28 | #include <asm/fixmap.h> | ||
29 | #include <asm/reboot.h> | ||
30 | #include <asm/setup.h> | ||
31 | #include <asm/apic.h> | ||
32 | #include <asm/e820.h> | ||
33 | #include <asm/time.h> | ||
34 | #include <asm/io.h> | ||
35 | |||
36 | #include <linux/kernel_stat.h> | ||
37 | |||
38 | #include <asm/i8259.h> | ||
39 | #include <asm/irq_vectors.h> | ||
40 | #include <asm/visws/lithium.h> | ||
41 | |||
42 | #include <linux/sched.h> | ||
43 | #include <linux/kernel.h> | ||
44 | #include <linux/pci.h> | ||
45 | #include <linux/pci_ids.h> | ||
46 | |||
extern int no_broadcast;

/* Board identity, filled in by visws_early_detect(); -1 means "unknown". */
/* NOTE(review): plain char may be unsigned on some ABIs, making the
 * "visws_board_type < 0" check in visws_early_detect() always false —
 * this is x86-only code where char is signed, so it holds here. */
char visws_board_type	= -1;
char visws_board_rev	= -1;
51 | |||
/* Start the Cobalt timer as the system tick source (x86_init timer hook). */
static void __init visws_time_init(void)
{
	printk(KERN_INFO "Starting Cobalt Timer system clock\n");

	/* Set the countdown value */
	co_cpu_write(CO_CPU_TIMEVAL, CO_TIME_HZ/HZ);

	/* Start the timer */
	co_cpu_write(CO_CPU_CTRL, co_cpu_read(CO_CPU_CTRL) | CO_CTRL_TIMERUN);

	/* Enable (unmask) the timer interrupt */
	co_cpu_write(CO_CPU_CTRL, co_cpu_read(CO_CPU_CTRL) & ~CO_CTRL_TIMEMASK);

	setup_default_timer_irq();
}
67 | |||
68 | /* Replaces the default init_ISA_irqs in the generic setup */ | ||
69 | static void __init visws_pre_intr_init(void); | ||
70 | |||
/* Quirk for machine specific memory setup. */

#define MB (1024 * 1024)

/* Framebuffer carve-out, consumed by the sgivwfb driver (hence the exports). */
unsigned long sgivwfb_mem_phys;
unsigned long sgivwfb_mem_size;
EXPORT_SYMBOL(sgivwfb_mem_phys);
EXPORT_SYMBOL(sgivwfb_mem_size);

/* Total memory reported by the bootloader, in bytes (see visws_memory_setup). */
long long mem_size __initdata = 0;
81 | |||
/*
 * Build the e820 memory map for a Visual Workstation: low RAM, main RAM,
 * and a reserved carve-out at the top of memory for the framebuffer.
 * Returns the map's origin string ("PROM").
 */
static char * __init visws_memory_setup(void)
{
	long long gfx_mem_size = 8 * MB;

	/* alt_mem_k is supplied by the bootloader. */
	mem_size = boot_params.alt_mem_k;

	if (!mem_size) {
		printk(KERN_WARNING "Bootloader didn't set memory size, upgrade it !\n");
		mem_size = 128 * MB;
	}

	/*
	 * this hardcodes the graphics memory to 8 MB
	 * it really should be sized dynamically (or at least
	 * set as a boot param)
	 */
	if (!sgivwfb_mem_size) {
		printk(KERN_WARNING "Defaulting to 8 MB framebuffer size\n");
		sgivwfb_mem_size = 8 * MB;
	}

	/*
	 * Trim to nearest MB
	 */
	sgivwfb_mem_size &= ~((1 << 20) - 1);
	/* NOTE(review): phys is placed using gfx_mem_size (8 MB) while the
	 * RAM region below is sized with the possibly-different trimmed
	 * sgivwfb_mem_size — confirm intent if these ever diverge. */
	sgivwfb_mem_phys = mem_size - gfx_mem_size;

	e820_add_region(0, LOWMEMSIZE(), E820_RAM);
	e820_add_region(HIGH_MEMORY, mem_size - sgivwfb_mem_size - HIGH_MEMORY, E820_RAM);
	e820_add_region(sgivwfb_mem_phys, sgivwfb_mem_size, E820_RESERVED);

	return "PROM";
}
115 | |||
/* Reboot quirk: installed as machine_ops.emergency_restart. */
static void visws_machine_emergency_restart(void)
{
	/*
	 * Visual Workstations restart after this
	 * register is poked on the PIIX4
	 */
	outb(PIIX4_RESET_VAL, PIIX4_RESET_PORT);
}
124 | |||
/* Power-off quirk: installed as pm_power_off. */
static void visws_machine_power_off(void)
{
	unsigned short pm_status;
	/* extern unsigned int pci_bus0; */

	/* Drain any pending power-management status bits first. */
	while ((pm_status = inw(PMSTS_PORT)) & 0x100)
		outw(pm_status, PMSTS_PORT);

	outw(PM_SUSPEND_ENABLE, PMCNTRL_PORT);

	mdelay(10);

#define PCI_CONF1_ADDRESS(bus, devfn, reg) \
	(0x80000000 | (bus << 16) | (devfn << 8) | (reg & ~3))

	/* outl(PCI_CONF1_ADDRESS(pci_bus0, SPECIAL_DEV, SPECIAL_REG), 0xCF8); */
	/* Write the "special stop" cycle directly to PCI config data. */
	outl(PIIX_SPECIAL_STOP, 0xCFC);
}
143 | |||
/* Intentional no-op: SMP config is read in visws_find_smp_config() instead. */
static void __init visws_get_smp_config(unsigned int early)
{
}
147 | |||
148 | /* | ||
149 | * The Visual Workstation is Intel MP compliant in the hardware | ||
150 | * sense, but it doesn't have a BIOS(-configuration table). | ||
151 | * No problem for Linux. | ||
152 | */ | ||
153 | |||
154 | static void __init MP_processor_info(struct mpc_cpu *m) | ||
155 | { | ||
156 | int ver, logical_apicid; | ||
157 | physid_mask_t apic_cpus; | ||
158 | |||
159 | if (!(m->cpuflag & CPU_ENABLED)) | ||
160 | return; | ||
161 | |||
162 | logical_apicid = m->apicid; | ||
163 | printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n", | ||
164 | m->cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "", | ||
165 | m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8, | ||
166 | (m->cpufeature & CPU_MODEL_MASK) >> 4, m->apicver); | ||
167 | |||
168 | if (m->cpuflag & CPU_BOOTPROCESSOR) | ||
169 | boot_cpu_physical_apicid = m->apicid; | ||
170 | |||
171 | ver = m->apicver; | ||
172 | if ((ver >= 0x14 && m->apicid >= 0xff) || m->apicid >= 0xf) { | ||
173 | printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n", | ||
174 | m->apicid, MAX_LOCAL_APIC); | ||
175 | return; | ||
176 | } | ||
177 | |||
178 | apic->apicid_to_cpu_present(m->apicid, &apic_cpus); | ||
179 | physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); | ||
180 | /* | ||
181 | * Validate version | ||
182 | */ | ||
183 | if (ver == 0x0) { | ||
184 | printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! " | ||
185 | "fixing up to 0x10. (tell your hw vendor)\n", | ||
186 | m->apicid); | ||
187 | ver = 0x10; | ||
188 | } | ||
189 | apic_version[m->apicid] = ver; | ||
190 | } | ||
191 | |||
/*
 * Read the CPU table the PROM leaves at a fixed physical address and
 * register each processor (x86_init find_smp_config hook).
 */
static void __init visws_find_smp_config(void)
{
	struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS);
	unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS));

	/* Clamp an implausible PROM cpu count to the hardware maximum. */
	if (ncpus > CO_CPU_MAX) {
		printk(KERN_WARNING "find_visws_smp: got cpu count of %d at %p\n",
			ncpus, mp);

		ncpus = CO_CPU_MAX;
	}

	/* Honor a maxcpus= command-line restriction. */
	if (ncpus > setup_max_cpus)
		ncpus = setup_max_cpus;

#ifdef CONFIG_X86_LOCAL_APIC
	smp_found_config = 1;
#endif
	while (ncpus--)
		MP_processor_info(mp++);

	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
}
215 | |||
216 | static void visws_trap_init(void); | ||
217 | |||
/*
 * Detect a Visual Workstation and, if found, install all the platform
 * override hooks (memory map, SMP config, irq/trap init, timer, PCI,
 * reboot) and read the board revision out of the SuperIO GPIO registers.
 */
void __init visws_early_detect(void)
{
	int raw;

	/* NOTE(review): masking with PIIX_GPI_BD_REG (a port address) rather
	 * than a dedicated mask constant looks suspicious, but this is the
	 * long-standing upstream expression — confirm before changing. */
	visws_board_type = (char)(inb_p(PIIX_GPI_BD_REG) & PIIX_GPI_BD_REG)
							 >> PIIX_GPI_BD_SHIFT;

	/* Not a Visual Workstation: leave all default x86 hooks in place. */
	if (visws_board_type < 0)
		return;

	/*
	 * Override the default platform setup functions
	 */
	x86_init.resources.memory_setup = visws_memory_setup;
	x86_init.mpparse.get_smp_config = visws_get_smp_config;
	x86_init.mpparse.find_smp_config = visws_find_smp_config;
	x86_init.irqs.pre_vector_init = visws_pre_intr_init;
	x86_init.irqs.trap_init = visws_trap_init;
	x86_init.timers.timer_init = visws_time_init;
	x86_init.pci.init = pci_visws_init;
	x86_init.pci.init_irq = x86_init_noop;

	/*
	 * Install reboot quirks:
	 */
	pm_power_off = visws_machine_power_off;
	machine_ops.emergency_restart = visws_machine_emergency_restart;

	/*
	 * Do not use broadcast IPIs:
	 */
	no_broadcast = 0;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * Turn off IO-APIC detection and initialization:
	 */
	skip_ioapic_setup = 1;
#endif

	/*
	 * Get Board rev.
	 * First, we have to initialize the 307 part to allow us access
	 * to the GPIO registers.  Let's map them at 0x0fc0 which is right
	 * after the PIIX4 PM section.
	 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_GP_DEV, SIO_DATA);	/* Talk to GPIO regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_GP_MSB, SIO_DATA);	/* MSB of GPIO base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_GP_LSB, SIO_DATA);	/* LSB of GPIO base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable GPIO registers. */

	/*
	 * Now, we have to map the power management section to write
	 * a bit which enables access to the GPIO registers.
	 * What lunatic came up with this shit?
	 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_PM_DEV, SIO_DATA);	/* Talk to GPIO regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_PM_MSB, SIO_DATA);	/* MSB of PM base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_PM_LSB, SIO_DATA);	/* LSB of PM base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable PM registers. */

	/*
	 * Now, write the PM register which enables the GPIO registers.
	 */
	outb_p(SIO_PM_FER2, SIO_PM_INDEX);
	outb_p(SIO_PM_GP_EN, SIO_PM_DATA);

	/*
	 * Now, initialize the GPIO registers.
	 * We want them all to be inputs which is the
	 * power on default, so let's leave them alone.
	 * So, let's just read the board rev!
	 */
	raw = inb_p(SIO_GP_DATA1);
	raw &= 0x7f;	/* 7 bits of valid board revision ID. */

	/* Map the raw GPIO value to a board revision per board type. */
	if (visws_board_type == VISWS_320) {
		if (raw < 0x6) {
			visws_board_rev = 4;
		} else if (raw < 0xc) {
			visws_board_rev = 5;
		} else {
			visws_board_rev = 6;
		}
	} else if (visws_board_type == VISWS_540) {
		visws_board_rev = 2;
	} else {
		visws_board_rev = raw;
	}

	printk(KERN_INFO "Silicon Graphics Visual Workstation %s (rev %d) detected\n",
	       (visws_board_type == VISWS_320 ? "320" :
	       (visws_board_type == VISWS_540 ? "540" :
		       "unknown")), visws_board_rev);
}
327 | |||
/* Interrupt-enable bit groups for the two Lithium host bridges. */
#define A01234 (LI_INTA_0 | LI_INTA_1 | LI_INTA_2 | LI_INTA_3 | LI_INTA_4)
#define BCD (LI_INTB | LI_INTC | LI_INTD)
#define ALLDEVS (A01234 | BCD)

/* Map both Lithium host bridges, sanity-check their PCI IDs, and enable
 * all their device interrupt lines. */
static __init void lithium_init(void)
{
	set_fixmap(FIX_LI_PCIA, LI_PCI_A_PHYS);
	set_fixmap(FIX_LI_PCIB, LI_PCI_B_PHYS);

	if ((li_pcia_read16(PCI_VENDOR_ID) != PCI_VENDOR_ID_SGI) ||
	    (li_pcia_read16(PCI_DEVICE_ID) != PCI_DEVICE_ID_SGI_LITHIUM)) {
		printk(KERN_EMERG "Lithium hostbridge %c not found\n", 'A');
		/* panic("This machine is not SGI Visual Workstation 320/540"); */
	}

	if ((li_pcib_read16(PCI_VENDOR_ID) != PCI_VENDOR_ID_SGI) ||
	    (li_pcib_read16(PCI_DEVICE_ID) != PCI_DEVICE_ID_SGI_LITHIUM)) {
		printk(KERN_EMERG "Lithium hostbridge %c not found\n", 'B');
		/* panic("This machine is not SGI Visual Workstation 320/540"); */
	}

	li_pcia_write16(LI_PCI_INTEN, ALLDEVS);
	li_pcib_write16(LI_PCI_INTEN, ALLDEVS);
}
352 | |||
/* Map and enable the local APIC and the Cobalt APIC. */
static __init void cobalt_init(void)
{
	/*
	 * On normal SMP PC this is used only with SMP, but we have to
	 * use it and set it up here to start the Cobalt clock
	 */
	set_fixmap(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE);
	setup_local_APIC();
	printk(KERN_INFO "Local APIC Version %#x, ID %#x\n",
		(unsigned int)apic_read(APIC_LVR),
		(unsigned int)apic_read(APIC_ID));

	set_fixmap(FIX_CO_CPU, CO_CPU_PHYS);
	set_fixmap(FIX_CO_APIC, CO_APIC_PHYS);
	printk(KERN_INFO "Cobalt Revision %#lx, APIC ID %#lx\n",
		co_cpu_read(CO_CPU_REV), co_apic_read(CO_APIC_ID));

	/* Enable Cobalt APIC being careful to NOT change the ID! */
	co_apic_write(CO_APIC_ID, co_apic_read(CO_APIC_ID) | CO_APIC_ENABLE);

	printk(KERN_INFO "Cobalt APIC enabled: ID reg %#lx\n",
		co_apic_read(CO_APIC_ID));
}
376 | |||
/* x86_init trap_init hook: bring up the Lithium bridges, then Cobalt. */
static void __init visws_trap_init(void)
{
	lithium_init();
	cobalt_init();
}
382 | |||
383 | /* | ||
384 | * IRQ controller / APIC support: | ||
385 | */ | ||
386 | |||
387 | static DEFINE_SPINLOCK(cobalt_lock); | ||
388 | |||
389 | /* | ||
390 | * Set the given Cobalt APIC Redirection Table entry to point | ||
391 | * to the given IDT vector/index. | ||
392 | */ | ||
static inline void co_apic_set(int entry, int irq)
{
	/* Level-triggered; vector is the IRQ offset into the external-vector range. */
	co_apic_write(CO_APIC_LO(entry), CO_APIC_LEVEL | (irq + FIRST_EXTERNAL_VECTOR));
	/* High word cleared — NOTE(review): presumably destination bits; confirm vs. Cobalt docs. */
	co_apic_write(CO_APIC_HI(entry), 0);
}
398 | |||
399 | /* | ||
400 | * Cobalt (IO)-APIC functions to handle PCI devices. | ||
401 | */ | ||
402 | static inline int co_apic_ide0_hack(void) | ||
403 | { | ||
404 | extern char visws_board_type; | ||
405 | extern char visws_board_rev; | ||
406 | |||
407 | if (visws_board_type == VISWS_320 && visws_board_rev == 5) | ||
408 | return 5; | ||
409 | return CO_APIC_IDE0; | ||
410 | } | ||
411 | |||
412 | static int is_co_apic(unsigned int irq) | ||
413 | { | ||
414 | if (IS_CO_APIC(irq)) | ||
415 | return CO_APIC(irq); | ||
416 | |||
417 | switch (irq) { | ||
418 | case 0: return CO_APIC_CPU; | ||
419 | case CO_IRQ_IDE0: return co_apic_ide0_hack(); | ||
420 | case CO_IRQ_IDE1: return CO_APIC_IDE1; | ||
421 | default: return -1; | ||
422 | } | ||
423 | } | ||
424 | |||
425 | |||
426 | /* | ||
427 | * This is the SGI Cobalt (IO-)APIC: | ||
428 | */ | ||
static void enable_cobalt_irq(struct irq_data *data)
{
	/* (Re)program the redirection entry for this IRQ, which unmasks it. */
	co_apic_set(is_co_apic(data->irq), data->irq);
}
433 | |||
static void disable_cobalt_irq(struct irq_data *data)
{
	int entry = is_co_apic(data->irq);

	co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK);
	/* Read back so the mask write is posted before we return. */
	co_apic_read(CO_APIC_LO(entry));
}
441 | |||
static void ack_cobalt_irq(struct irq_data *data)
{
	unsigned long flags;

	/* Mask the source and issue the local-APIC EOI under one lock. */
	spin_lock_irqsave(&cobalt_lock, flags);
	disable_cobalt_irq(data);
	apic_write(APIC_EOI, APIC_EIO_ACK);
	spin_unlock_irqrestore(&cobalt_lock, flags);
}
451 | |||
/* irq_chip for interrupts routed through the Cobalt (IO-)APIC. */
static struct irq_chip cobalt_irq_type = {
	.name		= "Cobalt-APIC",
	.irq_enable	= enable_cobalt_irq,
	.irq_disable	= disable_cobalt_irq,
	.irq_ack	= ack_cobalt_irq,
};
458 | |||
459 | |||
460 | /* | ||
461 | * This is the PIIX4-based 8259 that is wired up indirectly to Cobalt | ||
462 | * -- not the manner expected by the code in i8259.c. | ||
463 | * | ||
464 | * there is a 'master' physical interrupt source that gets sent to | ||
465 | * the CPU. But in the chipset there are various 'virtual' interrupts | ||
466 | * waiting to be handled. We represent this to Linux through a 'master' | ||
467 | * interrupt controller type, and through a special virtual interrupt- | ||
468 | * controller. Device drivers only see the virtual interrupt sources. | ||
469 | */ | ||
static unsigned int startup_piix4_master_irq(struct irq_data *data)
{
	/* Reinitialize the legacy 8259 pair, then unmask the Cobalt entry. */
	legacy_pic->init(0);
	enable_cobalt_irq(data);
	return 0;	/* no interrupt pending at startup */
}
476 | |||
/* irq_chip for the single 'master' interrupt behind which the 8259 sits. */
static struct irq_chip piix4_master_irq_type = {
	.name		= "PIIX4-master",
	.irq_startup	= startup_piix4_master_irq,
	.irq_ack	= ack_cobalt_irq,
};
482 | |||
483 | static void pii4_mask(struct irq_data *data) { } | ||
484 | |||
485 | static struct irq_chip piix4_virtual_irq_type = { | ||
486 | .name = "PIIX4-virtual", | ||
487 | .irq_mask = pii4_mask, | ||
488 | }; | ||
489 | |||
490 | /* | ||
491 | * PIIX4-8259 master/virtual functions to handle interrupt requests | ||
492 | * from legacy devices: floppy, parallel, serial, rtc. | ||
493 | * | ||
494 | * None of these get Cobalt APIC entries, neither do they have IDT | ||
495 | * entries. These interrupts are purely virtual and distributed from | ||
496 | * the 'master' interrupt source: CO_IRQ_8259. | ||
497 | * | ||
498 | * When the 8259 interrupts its handler figures out which of these | ||
499 | * devices is interrupting and dispatches to its handler. | ||
500 | * | ||
501 | * CAREFUL: devices see the 'virtual' interrupt only. Thus disable/ | ||
502 | * enable_irq gets the right irq. This 'master' irq is never directly | ||
503 | * manipulated by any driver. | ||
504 | */ | ||
/*
 * Handler for the 'master' interrupt: poll the PIIX4 8259 pair to find
 * the real source, mask and specific-EOI it, then dispatch it to Linux
 * as a virtual IRQ via generic_handle_irq().
 */
static irqreturn_t piix4_master_intr(int irq, void *dev_id)
{
	unsigned long flags;
	int realirq;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	/* Find out what's interrupting in the PIIX4 master 8259 */
	outb(0x0c, 0x20);		/* OCW3 Poll command */
	realirq = inb(0x20);

	/*
	 * Bit 7 == 0 means invalid/spurious
	 */
	if (unlikely(!(realirq & 0x80)))
		goto out_unlock;

	realirq &= 7;

	/* IRQ2 is the cascade: poll the slave 8259 for the real source. */
	if (unlikely(realirq == 2)) {
		outb(0x0c, 0xa0);	/* OCW3 Poll command, slave PIC */
		realirq = inb(0xa0);

		if (unlikely(!(realirq & 0x80)))
			goto out_unlock;

		realirq = (realirq & 7) + 8;	/* slave IRQs map to 8..15 */
	}

	/* mask and ack interrupt */
	cached_irq_mask |= 1 << realirq;
	if (unlikely(realirq > 7)) {
		/* slave source: mask on slave, EOI slave then the cascade line */
		inb(0xa1);
		outb(cached_slave_mask, 0xa1);
		outb(0x60 + (realirq & 7), 0xa0);	/* specific EOI, slave */
		outb(0x60 + 2, 0x20);			/* specific EOI for IRQ2 on master */
	} else {
		inb(0x21);
		outb(cached_master_mask, 0x21);
		outb(0x60 + realirq, 0x20);		/* specific EOI, master */
	}

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	/*
	 * handle this 'virtual interrupt' as a Cobalt one now.
	 */
	generic_handle_irq(realirq);

	return IRQ_HANDLED;

out_unlock:
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return IRQ_NONE;
}
560 | |||
/* Action installed on CO_IRQ_8259: demultiplexes the legacy 8259 sources. */
static struct irqaction master_action = {
	.handler =	piix4_master_intr,
	.name =		"PIIX4-8259",
	.flags =	IRQF_NO_THREAD,
};

/* Placeholder action on IRQ2 so the cascade line is claimed but never handled. */
static struct irqaction cascade_action = {
	.handler = 	no_action,
	.name =		"cascade",
	.flags =	IRQF_NO_THREAD,
};
572 | |||
/* Borrow the i8259 chip's mask/unmask ops for the virtual PIIX4 chip. */
static inline void set_piix4_virtual_irq_type(void)
{
	piix4_virtual_irq_type.irq_enable = i8259A_chip.irq_unmask;
	piix4_virtual_irq_type.irq_disable = i8259A_chip.irq_mask;
	piix4_virtual_irq_type.irq_unmask = i8259A_chip.irq_unmask;
}
579 | |||
580 | static void __init visws_pre_intr_init(void) | ||
581 | { | ||
582 | int i; | ||
583 | |||
584 | set_piix4_virtual_irq_type(); | ||
585 | |||
586 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { | ||
587 | struct irq_chip *chip = NULL; | ||
588 | |||
589 | if (i == 0) | ||
590 | chip = &cobalt_irq_type; | ||
591 | else if (i == CO_IRQ_IDE0) | ||
592 | chip = &cobalt_irq_type; | ||
593 | else if (i == CO_IRQ_IDE1) | ||
594 | chip = &cobalt_irq_type; | ||
595 | else if (i == CO_IRQ_8259) | ||
596 | chip = &piix4_master_irq_type; | ||
597 | else if (i < CO_IRQ_APIC0) | ||
598 | chip = &piix4_virtual_irq_type; | ||
599 | else if (IS_CO_APIC(i)) | ||
600 | chip = &cobalt_irq_type; | ||
601 | |||
602 | if (chip) | ||
603 | irq_set_chip(i, chip); | ||
604 | } | ||
605 | |||
606 | setup_irq(CO_IRQ_8259, &master_action); | ||
607 | setup_irq(2, &cascade_action); | ||
608 | } | ||