Diffstat (limited to 'arch/x86/kernel/acpi')
-rw-r--r--  arch/x86/kernel/acpi/Makefile        |    5
-rw-r--r--  arch/x86/kernel/acpi/Makefile_32     |   10
-rw-r--r--  arch/x86/kernel/acpi/Makefile_64     |    7
-rw-r--r--  arch/x86/kernel/acpi/boot.c          | 1326
-rw-r--r--  arch/x86/kernel/acpi/cstate.c        |  164
-rw-r--r--  arch/x86/kernel/acpi/earlyquirk_32.c |   84
-rw-r--r--  arch/x86/kernel/acpi/processor.c     |   75
-rw-r--r--  arch/x86/kernel/acpi/sleep_32.c      |  110
-rw-r--r--  arch/x86/kernel/acpi/sleep_64.c      |  120
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S     |  321
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S     |  456
11 files changed, 2678 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
new file mode 100644
index 000000000000..3d5671939542
--- /dev/null
+++ b/arch/x86/kernel/acpi/Makefile
@@ -0,0 +1,5 @@
1ifeq ($(CONFIG_X86_32),y)
2include ${srctree}/arch/x86/kernel/acpi/Makefile_32
3else
4include ${srctree}/arch/x86/kernel/acpi/Makefile_64
5endif
diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32
new file mode 100644
index 000000000000..a4852a2e9190
--- /dev/null
+++ b/arch/x86/kernel/acpi/Makefile_32
@@ -0,0 +1,10 @@
1obj-$(CONFIG_ACPI) += boot.o
2ifneq ($(CONFIG_PCI),)
3obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o
4endif
5obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o
6
7ifneq ($(CONFIG_ACPI_PROCESSOR),)
8obj-y += cstate.o processor.o
9endif
10
diff --git a/arch/x86/kernel/acpi/Makefile_64 b/arch/x86/kernel/acpi/Makefile_64
new file mode 100644
index 000000000000..629425bc002d
--- /dev/null
+++ b/arch/x86/kernel/acpi/Makefile_64
@@ -0,0 +1,7 @@
1obj-y := boot.o
2obj-$(CONFIG_ACPI_SLEEP) += sleep_64.o wakeup_64.o
3
4ifneq ($(CONFIG_ACPI_PROCESSOR),)
5obj-y += processor.o cstate.o
6endif
7
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
new file mode 100644
index 000000000000..afd2afe9102d
--- /dev/null
+++ b/arch/x86/kernel/acpi/boot.c
@@ -0,0 +1,1326 @@
1/*
2 * boot.c - Architecture-Specific Low-Level ACPI Boot Support
3 *
4 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26#include <linux/init.h>
27#include <linux/acpi.h>
28#include <linux/acpi_pmtmr.h>
29#include <linux/efi.h>
30#include <linux/cpumask.h>
31#include <linux/module.h>
32#include <linux/dmi.h>
33#include <linux/irq.h>
34#include <linux/bootmem.h>
35#include <linux/ioport.h>
36
37#include <asm/pgtable.h>
38#include <asm/io_apic.h>
39#include <asm/apic.h>
40#include <asm/io.h>
41#include <asm/mpspec.h>
42
43static int __initdata acpi_force = 0;
44
45#ifdef CONFIG_ACPI
46int acpi_disabled = 0;
47#else
48int acpi_disabled = 1;
49#endif
50EXPORT_SYMBOL(acpi_disabled);
51
52#ifdef CONFIG_X86_64
53
54#include <asm/proto.h>
55
56static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
57
58
59#else /* X86 */
60
61#ifdef CONFIG_X86_LOCAL_APIC
62#include <mach_apic.h>
63#include <mach_mpparse.h>
64#endif /* CONFIG_X86_LOCAL_APIC */
65
66#endif /* X86 */
67
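/*
 * A MADT subtable is rejected when its pointer is NULL, when it extends
 * past the end of the table, or when its declared length is smaller than
 * the structure we are about to read from it.
 */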
68#define BAD_MADT_ENTRY(entry, end) ( \
69 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
70 ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
71
72#define PREFIX "ACPI: "
73
74int acpi_noirq; /* skip ACPI IRQ initialization */
75int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
76int acpi_ht __initdata = 1; /* enable HT */
77
78int acpi_lapic;
79int acpi_ioapic;
80int acpi_strict;
81EXPORT_SYMBOL(acpi_strict);
82
83u8 acpi_sci_flags __initdata;
84int acpi_sci_override_gsi __initdata;
85int acpi_skip_timer_override __initdata;
86int acpi_use_timer_override __initdata;
87
88#ifdef CONFIG_X86_LOCAL_APIC
89static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
90#endif
91
92#ifndef __HAVE_ARCH_CMPXCHG
93#warning ACPI requires CMPXCHG, which needs i486 or later hardware
94#endif
95
96/* --------------------------------------------------------------------------
97 Boot-time Configuration
98 -------------------------------------------------------------------------- */
99
100/*
101 * The default interrupt routing model is PIC (8259). This gets
102 * overridden if IOAPICs are enumerated (below).
103 */
104enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
105
106#ifdef CONFIG_X86_64
107
108/* rely on all ACPI tables being in the direct mapping */
109char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
110{
111 if (!phys_addr || !size)
112 return NULL;
113
114 if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
115 return __va(phys_addr);
116
117 return NULL;
118}
119
120#else
121
122/*
123 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
124 * to map the target physical address. The problem is that set_fixmap()
125 * provides a single page, and it is possible that the page is not
126 * sufficient.
127 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
128 * i.e. until the next __va_range() call.
129 *
130 * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
131 * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
132 * count idx down while incrementing the phys address.
133 */
134char *__acpi_map_table(unsigned long phys, unsigned long size)
135{
136 unsigned long base, offset, mapped_size;
137 int idx;
138
139 if (phys + size < 8 * 1024 * 1024)
140 return __va(phys);
141
142 offset = phys & (PAGE_SIZE - 1);
143 mapped_size = PAGE_SIZE - offset;
144 set_fixmap(FIX_ACPI_END, phys);
145 base = fix_to_virt(FIX_ACPI_END);
146
147 /*
148 * Most tables fit within the single page mapped above; map further fixmap slots only if the table spills past it.
149 */
150 idx = FIX_ACPI_END;
151 while (mapped_size < size) {
152 if (--idx < FIX_ACPI_BEGIN)
153 return NULL; /* cannot handle this */
154 phys += PAGE_SIZE;
155 set_fixmap(idx, phys);
156 mapped_size += PAGE_SIZE;
157 }
158
159 return ((unsigned char *)base + offset);
160}
161#endif
162
163#ifdef CONFIG_PCI_MMCONFIG
164/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
165struct acpi_mcfg_allocation *pci_mmcfg_config;
166int pci_mmcfg_config_num;
167
168int __init acpi_parse_mcfg(struct acpi_table_header *header)
169{
170 struct acpi_table_mcfg *mcfg;
171 unsigned long i;
172 int config_size;
173
174 if (!header)
175 return -EINVAL;
176
177 mcfg = (struct acpi_table_mcfg *)header;
178
179 /* how many config structures do we have */
180 pci_mmcfg_config_num = 0;
181 i = header->length - sizeof(struct acpi_table_mcfg);
182 while (i >= sizeof(struct acpi_mcfg_allocation)) {
183 ++pci_mmcfg_config_num;
184 i -= sizeof(struct acpi_mcfg_allocation);
185 }
186 if (pci_mmcfg_config_num == 0) {
187 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
188 return -ENODEV;
189 }
190
191 config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
192 pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
193 if (!pci_mmcfg_config) {
194 printk(KERN_WARNING PREFIX
195 "No memory for MCFG config tables\n");
196 return -ENOMEM;
197 }
198
199 memcpy(pci_mmcfg_config, &mcfg[1], config_size);
200 for (i = 0; i < pci_mmcfg_config_num; ++i) {
201 if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
202 printk(KERN_ERR PREFIX
203 "MMCONFIG not in low 4GB of memory\n");
204 kfree(pci_mmcfg_config);
205 pci_mmcfg_config_num = 0;
206 return -ENODEV;
207 }
208 }
209
210 return 0;
211}
212#endif /* CONFIG_PCI_MMCONFIG */
213
214#ifdef CONFIG_X86_LOCAL_APIC
215static int __init acpi_parse_madt(struct acpi_table_header *table)
216{
217 struct acpi_table_madt *madt = NULL;
218
219 if (!cpu_has_apic)
220 return -EINVAL;
221
222 madt = (struct acpi_table_madt *)table;
223 if (!madt) {
224 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
225 return -ENODEV;
226 }
227
228 if (madt->address) {
229 acpi_lapic_addr = (u64) madt->address;
230
231 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
232 madt->address);
233 }
234
235 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
236
237 return 0;
238}
239
240static int __init
241acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
242{
243 struct acpi_madt_local_apic *processor = NULL;
244
245 processor = (struct acpi_madt_local_apic *)header;
246
247 if (BAD_MADT_ENTRY(processor, end))
248 return -EINVAL;
249
250 acpi_table_print_madt_entry(header);
251
252 /*
253 * We need to register disabled CPUs as well, so that they can
254 * be counted. This allows us to size cpus_possible_map more
255 * accurately and to avoid preallocating memory for all NR_CPUS
256 * when CPU hotplug is used.
257 *
258 */
259 mp_register_lapic(processor->id, /* APIC ID */
260 processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */
261
262 return 0;
263}
264
265static int __init
266acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
267 const unsigned long end)
268{
269 struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
270
271 lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
272
273 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
274 return -EINVAL;
275
276 acpi_lapic_addr = lapic_addr_ovr->address;
277
278 return 0;
279}
280
281static int __init
282acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
283{
284 struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
285
286 lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
287
288 if (BAD_MADT_ENTRY(lapic_nmi, end))
289 return -EINVAL;
290
291 acpi_table_print_madt_entry(header);
292
293 if (lapic_nmi->lint != 1)
294 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
295
296 return 0;
297}
298
299#endif /*CONFIG_X86_LOCAL_APIC */
300
301#ifdef CONFIG_X86_IO_APIC
302
303static int __init
304acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
305{
306 struct acpi_madt_io_apic *ioapic = NULL;
307
308 ioapic = (struct acpi_madt_io_apic *)header;
309
310 if (BAD_MADT_ENTRY(ioapic, end))
311 return -EINVAL;
312
313 acpi_table_print_madt_entry(header);
314
315 mp_register_ioapic(ioapic->id,
316 ioapic->address, ioapic->global_irq_base);
317
318 return 0;
319}
320
321/*
322 * Parse Interrupt Source Override for the ACPI SCI
323 */
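/*
 * MADT INTI flags use two bits each for polarity and trigger mode:
 * 0 = conforms to bus default, 1 = active-high/edge, 3 = active-low/level.
 * The SCI's default is level-triggered, active-low, which is what the two
 * checks below substitute for "conforms".
 */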
324static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
325{
326 if (trigger == 0) /* compatible SCI trigger is level */
327 trigger = 3;
328
329 if (polarity == 0) /* compatible SCI polarity is low */
330 polarity = 3;
331
332 /* Command-line over-ride via acpi_sci= */
333 if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
334 trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
335
336 if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
337 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
338
339 /*
340 * mp_config_acpi_legacy_irqs() has already set up IRQs < 16.
341 * If the GSI is < 16, this updates its flags; otherwise it
342 * creates a new mp_irqs[] entry.
343 */
344 mp_override_legacy_irq(gsi, polarity, trigger, gsi);
345
346 /*
347 * stash over-ride to indicate we've been here
348 * and for later update of acpi_gbl_FADT
349 */
350 acpi_sci_override_gsi = gsi;
351 return;
352}
353
354static int __init
355acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
356 const unsigned long end)
357{
358 struct acpi_madt_interrupt_override *intsrc = NULL;
359
360 intsrc = (struct acpi_madt_interrupt_override *)header;
361
362 if (BAD_MADT_ENTRY(intsrc, end))
363 return -EINVAL;
364
365 acpi_table_print_madt_entry(header);
366
367 if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
368 acpi_sci_ioapic_setup(intsrc->global_irq,
369 intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
370 (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
371 return 0;
372 }
373
374 if (acpi_skip_timer_override &&
375 intsrc->source_irq == 0 && intsrc->global_irq == 2) {
376 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
377 return 0;
378 }
379
380 mp_override_legacy_irq(intsrc->source_irq,
381 intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
382 (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
383 intsrc->global_irq);
384
385 return 0;
386}
387
388static int __init
389acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
390{
391 struct acpi_madt_nmi_source *nmi_src = NULL;
392
393 nmi_src = (struct acpi_madt_nmi_source *)header;
394
395 if (BAD_MADT_ENTRY(nmi_src, end))
396 return -EINVAL;
397
398 acpi_table_print_madt_entry(header);
399
400 /* TBD: Support nmisrc entries? */
401
402 return 0;
403}
404
405#endif /* CONFIG_X86_IO_APIC */
406
407/*
408 * acpi_pic_sci_set_trigger()
409 *
410 * use ELCR to set PIC-mode trigger type for SCI
411 *
412 * If a PIC-mode SCI is not recognized or gives spurious IRQ7s,
413 * it may require edge trigger -- use "acpi_sci=edge".
414 *
415 * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
416 * for the 8259 PIC. bit[n] = 1 means irq[n] is level triggered, otherwise edge.
417 * ELCR1 covers IRQs 0-7 (bits for IRQ 0, 1, 2 must be 0)
418 * ELCR2 covers IRQs 8-15 (bits for IRQ 8, 13 must be 0)
419 */
420
421void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
422{
423 unsigned int mask = 1 << irq;
424 unsigned int old, new;
425
426 /* Real old ELCR mask */
427 old = inb(0x4d0) | (inb(0x4d1) << 8);
428
429 /*
430 * If we use ACPI to set PCI IRQs, then we should clear the ELCR,
431 * since we will set it correctly as we enable PCI IRQ
432 * routing.
433 */
434 new = acpi_noirq ? old : 0;
435
436 /*
437 * Update SCI information in the ELCR, it isn't in the PCI
438 * routing tables..
439 */
440 switch (trigger) {
441 case 1: /* Edge - clear */
442 new &= ~mask;
443 break;
444 case 3: /* Level - set */
445 new |= mask;
446 break;
447 }
448
449 if (old == new)
450 return;
451
452 printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
453 outb(new, 0x4d0);
454 outb(new >> 8, 0x4d1);
455}
456
457int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
458{
459 *irq = gsi;
460 return 0;
461}
462
463/*
464 * success: return IRQ number (>=0)
465 * failure: return < 0
466 */
467int acpi_register_gsi(u32 gsi, int triggering, int polarity)
468{
469 unsigned int irq;
470 unsigned int plat_gsi = gsi;
471
472#ifdef CONFIG_PCI
473 /*
474 * Make sure all (legacy) PCI IRQs are set as level-triggered.
475 */
476 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
477 extern void eisa_set_level_irq(unsigned int irq);
478
479 if (triggering == ACPI_LEVEL_SENSITIVE)
480 eisa_set_level_irq(gsi);
481 }
482#endif
483
484#ifdef CONFIG_X86_IO_APIC
485 if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
486 plat_gsi = mp_register_gsi(gsi, triggering, polarity);
487 }
488#endif
489 acpi_gsi_to_irq(plat_gsi, &irq);
490 return irq;
491}
492
493EXPORT_SYMBOL(acpi_register_gsi);
494
495/*
496 * ACPI based hotplug support for CPU
497 */
498#ifdef CONFIG_ACPI_HOTPLUG_CPU
499int acpi_map_lsapic(acpi_handle handle, int *pcpu)
500{
501 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
502 union acpi_object *obj;
503 struct acpi_madt_local_apic *lapic;
504 cpumask_t tmp_map, new_map;
505 u8 physid;
506 int cpu;
507
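	/*
	 * _MAT returns a buffer containing this processor's MADT entry
	 * (a Local APIC structure on this architecture).
	 */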
508 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
509 return -EINVAL;
510
511 if (!buffer.length || !buffer.pointer)
512 return -EINVAL;
513
514 obj = buffer.pointer;
515 if (obj->type != ACPI_TYPE_BUFFER ||
516 obj->buffer.length < sizeof(*lapic)) {
517 kfree(buffer.pointer);
518 return -EINVAL;
519 }
520
521 lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
522
523 if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
524 !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
525 kfree(buffer.pointer);
526 return -EINVAL;
527 }
528
529 physid = lapic->id;
530
531 kfree(buffer.pointer);
532 buffer.length = ACPI_ALLOCATE_BUFFER;
533 buffer.pointer = NULL;
534
535 tmp_map = cpu_present_map;
536 mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
537
538 /*
539 * If mp_register_lapic successfully generates a new logical cpu
540 * number, then the following will get us exactly what was mapped
541 */
542 cpus_andnot(new_map, cpu_present_map, tmp_map);
543 if (cpus_empty(new_map)) {
544 printk ("Unable to map lapic to logical cpu number\n");
545 return -EINVAL;
546 }
547
548 cpu = first_cpu(new_map);
549
550 *pcpu = cpu;
551 return 0;
552}
553
554EXPORT_SYMBOL(acpi_map_lsapic);
555
556int acpi_unmap_lsapic(int cpu)
557{
558 x86_cpu_to_apicid[cpu] = -1;
559 cpu_clear(cpu, cpu_present_map);
560 num_processors--;
561
562 return (0);
563}
564
565EXPORT_SYMBOL(acpi_unmap_lsapic);
566#endif /* CONFIG_ACPI_HOTPLUG_CPU */
567
568int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
569{
570 /* TBD */
571 return -EINVAL;
572}
573
574EXPORT_SYMBOL(acpi_register_ioapic);
575
576int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
577{
578 /* TBD */
579 return -EINVAL;
580}
581
582EXPORT_SYMBOL(acpi_unregister_ioapic);
583
584static unsigned long __init
585acpi_scan_rsdp(unsigned long start, unsigned long length)
586{
587 unsigned long offset = 0;
588 unsigned long sig_len = sizeof("RSD PTR ") - 1;
589
590 /*
591 * Scan all 16-byte boundaries of the physical memory region for the
592 * RSDP signature.
593 */
594 for (offset = 0; offset < length; offset += 16) {
595 if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
596 continue;
597 return (start + offset);
598 }
599
600 return 0;
601}
602
603static int __init acpi_parse_sbf(struct acpi_table_header *table)
604{
605 struct acpi_table_boot *sb;
606
607 sb = (struct acpi_table_boot *)table;
608 if (!sb) {
609 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
610 return -ENODEV;
611 }
612
613 sbf_port = sb->cmos_index; /* Save CMOS port */
614
615 return 0;
616}
617
618#ifdef CONFIG_HPET_TIMER
619#include <asm/hpet.h>
620
621static struct __initdata resource *hpet_res;
622
623static int __init acpi_parse_hpet(struct acpi_table_header *table)
624{
625 struct acpi_table_hpet *hpet_tbl;
626
627 hpet_tbl = (struct acpi_table_hpet *)table;
628 if (!hpet_tbl) {
629 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
630 return -ENODEV;
631 }
632
633 if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
634 printk(KERN_WARNING PREFIX "HPET timers must be located in "
635 "memory.\n");
636 return -1;
637 }
638
639 hpet_address = hpet_tbl->address.address;
640 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
641 hpet_tbl->id, hpet_address);
642
643 /*
644 * Allocate and initialize the HPET firmware resource for adding into
645 * the resource tree during the lateinit timeframe.
646 */
647#define HPET_RESOURCE_NAME_SIZE 9
648 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
649
650 if (!hpet_res)
651 return 0;
652
653 memset(hpet_res, 0, sizeof(*hpet_res));
654 hpet_res->name = (void *)&hpet_res[1];
655 hpet_res->flags = IORESOURCE_MEM;
656 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
657 hpet_tbl->sequence);
658
659 hpet_res->start = hpet_address;
660 hpet_res->end = hpet_address + (1 * 1024) - 1;
661
662 return 0;
663}
664
665/*
666 * hpet_insert_resource inserts the HPET resources used into the resource
667 * tree.
668 */
669static __init int hpet_insert_resource(void)
670{
671 if (!hpet_res)
672 return 1;
673
674 return insert_resource(&iomem_resource, hpet_res);
675}
676
677late_initcall(hpet_insert_resource);
678
679#else
680#define acpi_parse_hpet NULL
681#endif
682
683static int __init acpi_parse_fadt(struct acpi_table_header *table)
684{
685
686#ifdef CONFIG_X86_PM_TIMER
687 /* detect the location of the ACPI PM Timer */
688 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
689 /* FADT rev. 2 */
690 if (acpi_gbl_FADT.xpm_timer_block.space_id !=
691 ACPI_ADR_SPACE_SYSTEM_IO)
692 return 0;
693
694 pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
695 /*
696 * "X" fields are optional extensions to the original V1.0
697 * fields, so we must selectively expand V1.0 fields if the
698 * corresponding X field is zero.
699 */
700 if (!pmtmr_ioport)
701 pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
702 } else {
703 /* FADT rev. 1 */
704 pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
705 }
706 if (pmtmr_ioport)
707 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
708 pmtmr_ioport);
709#endif
710 return 0;
711}
712
713unsigned long __init acpi_find_rsdp(void)
714{
715 unsigned long rsdp_phys = 0;
716
717 if (efi_enabled) {
718 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
719 return efi.acpi20;
720 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
721 return efi.acpi;
722 }
723 /*
724 * Scan memory looking for the RSDP signature. First search EBDA (low
725 * memory) paragraphs and then search upper memory (E0000-FFFFF).
726 */
727 rsdp_phys = acpi_scan_rsdp(0, 0x400);
728 if (!rsdp_phys)
729 rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
730
731 return rsdp_phys;
732}
733
734#ifdef CONFIG_X86_LOCAL_APIC
735/*
736 * Parse LAPIC entries in MADT
737 * returns 0 on success, < 0 on error
738 */
739static int __init acpi_parse_madt_lapic_entries(void)
740{
741 int count;
742
743 if (!cpu_has_apic)
744 return -ENODEV;
745
746 /*
747 * Note that the LAPIC address is obtained from the MADT (32-bit value)
748 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
749 */
750
751 count =
752 acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
753 acpi_parse_lapic_addr_ovr, 0);
754 if (count < 0) {
755 printk(KERN_ERR PREFIX
756 "Error parsing LAPIC address override entry\n");
757 return count;
758 }
759
760 mp_register_lapic_address(acpi_lapic_addr);
761
762 count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
763 MAX_APICS);
764 if (!count) {
765 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
766 /* TBD: Cleanup to allow fallback to MPS */
767 return -ENODEV;
768 } else if (count < 0) {
769 printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
770 /* TBD: Cleanup to allow fallback to MPS */
771 return count;
772 }
773
774 count =
775 acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
776 if (count < 0) {
777 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
778 /* TBD: Cleanup to allow fallback to MPS */
779 return count;
780 }
781 return 0;
782}
783#endif /* CONFIG_X86_LOCAL_APIC */
784
785#ifdef CONFIG_X86_IO_APIC
786/*
787 * Parse IOAPIC related entries in MADT
788 * returns 0 on success, < 0 on error
789 */
790static int __init acpi_parse_madt_ioapic_entries(void)
791{
792 int count;
793
794 /*
795 * ACPI interpreter is required to complete interrupt setup,
796 * so if it is off, don't enumerate the io-apics with ACPI.
797 * If MPS is present, it will handle them,
798 * otherwise the system will stay in PIC mode
799 */
800 if (acpi_disabled || acpi_noirq) {
801 return -ENODEV;
802 }
803
804 if (!cpu_has_apic)
805 return -ENODEV;
806
807 /*
808 * if "noapic" boot option, don't look for IO-APICs
809 */
810 if (skip_ioapic_setup) {
811 printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
812 "due to 'noapic' option.\n");
813 return -ENODEV;
814 }
815
816 count =
817 acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
818 MAX_IO_APICS);
819 if (!count) {
820 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
821 return -ENODEV;
822 } else if (count < 0) {
823 printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
824 return count;
825 }
826
827 count =
828 acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
829 NR_IRQ_VECTORS);
830 if (count < 0) {
831 printk(KERN_ERR PREFIX
832 "Error parsing interrupt source overrides entry\n");
833 /* TBD: Cleanup to allow fallback to MPS */
834 return count;
835 }
836
837 /*
838 * If BIOS did not supply an INT_SRC_OVR for the SCI
839 * pretend we got one so we can set the SCI flags.
840 */
841 if (!acpi_sci_override_gsi)
842 acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
843
844 /* Fill in identity legacy mappings where there is no override */
845 mp_config_acpi_legacy_irqs();
846
847 count =
848 acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
849 NR_IRQ_VECTORS);
850 if (count < 0) {
851 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
852 /* TBD: Cleanup to allow fallback to MPS */
853 return count;
854 }
855
856 return 0;
857}
858#else
859static inline int acpi_parse_madt_ioapic_entries(void)
860{
861 return -1;
862}
863#endif /* !CONFIG_X86_IO_APIC */
864
865static void __init acpi_process_madt(void)
866{
867#ifdef CONFIG_X86_LOCAL_APIC
868 int error;
869
870 if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
871
872 /*
873 * Parse MADT LAPIC entries
874 */
875 error = acpi_parse_madt_lapic_entries();
876 if (!error) {
877 acpi_lapic = 1;
878
879#ifdef CONFIG_X86_GENERICARCH
880 generic_bigsmp_probe();
881#endif
882 /*
883 * Parse MADT IO-APIC entries
884 */
885 error = acpi_parse_madt_ioapic_entries();
886 if (!error) {
887 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
888 acpi_irq_balance_set(NULL);
889 acpi_ioapic = 1;
890
891 smp_found_config = 1;
892 setup_apic_routing();
893 }
894 }
895 if (error == -EINVAL) {
896 /*
897 * Dell Precision Workstation 410, 610 come here.
898 */
899 printk(KERN_ERR PREFIX
900 "Invalid BIOS MADT, disabling ACPI\n");
901 disable_acpi();
902 }
903 }
904#endif
905 return;
906}
907
908#ifdef __i386__
909
910static int __init disable_acpi_irq(const struct dmi_system_id *d)
911{
912 if (!acpi_force) {
913 printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
914 d->ident);
915 acpi_noirq_set();
916 }
917 return 0;
918}
919
920static int __init disable_acpi_pci(const struct dmi_system_id *d)
921{
922 if (!acpi_force) {
923 printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
924 d->ident);
925 acpi_disable_pci();
926 }
927 return 0;
928}
929
930static int __init dmi_disable_acpi(const struct dmi_system_id *d)
931{
932 if (!acpi_force) {
933 printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
934 disable_acpi();
935 } else {
936 printk(KERN_NOTICE
937 "Warning: DMI blacklist says broken, but acpi forced\n");
938 }
939 return 0;
940}
941
942/*
943 * Limit ACPI to CPU enumeration for HT
944 */
945static int __init force_acpi_ht(const struct dmi_system_id *d)
946{
947 if (!acpi_force) {
948 printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
949 d->ident);
950 disable_acpi();
951 acpi_ht = 1;
952 } else {
953 printk(KERN_NOTICE
954 "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
955 }
956 return 0;
957}
958
959/*
960 * If your system is blacklisted here, but you find that acpi=force
961 * works for you, please contact acpi-devel@sourceforge.net
962 */
963static struct dmi_system_id __initdata acpi_dmi_table[] = {
964 /*
965 * Boxes that need ACPI disabled
966 */
967 {
968 .callback = dmi_disable_acpi,
969 .ident = "IBM Thinkpad",
970 .matches = {
971 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
972 DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
973 },
974 },
975
976 /*
977 * Boxes that need acpi=ht
978 */
979 {
980 .callback = force_acpi_ht,
981 .ident = "FSC Primergy T850",
982 .matches = {
983 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
984 DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
985 },
986 },
987 {
988 .callback = force_acpi_ht,
989 .ident = "HP VISUALIZE NT Workstation",
990 .matches = {
991 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
992 DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
993 },
994 },
995 {
996 .callback = force_acpi_ht,
997 .ident = "Compaq Workstation W8000",
998 .matches = {
999 DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
1000 DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
1001 },
1002 },
1003 {
1004 .callback = force_acpi_ht,
1005 .ident = "ASUS P4B266",
1006 .matches = {
1007 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1008 DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
1009 },
1010 },
1011 {
1012 .callback = force_acpi_ht,
1013 .ident = "ASUS P2B-DS",
1014 .matches = {
1015 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1016 DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
1017 },
1018 },
1019 {
1020 .callback = force_acpi_ht,
1021 .ident = "ASUS CUR-DLS",
1022 .matches = {
1023 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1024 DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
1025 },
1026 },
1027 {
1028 .callback = force_acpi_ht,
1029 .ident = "ABIT i440BX-W83977",
1030 .matches = {
1031 DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
1032 DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
1033 },
1034 },
1035 {
1036 .callback = force_acpi_ht,
1037 .ident = "IBM Bladecenter",
1038 .matches = {
1039 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1040 DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
1041 },
1042 },
1043 {
1044 .callback = force_acpi_ht,
1045 .ident = "IBM eServer xSeries 360",
1046 .matches = {
1047 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1048 DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
1049 },
1050 },
1051 {
1052 .callback = force_acpi_ht,
1053 .ident = "IBM eserver xSeries 330",
1054 .matches = {
1055 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1056 DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
1057 },
1058 },
1059 {
1060 .callback = force_acpi_ht,
1061 .ident = "IBM eserver xSeries 440",
1062 .matches = {
1063 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1064 DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
1065 },
1066 },
1067
1068 /*
1069 * Boxes that need ACPI PCI IRQ routing disabled
1070 */
1071 {
1072 .callback = disable_acpi_irq,
1073 .ident = "ASUS A7V",
1074 .matches = {
1075 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
1076 DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
1077 /* newer BIOS, Revision 1011, does work */
1078 DMI_MATCH(DMI_BIOS_VERSION,
1079 "ASUS A7V ACPI BIOS Revision 1007"),
1080 },
1081 },
1082 {
1083 /*
1084 * Latest BIOS for IBM 600E (1.16) has bad pcinum
1085 * for LPC bridge, which is needed for the PCI
1086 * interrupt links to work. DSDT fix is in bug 5966.
1087 * 2645, 2646 model numbers are shared with 600/600E/600X
1088 */
1089 .callback = disable_acpi_irq,
1090 .ident = "IBM Thinkpad 600 Series 2645",
1091 .matches = {
1092 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1093 DMI_MATCH(DMI_BOARD_NAME, "2645"),
1094 },
1095 },
1096 {
1097 .callback = disable_acpi_irq,
1098 .ident = "IBM Thinkpad 600 Series 2646",
1099 .matches = {
1100 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1101 DMI_MATCH(DMI_BOARD_NAME, "2646"),
1102 },
1103 },
1104 /*
1105 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
1106 */
1107 { /* _BBN 0 bug */
1108 .callback = disable_acpi_pci,
1109 .ident = "ASUS PR-DLS",
1110 .matches = {
1111 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1112 DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
1113 DMI_MATCH(DMI_BIOS_VERSION,
1114 "ASUS PR-DLS ACPI BIOS Revision 1010"),
1115 DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
1116 },
1117 },
1118 {
1119 .callback = disable_acpi_pci,
1120 .ident = "Acer TravelMate 36x Laptop",
1121 .matches = {
1122 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1123 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
1124 },
1125 },
1126 {}
1127};
1128
1129#endif /* __i386__ */
1130
1131/*
1132 * acpi_boot_table_init() and acpi_boot_init()
1133 * called from setup_arch(), always.
1134 * 1. checksums all tables
1135 * 2. enumerates lapics
1136 * 3. enumerates io-apics
1137 *
1138 * acpi_table_init() is separate to allow reading SRAT without
1139 * other side effects.
1140 *
1141 * side effects of acpi_boot_init:
1142 * acpi_lapic = 1 if LAPIC found
1143 * acpi_ioapic = 1 if IOAPIC found
1144 * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1145 * if acpi_blacklisted() acpi_disabled = 1;
1146 * acpi_irq_model=...
1147 * ...
1148 *
1149 * return value: (currently ignored)
1150 * 0: success
1151 * !0: failure
1152 */
1153
1154int __init acpi_boot_table_init(void)
1155{
1156 int error;
1157
1158#ifdef __i386__
1159 dmi_check_system(acpi_dmi_table);
1160#endif
1161
1162 /*
1163 * If acpi_disabled, bail out
1164 * One exception: acpi=ht continues far enough to enumerate LAPICs
1165 */
1166 if (acpi_disabled && !acpi_ht)
1167 return 1;
1168
1169 /*
1170 * Initialize the ACPI boot-time table parser.
1171 */
1172 error = acpi_table_init();
1173 if (error) {
1174 disable_acpi();
1175 return error;
1176 }
1177
1178 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1179
1180 /*
1181 * blacklist may disable ACPI entirely
1182 */
1183 error = acpi_blacklisted();
1184 if (error) {
1185 if (acpi_force) {
1186 printk(KERN_WARNING PREFIX "acpi=force override\n");
1187 } else {
1188 printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1189 disable_acpi();
1190 return error;
1191 }
1192 }
1193
1194 return 0;
1195}
1196
1197int __init acpi_boot_init(void)
1198{
1199 /*
1200 * If acpi_disabled, bail out
1201 * One exception: acpi=ht continues far enough to enumerate LAPICs
1202 */
1203 if (acpi_disabled && !acpi_ht)
1204 return 1;
1205
1206 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1207
1208 /*
1209 * set sci_int and PM timer address
1210 */
1211 acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
1212
1213 /*
1214 * Process the Multiple APIC Description Table (MADT), if present
1215 */
1216 acpi_process_madt();
1217
1218 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
1219
1220 return 0;
1221}
1222
1223static int __init parse_acpi(char *arg)
1224{
1225 if (!arg)
1226 return -EINVAL;
1227
1228 /* "acpi=off" disables both ACPI table parsing and interpreter */
1229 if (strcmp(arg, "off") == 0) {
1230 disable_acpi();
1231 }
1232 /* acpi=force to over-ride black-list */
1233 else if (strcmp(arg, "force") == 0) {
1234 acpi_force = 1;
1235 acpi_ht = 1;
1236 acpi_disabled = 0;
1237 }
1238 /* acpi=strict disables out-of-spec workarounds */
1239 else if (strcmp(arg, "strict") == 0) {
1240 acpi_strict = 1;
1241 }
1242 /* Limit ACPI just to boot-time to enable HT */
1243 else if (strcmp(arg, "ht") == 0) {
1244 if (!acpi_force)
1245 disable_acpi();
1246 acpi_ht = 1;
1247 }
1248 /* "acpi=noirq" disables ACPI interrupt routing */
1249 else if (strcmp(arg, "noirq") == 0) {
1250 acpi_noirq_set();
1251 } else {
1252 /* Core will printk when we return error. */
1253 return -EINVAL;
1254 }
1255 return 0;
1256}
1257early_param("acpi", parse_acpi);
1258
1259/* FIXME: Using pci= for an ACPI parameter is a travesty. */
1260static int __init parse_pci(char *arg)
1261{
1262 if (arg && strcmp(arg, "noacpi") == 0)
1263 acpi_disable_pci();
1264 return 0;
1265}
1266early_param("pci", parse_pci);
1267
1268#ifdef CONFIG_X86_IO_APIC
1269static int __init parse_acpi_skip_timer_override(char *arg)
1270{
1271 acpi_skip_timer_override = 1;
1272 return 0;
1273}
1274early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
1275
1276static int __init parse_acpi_use_timer_override(char *arg)
1277{
1278 acpi_use_timer_override = 1;
1279 return 0;
1280}
1281early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
1282#endif /* CONFIG_X86_IO_APIC */
1283
1284static int __init setup_acpi_sci(char *s)
1285{
1286 if (!s)
1287 return -EINVAL;
1288 if (!strcmp(s, "edge"))
1289 acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
1290 (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1291 else if (!strcmp(s, "level"))
1292 acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
1293 (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1294 else if (!strcmp(s, "high"))
1295 acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
1296 (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1297 else if (!strcmp(s, "low"))
1298 acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
1299 (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1300 else
1301 return -EINVAL;
1302 return 0;
1303}
1304early_param("acpi_sci", setup_acpi_sci);
1305
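
/*
 * ACPI Global Lock (in the FACS): bit 0 is "pending", bit 1 is "owned".
 * Acquire always sets "owned" and additionally sets "pending" when the lock
 * was already owned, e.g. a free lock (0) becomes 2, a held lock (2) becomes 3.
 * A non-zero return means the lock was free and is now ours; zero means we
 * must wait for the owner's release event.  Release clears both bits and
 * returns the old "pending" bit, telling the caller whether a waiter needs
 * to be signalled via GBL_RLS.
 */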
1306int __acpi_acquire_global_lock(unsigned int *lock)
1307{
1308 unsigned int old, new, val;
1309 do {
1310 old = *lock;
1311 new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
1312 val = cmpxchg(lock, old, new);
1313 } while (unlikely (val != old));
1314 return (new < 3) ? -1 : 0;
1315}
1316
1317int __acpi_release_global_lock(unsigned int *lock)
1318{
1319 unsigned int old, new, val;
1320 do {
1321 old = *lock;
1322 new = old & ~0x3;
1323 val = cmpxchg(lock, old, new);
1324 } while (unlikely (val != old));
1325 return old & 0x1;
1326}
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
new file mode 100644
index 000000000000..2d39f55d29a8
--- /dev/null
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -0,0 +1,164 @@
1/*
2 * arch/i386/kernel/acpi/cstate.c
3 *
4 * Copyright (C) 2005 Intel Corporation
5 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
6 * - Added _PDC for SMP C-states on Intel CPUs
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/acpi.h>
13#include <linux/cpu.h>
14#include <linux/sched.h>
15
16#include <acpi/processor.h>
17#include <asm/acpi.h>
18
19/*
20 * Initialize bm_flags based on the CPU cache properties
21 * On SMP it depends on cache configuration
22 * - When cache is not shared among all CPUs, we flush cache
23 * before entering C3.
24 * - When cache is shared among all CPUs, we use bm_check
25 * mechanism as in UP case
26 *
27 * This routine is called only after all the CPUs are online
28 */
29void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
30 unsigned int cpu)
31{
32 struct cpuinfo_x86 *c = cpu_data + cpu;
33
34 flags->bm_check = 0;
35 if (num_online_cpus() == 1)
36 flags->bm_check = 1;
37 else if (c->x86_vendor == X86_VENDOR_INTEL) {
38 /*
39 * Today all CPUs that support C3 share cache.
40 * TBD: This needs to look at the shared cache map, once the
41 * multi-core detection patch makes it into the base.
42 */
43 flags->bm_check = 1;
44 }
45}
46EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
47
48/* The code below handles C-state entry with the monitor/mwait pair on Intel */
49
50struct cstate_entry {
51 struct {
52 unsigned int eax;
53 unsigned int ecx;
54 } states[ACPI_PROCESSOR_MAX_POWER];
55};
56static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
57
58static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
59
60#define MWAIT_SUBSTATE_MASK (0xf)
61#define MWAIT_SUBSTATE_SIZE (4)
62
63#define CPUID_MWAIT_LEAF (5)
64#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
65#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
66
67#define MWAIT_ECX_INTERRUPT_BREAK (0x1)
68
69#define NATIVE_CSTATE_BEYOND_HALT (2)
70
71int acpi_processor_ffh_cstate_probe(unsigned int cpu,
72 struct acpi_processor_cx *cx, struct acpi_power_register *reg)
73{
74 struct cstate_entry *percpu_entry;
75 struct cpuinfo_x86 *c = cpu_data + cpu;
76
77 cpumask_t saved_mask;
78 int retval;
79 unsigned int eax, ebx, ecx, edx;
80 unsigned int edx_part;
81 unsigned int cstate_type; /* C-state type and not ACPI C-state type */
82 unsigned int num_cstate_subtype;
83
84 if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF )
85 return -1;
86
87 if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
88 return -1;
89
90 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
91 percpu_entry->states[cx->index].eax = 0;
92 percpu_entry->states[cx->index].ecx = 0;
93
94 /* Make sure we are running on right CPU */
95 saved_mask = current->cpus_allowed;
96 retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
97 if (retval)
98 return -1;
99
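	/*
	 * CPUID leaf 5 (MONITOR/MWAIT): EDX bits [4n+3:4n] give the number
	 * of MWAIT sub C-states supported at C-state level n, and ECX
	 * advertises the enumeration extension and interrupt-break-event
	 * capabilities checked below.
	 */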
100 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
101
102 /* Check whether this particular cx_type (in CST) is supported or not */
103 cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1;
104 edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
105 num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
106
107 retval = 0;
108 if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
109 retval = -1;
110 goto out;
111 }
112
113 /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
114 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
115 !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
116 retval = -1;
117 goto out;
118 }
119 percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
120
121 /* Use the hint in CST */
122 percpu_entry->states[cx->index].eax = cx->address;
123
124 if (!mwait_supported[cstate_type]) {
125 mwait_supported[cstate_type] = 1;
126 printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d "
127 "state\n", cx->type);
128 }
129
130out:
131 set_cpus_allowed(current, saved_mask);
132 return retval;
133}
134EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
135
136void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
137{
138 unsigned int cpu = smp_processor_id();
139 struct cstate_entry *percpu_entry;
140
141 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
142 mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
143 percpu_entry->states[cx->index].ecx);
144}
145EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
146
147static int __init ffh_cstate_init(void)
148{
149 struct cpuinfo_x86 *c = &boot_cpu_data;
150 if (c->x86_vendor != X86_VENDOR_INTEL)
151 return -1;
152
153 cpu_cstate_entry = alloc_percpu(struct cstate_entry);
154 return 0;
155}
156
157static void __exit ffh_cstate_exit(void)
158{
159 free_percpu(cpu_cstate_entry);
160 cpu_cstate_entry = NULL;
161}
162
163arch_initcall(ffh_cstate_init);
164__exitcall(ffh_cstate_exit);
diff --git a/arch/x86/kernel/acpi/earlyquirk_32.c b/arch/x86/kernel/acpi/earlyquirk_32.c
new file mode 100644
index 000000000000..23f78efc577d
--- /dev/null
+++ b/arch/x86/kernel/acpi/earlyquirk_32.c
@@ -0,0 +1,84 @@
1/*
2 * Do early PCI probing for bug detection when the main PCI subsystem is
3 * not up yet.
4 */
5#include <linux/init.h>
6#include <linux/kernel.h>
7#include <linux/pci.h>
8#include <linux/acpi.h>
9
10#include <asm/pci-direct.h>
11#include <asm/acpi.h>
12#include <asm/apic.h>
13
14#ifdef CONFIG_ACPI
15
16static int __init nvidia_hpet_check(struct acpi_table_header *header)
17{
18 return 0;
19}
20#endif
21
22static int __init check_bridge(int vendor, int device)
23{
24#ifdef CONFIG_ACPI
25 static int warned;
26 /* According to Nvidia all timer overrides are bogus unless HPET
27 is enabled. */
28 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
29 if (!warned && acpi_table_parse(ACPI_SIG_HPET,
30 nvidia_hpet_check)) {
31 warned = 1;
32 acpi_skip_timer_override = 1;
33 printk(KERN_INFO "Nvidia board "
34 "detected. Ignoring ACPI "
35 "timer override.\n");
36 printk(KERN_INFO "If you got timer trouble "
37 "try acpi_use_timer_override\n");
38
39 }
40 }
41#endif
42 if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
43 timer_over_8254 = 0;
44 printk(KERN_INFO "ATI board detected. Disabling timer routing "
45 "over 8254.\n");
46 }
47 return 0;
48}
49
50void __init check_acpi_pci(void)
51{
52 int num, slot, func;
53
54 /* Assume the machine supports type 1 config access. If not, it will
55 always read 0xffffffff and should not have any side effect.
56 A few buggy systems can machine check, though, so at least allow
57 the user to disable it with a command line option. -AK */
58 if (!early_pci_allowed())
59 return;
60
61 /* Poor man's PCI discovery */
62 for (num = 0; num < 32; num++) {
63 for (slot = 0; slot < 32; slot++) {
64 for (func = 0; func < 8; func++) {
65 u32 class;
66 u32 vendor;
67 class = read_pci_config(num, slot, func,
68 PCI_CLASS_REVISION);
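				/* An all-ones class/revision means no device responded. */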
69 if (class == 0xffffffff)
70 break;
71
72 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
73 continue;
74
75 vendor = read_pci_config(num, slot, func,
76 PCI_VENDOR_ID);
77
78 if (check_bridge(vendor & 0xffff, vendor >> 16))
79 return;
80 }
81
82 }
83 }
84}
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
new file mode 100644
index 000000000000..b54fded49834
--- /dev/null
+++ b/arch/x86/kernel/acpi/processor.c
@@ -0,0 +1,75 @@
1/*
2 * arch/i386/kernel/acpi/processor.c
3 *
4 * Copyright (C) 2005 Intel Corporation
5 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
6 * - Added _PDC for platforms with Intel CPUs
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/acpi.h>
13
14#include <acpi/processor.h>
15#include <asm/acpi.h>
16
17static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
18{
19 struct acpi_object_list *obj_list;
20 union acpi_object *obj;
21 u32 *buf;
22
23 /* allocate and initialize pdc. It will be used later. */
24 obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
25 if (!obj_list) {
26 printk(KERN_ERR "Memory allocation error\n");
27 return;
28 }
29
30 obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
31 if (!obj) {
32 printk(KERN_ERR "Memory allocation error\n");
33 kfree(obj_list);
34 return;
35 }
36
37 buf = kmalloc(12, GFP_KERNEL);
38 if (!buf) {
39 printk(KERN_ERR "Memory allocation error\n");
40 kfree(obj);
41 kfree(obj_list);
42 return;
43 }
44
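	/*
	 * _PDC takes a package of DWORDs: a revision ID, the number of
	 * capability DWORDs that follow, and then the capability bits.
	 */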
45 buf[0] = ACPI_PDC_REVISION_ID;
46 buf[1] = 1;
47 buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
48
49 if (cpu_has(c, X86_FEATURE_EST))
50 buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
51
52 obj->type = ACPI_TYPE_BUFFER;
53 obj->buffer.length = 12;
54 obj->buffer.pointer = (u8 *) buf;
55 obj_list->count = 1;
56 obj_list->pointer = obj;
57 pr->pdc = obj_list;
58
59 return;
60}
61
62/* Initialize _PDC data based on the CPU vendor */
63void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
64{
65 unsigned int cpu = pr->id;
66 struct cpuinfo_x86 *c = cpu_data + cpu;
67
68 pr->pdc = NULL;
69 if (c->x86_vendor == X86_VENDOR_INTEL)
70 init_intel_pdc(pr, c);
71
72 return;
73}
74
75EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/x86/kernel/acpi/sleep_32.c b/arch/x86/kernel/acpi/sleep_32.c
new file mode 100644
index 000000000000..10699489cfe7
--- /dev/null
+++ b/arch/x86/kernel/acpi/sleep_32.c
@@ -0,0 +1,110 @@
1/*
2 * sleep.c - x86-specific ACPI sleep support.
3 *
4 * Copyright (C) 2001-2003 Patrick Mochel
5 * Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
6 */
7
8#include <linux/acpi.h>
9#include <linux/bootmem.h>
10#include <linux/dmi.h>
11#include <linux/cpumask.h>
12
13#include <asm/smp.h>
14
15/* address in low memory of the wakeup routine. */
16unsigned long acpi_wakeup_address = 0;
17unsigned long acpi_realmode_flags;
18extern char wakeup_start, wakeup_end;
19
20extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
21
22/**
23 * acpi_save_state_mem - save kernel state
24 *
25 * Create an identity mapped page table and copy the wakeup routine to
26 * low memory.
27 */
28int acpi_save_state_mem(void)
29{
30 if (!acpi_wakeup_address)
31 return 1;
32 memcpy((void *)acpi_wakeup_address, &wakeup_start,
33 &wakeup_end - &wakeup_start);
34 acpi_copy_wakeup_routine(acpi_wakeup_address);
35
36 return 0;
37}
38
39/*
40 * acpi_restore_state - undo effects of acpi_save_state_mem
41 */
42void acpi_restore_state_mem(void)
43{
44}
45
46/**
47 * acpi_reserve_bootmem - do _very_ early ACPI initialisation
48 *
49 * We allocate a page from the first 1MB of memory for the wakeup
50 * routine for when we come back from a sleep state. The
51 * runtime allocator allows specification of <16MB pages, but not
52 * <1MB pages.
53 */
54void __init acpi_reserve_bootmem(void)
55{
56 if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) {
57 printk(KERN_ERR
58 "ACPI: Wakeup code way too big, S3 disabled.\n");
59 return;
60 }
61
62 acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
63 if (!acpi_wakeup_address)
64 printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
65}
66
67static int __init acpi_sleep_setup(char *str)
68{
69 while ((str != NULL) && (*str != '\0')) {
70 if (strncmp(str, "s3_bios", 7) == 0)
71 acpi_realmode_flags |= 1;
72 if (strncmp(str, "s3_mode", 7) == 0)
73 acpi_realmode_flags |= 2;
74 if (strncmp(str, "s3_beep", 7) == 0)
75 acpi_realmode_flags |= 4;
76 str = strchr(str, ',');
77 if (str != NULL)
78 str += strspn(str, ", \t");
79 }
80 return 1;
81}
82
83__setup("acpi_sleep=", acpi_sleep_setup);
84
85/* Ouch, we want to delete this. We already have a better version in userspace,
86 in s2ram from the suspend.sf.net project. */
87static __init int reset_videomode_after_s3(const struct dmi_system_id *d)
88{
89 acpi_realmode_flags |= 2;
90 return 0;
91}
92
93static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
94 { /* Reset video mode after returning from ACPI S3 sleep */
95 .callback = reset_videomode_after_s3,
96 .ident = "Toshiba Satellite 4030cdt",
97 .matches = {
98 DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
99 },
100 },
101 {}
102};
103
104static int __init acpisleep_dmi_init(void)
105{
106 dmi_check_system(acpisleep_dmi_table);
107 return 0;
108}
109
110core_initcall(acpisleep_dmi_init);
diff --git a/arch/x86/kernel/acpi/sleep_64.c b/arch/x86/kernel/acpi/sleep_64.c
new file mode 100644
index 000000000000..79475d237071
--- /dev/null
+++ b/arch/x86/kernel/acpi/sleep_64.c
@@ -0,0 +1,120 @@
1/*
2 * acpi.c - Architecture-Specific Low-Level ACPI Support
3 *
4 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6 * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
7 * Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
8 * Copyright (C) 2003 Pavel Machek, SuSE Labs
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 */
28
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/types.h>
32#include <linux/stddef.h>
33#include <linux/slab.h>
34#include <linux/pci.h>
35#include <linux/bootmem.h>
36#include <linux/acpi.h>
37#include <linux/cpumask.h>
38
39#include <asm/mpspec.h>
40#include <asm/io.h>
41#include <asm/apic.h>
42#include <asm/apicdef.h>
43#include <asm/page.h>
44#include <asm/pgtable.h>
45#include <asm/pgalloc.h>
46#include <asm/io_apic.h>
47#include <asm/proto.h>
48#include <asm/tlbflush.h>
49
50/* --------------------------------------------------------------------------
51 Low-Level Sleep Support
52 -------------------------------------------------------------------------- */
53
54/* address in low memory of the wakeup routine. */
55unsigned long acpi_wakeup_address = 0;
56unsigned long acpi_realmode_flags;
57extern char wakeup_start, wakeup_end;
58
59extern unsigned long acpi_copy_wakeup_routine(unsigned long);
60
61/**
62 * acpi_save_state_mem - save kernel state
63 *
64 * Create an identity mapped page table and copy the wakeup routine to
65 * low memory.
66 */
67int acpi_save_state_mem(void)
68{
69 memcpy((void *)acpi_wakeup_address, &wakeup_start,
70 &wakeup_end - &wakeup_start);
71 acpi_copy_wakeup_routine(acpi_wakeup_address);
72
73 return 0;
74}
75
76/*
77 * acpi_restore_state
78 */
79void acpi_restore_state_mem(void)
80{
81}
82
83/**
84 * acpi_reserve_bootmem - do _very_ early ACPI initialisation
85 *
86 * We allocate a page in low memory for the wakeup
87 * routine for when we come back from a sleep state. The
88 * runtime allocator allows specification of <16M pages, but not
89 * <1M pages.
90 */
91void __init acpi_reserve_bootmem(void)
92{
93 acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2);
94 if ((&wakeup_end - &wakeup_start) > (PAGE_SIZE*2))
95 printk(KERN_CRIT
96 "ACPI: Wakeup code way too big, will crash on attempt"
97 " to suspend\n");
98}
99
100static int __init acpi_sleep_setup(char *str)
101{
102 while ((str != NULL) && (*str != '\0')) {
103 if (strncmp(str, "s3_bios", 7) == 0)
104 acpi_realmode_flags |= 1;
105 if (strncmp(str, "s3_mode", 7) == 0)
106 acpi_realmode_flags |= 2;
107 if (strncmp(str, "s3_beep", 7) == 0)
108 acpi_realmode_flags |= 4;
109 str = strchr(str, ',');
110 if (str != NULL)
111 str += strspn(str, ", \t");
112 }
113 return 1;
114}
115
116__setup("acpi_sleep=", acpi_sleep_setup);
117
118void acpi_pci_link_exit(void)
119{
120}
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
new file mode 100644
index 000000000000..f22ba8534d26
--- /dev/null
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -0,0 +1,321 @@
1.text
2#include <linux/linkage.h>
3#include <asm/segment.h>
4#include <asm/page.h>
5
6#
7# wakeup_code runs in real mode, at an unknown address (determined at run-time).
8# Therefore it must only use relative jumps/calls.
9#
10# Do we need to deal with A20? It is okay: the ACPI spec says A20 must be enabled.
11#
12# If physical address of wakeup_code is 0x12345, BIOS should call us with
13# cs = 0x1234, eip = 0x05
14#
15
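# BEEP programs PIT channel 2 (command port 0x43, data port 0x42) and gates it
# onto the PC speaker through port 0x61; the writes to port 0x80 are only
# short I/O delays.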
16#define BEEP \
17 inb $97, %al; \
18 outb %al, $0x80; \
19 movb $3, %al; \
20 outb %al, $97; \
21 outb %al, $0x80; \
22 movb $-74, %al; \
23 outb %al, $67; \
24 outb %al, $0x80; \
25 movb $-119, %al; \
26 outb %al, $66; \
27 outb %al, $0x80; \
28 movb $15, %al; \
29 outb %al, $66;
30
31ALIGN
32 .align 4096
33ENTRY(wakeup_start)
34wakeup_code:
35 wakeup_code_start = .
36 .code16
37
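	# Point %fs at VGA text memory (real-mode segment 0xb800); the single
	# characters written below ('L', 'S', 'i', 'n', and 'B' on failure)
	# trace progress so a hung resume can be diagnosed on screen.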
38 movw $0xb800, %ax
39 movw %ax,%fs
40 movw $0x0e00 + 'L', %fs:(0x10)
41
42 cli
43 cld
44
45 # setup data segment
46 movw %cs, %ax
47 movw %ax, %ds # Make ds:0 point to wakeup_start
48 movw %ax, %ss
49
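	# realmode_flags mirrors acpi_realmode_flags, set from the acpi_sleep=
	# boot option: bit 0 = s3_bios, bit 1 = s3_mode, bit 2 = s3_beep.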
50 testl $4, realmode_flags - wakeup_code
51 jz 1f
52 BEEP
531:
54 mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board
55 movw $0x0e00 + 'S', %fs:(0x12)
56
57 pushl $0 # Kill any dangerous flags
58 popfl
59
60 movl real_magic - wakeup_code, %eax
61 cmpl $0x12345678, %eax
62 jne bogus_real_magic
63
64 testl $1, realmode_flags - wakeup_code
65 jz 1f
66 lcall $0xc000,$3
67 movw %cs, %ax
68 movw %ax, %ds # Bios might have played with that
69 movw %ax, %ss
701:
71
72 testl $2, realmode_flags - wakeup_code
73 jz 1f
74 mov video_mode - wakeup_code, %ax
75 call mode_set
761:
77
78 # set up page table
79 movl $swsusp_pg_dir-__PAGE_OFFSET, %eax
80 movl %eax, %cr3
81
82 testl $1, real_efer_save_restore - wakeup_code
83 jz 4f
84 # restore efer setting
85 movl real_save_efer_edx - wakeup_code, %edx
86 movl real_save_efer_eax - wakeup_code, %eax
87 mov $0xc0000080, %ecx
88 wrmsr
894:
90 # make sure %cr4 is set correctly (features, etc)
91 movl real_save_cr4 - wakeup_code, %eax
92 movl %eax, %cr4
93 movw $0xb800, %ax
94 movw %ax,%fs
95 movw $0x0e00 + 'i', %fs:(0x12)
96
97 # need a gdt -- use lgdtl to force 32-bit operands, in case
98 # the GDT is located past 16 megabytes.
99 lgdtl real_save_gdt - wakeup_code
100
101 movl real_save_cr0 - wakeup_code, %eax
102 movl %eax, %cr0
103 jmp 1f
1041:
105 movw $0x0e00 + 'n', %fs:(0x14)
106
107 movl real_magic - wakeup_code, %eax
108 cmpl $0x12345678, %eax
109 jne bogus_real_magic
110
111 testl $8, realmode_flags - wakeup_code
112 jz 1f
113 BEEP
1141:
115 ljmpl $__KERNEL_CS, $wakeup_pmode_return
116
117real_save_gdt: .word 0
118 .long 0
119real_save_cr0: .long 0
120real_save_cr3: .long 0
121real_save_cr4: .long 0
122real_magic: .long 0
123video_mode: .long 0
124realmode_flags: .long 0
125beep_flags: .long 0
126real_efer_save_restore: .long 0
127real_save_efer_edx: .long 0
128real_save_efer_eax: .long 0
129
130bogus_real_magic:
131 movw $0x0e00 + 'B', %fs:(0x12)
132 jmp bogus_real_magic
133
134/* This code uses an extended set of video mode numbers. These include:
135 * Aliases for standard modes
136 * NORMAL_VGA (-1)
137 * EXTENDED_VGA (-2)
138 * ASK_VGA (-3)
139 * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
140 * of compatibility when extending the table. These are between 0x00 and 0xff.
141 */
142#define VIDEO_FIRST_MENU 0x0000
143
144/* Standard BIOS video modes (BIOS number + 0x0100) */
145#define VIDEO_FIRST_BIOS 0x0100
146
147/* VESA BIOS video modes (VESA number + 0x0200) */
148#define VIDEO_FIRST_VESA 0x0200
149
150/* Video7 special modes (BIOS number + 0x0900) */
151#define VIDEO_FIRST_V7 0x0900
152
153# Setting of user mode (AX=mode ID) => CF=success
154
155# For now, we only handle VESA modes (0x0200..0x03ff). To handle other
156# modes, we should probably compile in the video code from the boot
157# directory.
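The range check at the top of mode_set below accepts only the VESA window; a hypothetical C helper (not in this patch) expressing the same test:

/* Accept only extended mode numbers 0x0200..0x03ff (the VESA range);
 * mode_set performs the equivalent check on the high byte in %bh. */
static int is_vesa_mode(unsigned short mode)
{
	unsigned char high = mode >> 8;

	return high >= (VIDEO_FIRST_VESA >> 8) &&
	       high < (VIDEO_FIRST_VESA >> 8) + 2;
}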
158mode_set:
159 movw %ax, %bx
160 subb $VIDEO_FIRST_VESA>>8, %bh
161 cmpb $2, %bh
162 jb check_vesa
163
164setbad:
165 clc
166 ret
167
168check_vesa:
169 orw $0x4000, %bx # Use linear frame buffer
170 movw $0x4f02, %ax # VESA BIOS mode set call
171 int $0x10
172 cmpw $0x004f, %ax # AL=4f if implemented
173 jnz setbad # AH=0 if OK
174
175 stc
176 ret
177
178 .code32
179 ALIGN
180
181.org 0x800
182wakeup_stack_begin: # Stack grows down
183
184.org 0xff0 # Just below end of page
185wakeup_stack:
186ENTRY(wakeup_end)
187
188.org 0x1000
189
190wakeup_pmode_return:
191 movw $__KERNEL_DS, %ax
192 movw %ax, %ss
193 movw %ax, %ds
194 movw %ax, %es
195 movw %ax, %fs
196 movw %ax, %gs
197 movw $0x0e00 + 'u', 0xb8016
198
199 # reload the gdt, as we need the full 32 bit address
200 lgdt saved_gdt
201 lidt saved_idt
202 lldt saved_ldt
203 ljmp $(__KERNEL_CS),$1f
2041:
205 movl %cr3, %eax
206 movl %eax, %cr3
207 wbinvd
208
209 # and restore the stack ... but you need gdt for this to work
210 movl saved_context_esp, %esp
211
212 movl %cs:saved_magic, %eax
213 cmpl $0x12345678, %eax
214 jne bogus_magic
215
216 # jump to place where we left off
217 movl saved_eip,%eax
218 jmp *%eax
219
220bogus_magic:
221 movw $0x0e00 + 'B', 0xb8018
222 jmp bogus_magic
223
224
225##
226# acpi_copy_wakeup_routine
227#
228# Copy the above routine to low memory.
229#
230# Parameters:
231# %eax: place to copy wakeup routine to
232#
233# Returned address is location of code in low memory (past data and stack)
234#
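A hedged sketch of how the sleep code elsewhere in this patch might call this routine, assuming acpi_wakeup_address holds low memory reserved at boot and the C prototype shown here (both are assumptions, not taken from this file):

extern unsigned long acpi_wakeup_address;		/* assumed low-memory buffer */
extern void acpi_copy_wakeup_routine(unsigned long addr);	/* assumed prototype */

static int acpi_save_state_mem_sketch(void)
{
	if (!acpi_wakeup_address)
		return 1;	/* nothing was reserved below 1MB */
	acpi_copy_wakeup_routine(acpi_wakeup_address);
	return 0;
}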
235ENTRY(acpi_copy_wakeup_routine)
236
237 pushl %ebx
238 sgdt saved_gdt
239 sidt saved_idt
240 sldt saved_ldt
241 str saved_tss
242
243 movl nx_enabled, %edx
244 movl %edx, real_efer_save_restore - wakeup_start (%eax)
245 testl $1, real_efer_save_restore - wakeup_start (%eax)
246 jz 2f
247 # save efer setting
248 pushl %eax
249 movl %eax, %ebx
250 mov $0xc0000080, %ecx
251 rdmsr
252 movl %edx, real_save_efer_edx - wakeup_start (%ebx)
253 movl %eax, real_save_efer_eax - wakeup_start (%ebx)
254 popl %eax
2552:
256
257 movl %cr3, %edx
258 movl %edx, real_save_cr3 - wakeup_start (%eax)
259 movl %cr4, %edx
260 movl %edx, real_save_cr4 - wakeup_start (%eax)
261 movl %cr0, %edx
262 movl %edx, real_save_cr0 - wakeup_start (%eax)
263 sgdt real_save_gdt - wakeup_start (%eax)
264
265 movl saved_videomode, %edx
266 movl %edx, video_mode - wakeup_start (%eax)
267 movl acpi_realmode_flags, %edx
268 movl %edx, realmode_flags - wakeup_start (%eax)
269 movl $0x12345678, real_magic - wakeup_start (%eax)
270 movl $0x12345678, saved_magic
271 popl %ebx
272 ret
273
274save_registers:
275 leal 4(%esp), %eax
276 movl %eax, saved_context_esp
277 movl %ebx, saved_context_ebx
278 movl %ebp, saved_context_ebp
279 movl %esi, saved_context_esi
280 movl %edi, saved_context_edi
281 pushfl ; popl saved_context_eflags
282
283 movl $ret_point, saved_eip
284 ret
285
286
287restore_registers:
288 movl saved_context_ebp, %ebp
289 movl saved_context_ebx, %ebx
290 movl saved_context_esi, %esi
291 movl saved_context_edi, %edi
292 pushl saved_context_eflags ; popfl
293 ret
294
295ENTRY(do_suspend_lowlevel)
296 call save_processor_state
297 call save_registers
298 pushl $3
299 call acpi_enter_sleep_state
300 addl $4, %esp
301
302# In case of S3 failure, we'll emerge here. Jump
303# to ret_point to recover
304 jmp ret_point
305 .p2align 4,,7
306ret_point:
307 call restore_registers
308 call restore_processor_state
309 ret
310
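Restated as a hedged, pseudo-C outline (illustrative only; the real path must stay in assembly so that the register and stack state saved by save_registers is exactly what wakeup_pmode_return restores):

/* Prototypes sketched from how the assembly uses them (assumptions): */
extern void save_processor_state(void);
extern void restore_processor_state(void);
extern unsigned int acpi_enter_sleep_state(unsigned char sleep_state);
extern void save_registers(void);
extern void restore_registers(void);

static void do_suspend_lowlevel_sketch(void)
{
	save_processor_state();		/* MSRs, descriptor tables, ... */
	save_registers();		/* esp/ebx/ebp/esi/edi plus the return eip */
	acpi_enter_sleep_state(3);	/* request S3; normally does not return */
	/* Reached on S3 failure, or after wakeup jumps back to ret_point: */
	restore_registers();
	restore_processor_state();
}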
311.data
312ALIGN
313ENTRY(saved_magic) .long 0
314ENTRY(saved_eip) .long 0
315
316# saved registers
317saved_gdt: .long 0,0
318saved_idt: .long 0,0
319saved_ldt: .long 0
320saved_tss: .long 0
321
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
new file mode 100644
index 000000000000..8b4357e1efe0
--- /dev/null
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -0,0 +1,456 @@
1.text
2#include <linux/linkage.h>
3#include <asm/segment.h>
4#include <asm/pgtable.h>
5#include <asm/page.h>
6#include <asm/msr.h>
7
8# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
9#
10# wakeup_code runs in real mode, at an unknown address (determined at run-time).
11# Therefore it must only use relative jumps/calls.
12#
13# Do we need to deal with A20? It is okay: the ACPI spec says A20 must be enabled.
14#
15# If the physical address of wakeup_code is 0x12345, the BIOS should call us with
16# cs = 0x1234, eip = 0x05
17#
18
19#define BEEP \
20 inb $97, %al; \
21 outb %al, $0x80; \
22 movb $3, %al; \
23 outb %al, $97; \
24 outb %al, $0x80; \
25 movb $-74, %al; \
26 outb %al, $67; \
27 outb %al, $0x80; \
28 movb $-119, %al; \
29 outb %al, $66; \
30 outb %al, $0x80; \
31 movb $15, %al; \
32 outb %al, $66;
33
34
35ALIGN
36 .align 16
37ENTRY(wakeup_start)
38wakeup_code:
39 wakeup_code_start = .
40 .code16
41
42# Running in a *copy* of this code, somewhere in the low 1MB.
43
44 movb $0xa1, %al ; outb %al, $0x80
45 cli
46 cld
47 # setup data segment
48 movw %cs, %ax
49 movw %ax, %ds # Make ds:0 point to wakeup_start
50 movw %ax, %ss
51
52 # Data segment must be set up before we can see whether to beep.
53 testl $4, realmode_flags - wakeup_code
54 jz 1f
55 BEEP
561:
57
58 # Private stack is needed for ASUS board
59 mov $(wakeup_stack - wakeup_code), %sp
60
61 pushl $0 # Kill any dangerous flags
62 popfl
63
64 movl real_magic - wakeup_code, %eax
65 cmpl $0x12345678, %eax
66 jne bogus_real_magic
67
68 call verify_cpu # Verify the cpu supports long
69 # mode
70 testl %eax, %eax
71 jnz no_longmode
72
73 testl $1, realmode_flags - wakeup_code
74 jz 1f
75 lcall $0xc000,$3
76 movw %cs, %ax
77	movw %ax, %ds		# BIOS might have played with that
78 movw %ax, %ss
791:
80
81 testl $2, realmode_flags - wakeup_code
82 jz 1f
83 mov video_mode - wakeup_code, %ax
84 call mode_set
851:
86
87 movw $0xb800, %ax
88 movw %ax,%fs
89 movw $0x0e00 + 'L', %fs:(0x10)
90
91 movb $0xa2, %al ; outb %al, $0x80
92
93 mov %ds, %ax # Find 32bit wakeup_code addr
94	movzx %ax, %esi			# (Convert %ds:gdt to a linear ptr)
95 shll $4, %esi
96 # Fix up the vectors
97 addl %esi, wakeup_32_vector - wakeup_code
98 addl %esi, wakeup_long64_vector - wakeup_code
99 addl %esi, gdt_48a + 2 - wakeup_code # Fixup the gdt pointer
100
101 lidtl %ds:idt_48a - wakeup_code
102 lgdtl %ds:gdt_48a - wakeup_code # load gdt with whatever is
103 # appropriate
104
105 movl $1, %eax # protected mode (PE) bit
106 lmsw %ax # This is it!
107 jmp 1f
1081:
109
110 ljmpl *(wakeup_32_vector - wakeup_code)
111
112 .balign 4
113wakeup_32_vector:
114 .long wakeup_32 - wakeup_code
115 .word __KERNEL32_CS, 0
116
117 .code32
118wakeup_32:
119# Running in this code, but at a low address; paging is not yet turned on.
120 movb $0xa5, %al ; outb %al, $0x80
121
122 movl $__KERNEL_DS, %eax
123 movl %eax, %ds
124
125 movw $0x0e00 + 'i', %ds:(0xb8012)
126 movb $0xa8, %al ; outb %al, $0x80;
127
128 /*
129	 * Prepare for entering 64-bit mode
130 */
131
132 /* Enable PAE */
133 xorl %eax, %eax
134 btsl $5, %eax
135 movl %eax, %cr4
136
137	/* Set up the early-boot 4-level page tables */
138 leal (wakeup_level4_pgt - wakeup_code)(%esi), %eax
139 movl %eax, %cr3
140
141 /* Check if nx is implemented */
142 movl $0x80000001, %eax
143 cpuid
144 movl %edx,%edi
145
146 /* Enable Long Mode */
147 xorl %eax, %eax
148 btsl $_EFER_LME, %eax
149
150 /* No Execute supported? */
151 btl $20,%edi
152 jnc 1f
153 btsl $_EFER_NX, %eax
154
155 /* Make changes effective */
1561: movl $MSR_EFER, %ecx
157 xorl %edx, %edx
158 wrmsr
159
160 xorl %eax, %eax
161 btsl $31, %eax /* Enable paging and in turn activate Long Mode */
162 btsl $0, %eax /* Enable protected mode */
163
164 /* Make changes effective */
165 movl %eax, %cr0
166
167 /* At this point:
168 CR4.PAE must be 1
169 CS.L must be 0
170 CR3 must point to PML4
171 Next instruction must be a branch
172 This must be on identity-mapped page
173 */
174 /*
175 * At this point we're in long mode but in 32bit compatibility mode
176 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
177	 * EFER.LMA = 1). Now we want to jump into 64-bit mode; to do that we load
178	 * the new gdt/idt whose __KERNEL_CS has CS.L = 1.
179 */
180
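The EFER value assembled in %eax above can be restated as a small C sketch (using the _EFER_LME/_EFER_NX bit numbers from <asm/msr.h>; nx_supported stands in for the CPUID 0x80000001 EDX bit-20 test done earlier):

#include <asm/msr.h>	/* MSR_EFER, _EFER_LME, _EFER_NX */

/* Value the stub writes to MSR_EFER before setting CR0.PG|CR0.PE. */
static unsigned long long wakeup_efer(int nx_supported)
{
	unsigned long long efer = 1ULL << _EFER_LME;	/* long mode enable */

	if (nx_supported)
		efer |= 1ULL << _EFER_NX;		/* no-execute enable */
	return efer;
}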
181 /* Finally jump in 64bit mode */
182 ljmp *(wakeup_long64_vector - wakeup_code)(%esi)
183
184 .balign 4
185wakeup_long64_vector:
186 .long wakeup_long64 - wakeup_code
187 .word __KERNEL_CS, 0
188
189.code64
190
191 /* Hooray, we are in Long 64-bit mode (but still running in
192 * low memory)
193 */
194wakeup_long64:
195 /*
196 * We must switch to a new descriptor in kernel space for the GDT
197	 * because soon the kernel will no longer have access to the userspace
198	 * addresses where we are currently running. We have to do that here
199	 * because in 32-bit mode we couldn't load a 64-bit linear address.
200 */
201 lgdt cpu_gdt_descr
202
203 movw $0x0e00 + 'n', %ds:(0xb8014)
204 movb $0xa9, %al ; outb %al, $0x80
205
206 movq saved_magic, %rax
207 movq $0x123456789abcdef0, %rdx
208 cmpq %rdx, %rax
209 jne bogus_64_magic
210
211 movw $0x0e00 + 'u', %ds:(0xb8016)
212
213 nop
214 nop
215 movw $__KERNEL_DS, %ax
216 movw %ax, %ss
217 movw %ax, %ds
218 movw %ax, %es
219 movw %ax, %fs
220 movw %ax, %gs
221 movq saved_rsp, %rsp
222
223 movw $0x0e00 + 'x', %ds:(0xb8018)
224 movq saved_rbx, %rbx
225 movq saved_rdi, %rdi
226 movq saved_rsi, %rsi
227 movq saved_rbp, %rbp
228
229 movw $0x0e00 + '!', %ds:(0xb801a)
230 movq saved_rip, %rax
231 jmp *%rax
232
233.code32
234
235 .align 64
236gdta:
237	/* It's good to keep this gdt in sync with the one in trampoline.S */
238 .word 0, 0, 0, 0 # dummy
239	/* ??? Why do I need the accessed bit set in order for this to work? */
240 .quad 0x00cf9b000000ffff # __KERNEL32_CS
241 .quad 0x00af9b000000ffff # __KERNEL_CS
242 .quad 0x00cf93000000ffff # __KERNEL_DS
243
244idt_48a:
245 .word 0 # idt limit = 0
246 .word 0, 0 # idt base = 0L
247
248gdt_48a:
249 .word 0x800 # gdt limit=2048,
250 # 256 GDT entries
251	.long gdta - wakeup_code	# gdt base (relocated at run time)
252
253real_magic: .quad 0
254video_mode: .quad 0
255realmode_flags: .quad 0
256
257.code16
258bogus_real_magic:
259 movb $0xba,%al ; outb %al,$0x80
260 jmp bogus_real_magic
261
262.code64
263bogus_64_magic:
264 movb $0xb3,%al ; outb %al,$0x80
265 jmp bogus_64_magic
266
267.code16
268no_longmode:
269 movb $0xbc,%al ; outb %al,$0x80
270 jmp no_longmode
271
272#include "../verify_cpu_64.S"
273
274/* This code uses an extended set of video mode numbers. These include:
275 * Aliases for standard modes
276 * NORMAL_VGA (-1)
277 * EXTENDED_VGA (-2)
278 * ASK_VGA (-3)
279 * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
280 * of compatibility when extending the table. These are between 0x00 and 0xff.
281 */
282#define VIDEO_FIRST_MENU 0x0000
283
284/* Standard BIOS video modes (BIOS number + 0x0100) */
285#define VIDEO_FIRST_BIOS 0x0100
286
287/* VESA BIOS video modes (VESA number + 0x0200) */
288#define VIDEO_FIRST_VESA 0x0200
289
290/* Video7 special modes (BIOS number + 0x0900) */
291#define VIDEO_FIRST_V7 0x0900
292
293# Setting of user mode (AX=mode ID) => CF=success
294
295# For now, we only handle VESA modes (0x0200..0x03ff). To handle other
296# modes, we should probably compile in the video code from the boot
297# directory.
298.code16
299mode_set:
300 movw %ax, %bx
301 subb $VIDEO_FIRST_VESA>>8, %bh
302 cmpb $2, %bh
303 jb check_vesa
304
305setbad:
306 clc
307 ret
308
309check_vesa:
310 orw $0x4000, %bx # Use linear frame buffer
311 movw $0x4f02, %ax # VESA BIOS mode set call
312 int $0x10
313 cmpw $0x004f, %ax # AL=4f if implemented
314 jnz setbad # AH=0 if OK
315
316 stc
317 ret
318
319wakeup_stack_begin: # Stack grows down
320
321.org 0xff0
322wakeup_stack: # Just below end of page
323
324.org 0x1000
325ENTRY(wakeup_level4_pgt)
326 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
327 .fill 510,8,0
328 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
329 .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
330
331ENTRY(wakeup_end)
332
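The 511 in the comment above is the index of the last PML4 slot: each entry covers 2^39 bytes of the 2^48-byte address space, and the kernel mapping starts 2 GB below the top. A one-line check (hypothetical, not part of the patch):

static const unsigned int kernel_pml4_slot =
	(unsigned int)(((1ULL << 48) - (2ULL << 30)) >> 39);	/* == 511 */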
333##
334# acpi_copy_wakeup_routine
335#
336# Copy the above routine to low memory.
337#
338# Parameters:
339# %rdi: place to copy wakeup routine to
340#
341# Returned address is location of code in low memory (past data and stack)
342#
343 .code64
344ENTRY(acpi_copy_wakeup_routine)
345 pushq %rax
346 pushq %rdx
347
348 movl saved_video_mode, %edx
349 movl %edx, video_mode - wakeup_start (,%rdi)
350 movl acpi_realmode_flags, %edx
351 movl %edx, realmode_flags - wakeup_start (,%rdi)
352 movq $0x12345678, real_magic - wakeup_start (,%rdi)
353 movq $0x123456789abcdef0, %rdx
354 movq %rdx, saved_magic
355
356 movq saved_magic, %rax
357 movq $0x123456789abcdef0, %rdx
358 cmpq %rdx, %rax
359 jne bogus_64_magic
360
361 # restore the regs we used
362 popq %rdx
363 popq %rax
364ENTRY(do_suspend_lowlevel_s4bios)
365 ret
366
367 .align 2
368 .p2align 4,,15
369.globl do_suspend_lowlevel
370 .type do_suspend_lowlevel,@function
371do_suspend_lowlevel:
372.LFB5:
373 subq $8, %rsp
374 xorl %eax, %eax
375 call save_processor_state
376
377 movq %rsp, saved_context_esp(%rip)
378 movq %rax, saved_context_eax(%rip)
379 movq %rbx, saved_context_ebx(%rip)
380 movq %rcx, saved_context_ecx(%rip)
381 movq %rdx, saved_context_edx(%rip)
382 movq %rbp, saved_context_ebp(%rip)
383 movq %rsi, saved_context_esi(%rip)
384 movq %rdi, saved_context_edi(%rip)
385 movq %r8, saved_context_r08(%rip)
386 movq %r9, saved_context_r09(%rip)
387 movq %r10, saved_context_r10(%rip)
388 movq %r11, saved_context_r11(%rip)
389 movq %r12, saved_context_r12(%rip)
390 movq %r13, saved_context_r13(%rip)
391 movq %r14, saved_context_r14(%rip)
392 movq %r15, saved_context_r15(%rip)
393 pushfq ; popq saved_context_eflags(%rip)
394
395 movq $.L97, saved_rip(%rip)
396
397 movq %rsp,saved_rsp
398 movq %rbp,saved_rbp
399 movq %rbx,saved_rbx
400 movq %rdi,saved_rdi
401 movq %rsi,saved_rsi
402
403 addq $8, %rsp
404 movl $3, %edi
405 xorl %eax, %eax
406 jmp acpi_enter_sleep_state
407.L97:
408 .p2align 4,,7
409.L99:
410 .align 4
411 movl $24, %eax
412 movw %ax, %ds
413 movq saved_context+58(%rip), %rax
414 movq %rax, %cr4
415 movq saved_context+50(%rip), %rax
416 movq %rax, %cr3
417 movq saved_context+42(%rip), %rax
418 movq %rax, %cr2
419 movq saved_context+34(%rip), %rax
420 movq %rax, %cr0
421 pushq saved_context_eflags(%rip) ; popfq
422 movq saved_context_esp(%rip), %rsp
423 movq saved_context_ebp(%rip), %rbp
424 movq saved_context_eax(%rip), %rax
425 movq saved_context_ebx(%rip), %rbx
426 movq saved_context_ecx(%rip), %rcx
427 movq saved_context_edx(%rip), %rdx
428 movq saved_context_esi(%rip), %rsi
429 movq saved_context_edi(%rip), %rdi
430 movq saved_context_r08(%rip), %r8
431 movq saved_context_r09(%rip), %r9
432 movq saved_context_r10(%rip), %r10
433 movq saved_context_r11(%rip), %r11
434 movq saved_context_r12(%rip), %r12
435 movq saved_context_r13(%rip), %r13
436 movq saved_context_r14(%rip), %r14
437 movq saved_context_r15(%rip), %r15
438
439 xorl %eax, %eax
440 addq $8, %rsp
441 jmp restore_processor_state
442.LFE5:
443.Lfe5:
444 .size do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel
445
446.data
447ALIGN
448ENTRY(saved_rbp) .quad 0
449ENTRY(saved_rsi) .quad 0
450ENTRY(saved_rdi) .quad 0
451ENTRY(saved_rbx) .quad 0
452
453ENTRY(saved_rip) .quad 0
454ENTRY(saved_rsp) .quad 0
455
456ENTRY(saved_magic) .quad 0