Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/acpi/boot.c                       | 259
-rw-r--r--  arch/i386/kernel/acpi/cstate.c                     |  10
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c                 |   4
-rw-r--r--  arch/i386/kernel/apm.c                             |   6
-rw-r--r--  arch/i386/kernel/cpu/common.c                      |  15
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c        |  43
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c            | 131
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c         |   9
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c  |   6
-rw-r--r--  arch/i386/kernel/e820.c                            |   2
-rw-r--r--  arch/i386/kernel/entry.S                           |  36
-rw-r--r--  arch/i386/kernel/io_apic.c                         |  32
-rw-r--r--  arch/i386/kernel/microcode.c                       |   2
-rw-r--r--  arch/i386/kernel/mpparse.c                         |  12
-rw-r--r--  arch/i386/kernel/nmi.c                             |   8
-rw-r--r--  arch/i386/kernel/paravirt.c                        |   9
-rw-r--r--  arch/i386/kernel/process.c                         |   7
-rw-r--r--  arch/i386/kernel/ptrace.c                          |  21
-rw-r--r--  arch/i386/kernel/setup.c                           |   6
-rw-r--r--  arch/i386/kernel/smpboot.c                         |  23
-rw-r--r--  arch/i386/kernel/srat.c                            |  84
-rw-r--r--  arch/i386/kernel/sysenter.c                        |  14
-rw-r--r--  arch/i386/kernel/trampoline.S                      |   5
-rw-r--r--  arch/i386/kernel/traps.c                           |  83
-rw-r--r--  arch/i386/kernel/tsc.c                             |   2
25 files changed, 347 insertions(+), 482 deletions(-)
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index c8f96cff07c6..e94aff6888ca 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -66,7 +66,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
 
 #define BAD_MADT_ENTRY(entry, end) ( \
         (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
-        ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
+        ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
 
 #define PREFIX "ACPI: "
 
@@ -79,7 +79,7 @@ int acpi_ioapic;
 int acpi_strict;
 EXPORT_SYMBOL(acpi_strict);
 
-acpi_interrupt_flags acpi_sci_flags __initdata;
+u8 acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
@@ -92,11 +92,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #warning ACPI uses CMPXCHG, i486 and later hardware
 #endif
 
-#define MAX_MADT_ENTRIES 256
-u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
-    {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
-EXPORT_SYMBOL(x86_acpiid_to_apicid);
-
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
@@ -166,30 +161,26 @@ char *__acpi_map_table(unsigned long phys, unsigned long size)
 
 #ifdef CONFIG_PCI_MMCONFIG
 /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
-struct acpi_table_mcfg_config *pci_mmcfg_config;
+struct acpi_mcfg_allocation *pci_mmcfg_config;
 int pci_mmcfg_config_num;
 
-int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
+int __init acpi_parse_mcfg(struct acpi_table_header *header)
 {
         struct acpi_table_mcfg *mcfg;
         unsigned long i;
         int config_size;
 
-        if (!phys_addr || !size)
+        if (!header)
                 return -EINVAL;
 
-        mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
-        if (!mcfg) {
-                printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
-                return -ENODEV;
-        }
+        mcfg = (struct acpi_table_mcfg *)header;
 
         /* how many config structures do we have */
         pci_mmcfg_config_num = 0;
-        i = size - sizeof(struct acpi_table_mcfg);
-        while (i >= sizeof(struct acpi_table_mcfg_config)) {
+        i = header->length - sizeof(struct acpi_table_mcfg);
+        while (i >= sizeof(struct acpi_mcfg_allocation)) {
                 ++pci_mmcfg_config_num;
-                i -= sizeof(struct acpi_table_mcfg_config);
+                i -= sizeof(struct acpi_mcfg_allocation);
         };
         if (pci_mmcfg_config_num == 0) {
                 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
@@ -204,9 +195,9 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
                 return -ENOMEM;
         }
 
-        memcpy(pci_mmcfg_config, &mcfg->config, config_size);
+        memcpy(pci_mmcfg_config, &mcfg[1], config_size);
         for (i = 0; i < pci_mmcfg_config_num; ++i) {
-                if (mcfg->config[i].base_reserved) {
+                if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
                         printk(KERN_ERR PREFIX
                                "MMCONFIG not in low 4GB of memory\n");
                         kfree(pci_mmcfg_config);
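The sizing loop above simply carves whatever follows the fixed MCFG header into fixed-size allocation records, which is also why the copy now starts at &mcfg[1] (the first byte past the header) rather than a named member. A worked example of the arithmetic (struct sizes assumed for illustration, per the ACPI spec: a 44-byte MCFG header and 16-byte allocation records):

        /*
         * header->length = 108:
         *   i = 108 - 44 = 64 bytes of payload
         *   64 / 16      = 4  -> pci_mmcfg_config_num = 4
         * memcpy(pci_mmcfg_config, &mcfg[1], 4 * 16) then copies the
         * records sitting immediately after the header.
         */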
@@ -220,24 +211,24 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
 #endif                                /* CONFIG_PCI_MMCONFIG */
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_madt(struct acpi_table_header *table)
 {
         struct acpi_table_madt *madt = NULL;
 
-        if (!phys_addr || !size || !cpu_has_apic)
+        if (!cpu_has_apic)
                 return -EINVAL;
 
-        madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
+        madt = (struct acpi_table_madt *)table;
         if (!madt) {
                 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
                 return -ENODEV;
         }
 
-        if (madt->lapic_address) {
-                acpi_lapic_addr = (u64) madt->lapic_address;
+        if (madt->address) {
+                acpi_lapic_addr = (u64) madt->address;
 
                 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
-                       madt->lapic_address);
+                       madt->address);
         }
 
         acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
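The signature change above is the pattern this whole patch applies: with the new ACPICA table manager, acpi_table_parse() hands its handler an already-mapped struct acpi_table_header * instead of a physical address and size the handler had to map itself, which is why the __acpi_map_table() calls and their error paths disappear. A minimal sketch of a handler written against the new convention (handler name hypothetical):

        static int __init my_table_handler(struct acpi_table_header *table)
        {
                if (!table)
                        return -EINVAL;
                /* already mapped: cast to the concrete type and read */
                printk(KERN_DEBUG PREFIX "%.4s rev %u len %u\n",
                       table->signature, table->revision, table->length);
                return 0;
        }

        /* wired up the same way as the handlers in this file: */
        /* acpi_table_parse(ACPI_SIG_MADT, my_table_handler); */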
@@ -246,21 +237,17 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 }
 
 static int __init
-acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 {
-        struct acpi_table_lapic *processor = NULL;
+        struct acpi_madt_local_apic *processor = NULL;
 
-        processor = (struct acpi_table_lapic *)header;
+        processor = (struct acpi_madt_local_apic *)header;
 
         if (BAD_MADT_ENTRY(processor, end))
                 return -EINVAL;
 
         acpi_table_print_madt_entry(header);
 
-        /* Record local apic id only when enabled */
-        if (processor->flags.enabled)
-                x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
-
         /*
          * We need to register disabled CPU as well to permit
          * counting disabled CPUs. This allows us to size
@@ -269,18 +256,18 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
          * when we use CPU hotplug.
          */
         mp_register_lapic(processor->id,        /* APIC ID */
-                          processor->flags.enabled);        /* Enabled? */
+                          processor->lapic_flags & ACPI_MADT_ENABLED);        /* Enabled? */
 
         return 0;
 }
 
 static int __init
-acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
+acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
                           const unsigned long end)
 {
-        struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
+        struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
 
-        lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
+        lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
 
         if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
                 return -EINVAL;
@@ -291,11 +278,11 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
 }
 
 static int __init
-acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
 {
-        struct acpi_table_lapic_nmi *lapic_nmi = NULL;
+        struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
 
-        lapic_nmi = (struct acpi_table_lapic_nmi *)header;
+        lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
 
         if (BAD_MADT_ENTRY(lapic_nmi, end))
                 return -EINVAL;
@@ -313,11 +300,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
 #ifdef CONFIG_X86_IO_APIC
 
 static int __init
-acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 {
-        struct acpi_table_ioapic *ioapic = NULL;
+        struct acpi_madt_io_apic *ioapic = NULL;
 
-        ioapic = (struct acpi_table_ioapic *)header;
+        ioapic = (struct acpi_madt_io_apic *)header;
 
         if (BAD_MADT_ENTRY(ioapic, end))
                 return -EINVAL;
@@ -333,7 +320,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
 /*
  * Parse Interrupt Source Override for the ACPI SCI
  */
-static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 {
         if (trigger == 0)        /* compatible SCI trigger is level */
                 trigger = 3;
@@ -342,11 +329,11 @@ static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
                 polarity = 3;
 
         /* Command-line over-ride via acpi_sci= */
-        if (acpi_sci_flags.trigger)
-                trigger = acpi_sci_flags.trigger;
+        if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
+                trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
 
-        if (acpi_sci_flags.polarity)
-                polarity = acpi_sci_flags.polarity;
+        if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
+                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
         /*
          * mp_config_acpi_legacy_irqs() already setup IRQs < 16
@@ -357,51 +344,52 @@ static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 
         /*
          * stash over-ride to indicate we've been here
-         * and for later update of acpi_fadt
+         * and for later update of acpi_gbl_FADT
          */
         acpi_sci_override_gsi = gsi;
         return;
 }
 
 static int __init
-acpi_parse_int_src_ovr(acpi_table_entry_header * header,
+acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
                        const unsigned long end)
 {
-        struct acpi_table_int_src_ovr *intsrc = NULL;
+        struct acpi_madt_interrupt_override *intsrc = NULL;
 
-        intsrc = (struct acpi_table_int_src_ovr *)header;
+        intsrc = (struct acpi_madt_interrupt_override *)header;
 
         if (BAD_MADT_ENTRY(intsrc, end))
                 return -EINVAL;
 
         acpi_table_print_madt_entry(header);
 
-        if (intsrc->bus_irq == acpi_fadt.sci_int) {
+        if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
                 acpi_sci_ioapic_setup(intsrc->global_irq,
-                                      intsrc->flags.polarity,
-                                      intsrc->flags.trigger);
+                                      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
+                                      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
                 return 0;
         }
 
         if (acpi_skip_timer_override &&
-            intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
+            intsrc->source_irq == 0 && intsrc->global_irq == 2) {
                 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
                 return 0;
         }
 
-        mp_override_legacy_irq(intsrc->bus_irq,
-                               intsrc->flags.polarity,
-                               intsrc->flags.trigger, intsrc->global_irq);
+        mp_override_legacy_irq(intsrc->source_irq,
+                               intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
+                               (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
+                               intsrc->global_irq);
 
         return 0;
 }
 
 static int __init
-acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
 {
-        struct acpi_table_nmi_src *nmi_src = NULL;
+        struct acpi_madt_nmi_source *nmi_src = NULL;
 
-        nmi_src = (struct acpi_table_nmi_src *)header;
+        nmi_src = (struct acpi_madt_nmi_source *)header;
 
         if (BAD_MADT_ENTRY(nmi_src, end))
                 return -EINVAL;
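The masking that replaces the old flags bitfields follows the MADT INTI flags layout: polarity occupies bits 0-1 (ACPI_MADT_POLARITY_MASK) and trigger mode bits 2-3 (ACPI_MADT_TRIGGER_MASK), hence the >> 2 wherever a bare trigger value is needed. A small decode sketch (mask values 0x03 and 0x0c assumed, per the ACPI spec):

        /*
         * inti_flags = 0x0f:
         *   polarity = 0x0f & 0x03        = 3  (active low)
         *   trigger  = (0x0f & 0x0c) >> 2 = 3  (level)
         * These are exactly the SCI defaults that
         * acpi_sci_ioapic_setup() falls back to above.
         */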
@@ -417,7 +405,7 @@ acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
 
 /*
  * acpi_pic_sci_set_trigger()
  *
  * use ELCR to set PIC-mode trigger type for SCI
  *
  * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
@@ -511,7 +499,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
         union acpi_object *obj;
-        struct acpi_table_lapic *lapic;
+        struct acpi_madt_local_apic *lapic;
         cpumask_t tmp_map, new_map;
         u8 physid;
         int cpu;
@@ -529,10 +517,10 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
                 return -EINVAL;
         }
 
-        lapic = (struct acpi_table_lapic *)obj->buffer.pointer;
+        lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
 
-        if ((lapic->header.type != ACPI_MADT_LAPIC) ||
-            (!lapic->flags.enabled)) {
+        if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
+            !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
                 kfree(buffer.pointer);
                 return -EINVAL;
         }
@@ -544,7 +532,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
         buffer.pointer = NULL;
 
         tmp_map = cpu_present_map;
-        mp_register_lapic(physid, lapic->flags.enabled);
+        mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
         /*
          * If mp_register_lapic successfully generates a new logical cpu
@@ -566,14 +554,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 
 int acpi_unmap_lsapic(int cpu)
 {
-        int i;
-
-        for_each_possible_cpu(i) {
-                if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
-                        x86_acpiid_to_apicid[i] = -1;
-                        break;
-                }
-        }
         x86_cpu_to_apicid[cpu] = -1;
         cpu_clear(cpu, cpu_present_map);
         num_processors--;
@@ -619,42 +599,36 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
         return 0;
 }
 
-static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_sbf(struct acpi_table_header *table)
 {
-        struct acpi_table_sbf *sb;
-
-        if (!phys_addr || !size)
-                return -EINVAL;
+        struct acpi_table_boot *sb;
 
-        sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
+        sb = (struct acpi_table_boot *)table;
         if (!sb) {
                 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
                 return -ENODEV;
         }
 
-        sbf_port = sb->sbf_cmos;        /* Save CMOS port */
+        sbf_port = sb->cmos_index;        /* Save CMOS port */
 
         return 0;
 }
 
 #ifdef CONFIG_HPET_TIMER
 
-static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
+static int __init acpi_parse_hpet(struct acpi_table_header *table)
 {
         struct acpi_table_hpet *hpet_tbl;
         struct resource *hpet_res;
         resource_size_t res_start;
 
-        if (!phys || !size)
-                return -EINVAL;
-
-        hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
+        hpet_tbl = (struct acpi_table_hpet *)table;
         if (!hpet_tbl) {
                 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
                 return -ENODEV;
         }
 
-        if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
+        if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
                 printk(KERN_WARNING PREFIX "HPET timers must be located in "
                        "memory.\n");
                 return -1;
@@ -667,29 +641,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
                 hpet_res->name = (void *)&hpet_res[1];
                 hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
-                         "HPET %u", hpet_tbl->number);
+                         "HPET %u", hpet_tbl->sequence);
                 hpet_res->end = (1 * 1024) - 1;
         }
 
 #ifdef CONFIG_X86_64
-        vxtime.hpet_address = hpet_tbl->addr.addrl |
-            ((long)hpet_tbl->addr.addrh << 32);
+        vxtime.hpet_address = hpet_tbl->address.address;
 
         printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
                hpet_tbl->id, vxtime.hpet_address);
 
         res_start = vxtime.hpet_address;
 #else        /* X86 */
         {
                 extern unsigned long hpet_address;
 
-                hpet_address = hpet_tbl->addr.addrl;
+                hpet_address = hpet_tbl->address.address;
                 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
                        hpet_tbl->id, hpet_address);
 
                 res_start = hpet_address;
         }
 #endif        /* X86 */
 
         if (hpet_res) {
                 hpet_res->start = res_start;
@@ -707,42 +680,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
 extern u32 pmtmr_ioport;
 #endif
 
-static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
+static int __init acpi_parse_fadt(struct acpi_table_header *table)
 {
-        struct fadt_descriptor *fadt = NULL;
-
-        fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
-        if (!fadt) {
-                printk(KERN_WARNING PREFIX "Unable to map FADT\n");
-                return 0;
-        }
-        /* initialize sci_int early for INT_SRC_OVR MADT parsing */
-        acpi_fadt.sci_int = fadt->sci_int;
-
-        /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
-        acpi_fadt.revision = fadt->revision;
-        acpi_fadt.force_apic_physical_destination_mode =
-            fadt->force_apic_physical_destination_mode;
 
 #ifdef CONFIG_X86_PM_TIMER
         /* detect the location of the ACPI PM Timer */
-        if (fadt->revision >= FADT2_REVISION_ID) {
+        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
                 /* FADT rev. 2 */
-                if (fadt->xpm_tmr_blk.address_space_id !=
+                if (acpi_gbl_FADT.xpm_timer_block.space_id !=
                     ACPI_ADR_SPACE_SYSTEM_IO)
                         return 0;
 
-                pmtmr_ioport = fadt->xpm_tmr_blk.address;
+                pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
                 /*
                  * "X" fields are optional extensions to the original V1.0
                  * fields, so we must selectively expand V1.0 fields if the
                  * corresponding X field is zero.
                  */
                 if (!pmtmr_ioport)
-                        pmtmr_ioport = fadt->V1_pm_tmr_blk;
+                        pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
         } else {
                 /* FADT rev. 1 */
-                pmtmr_ioport = fadt->V1_pm_tmr_blk;
+                pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
         }
         if (pmtmr_ioport)
                 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
@@ -784,13 +743,13 @@ static int __init acpi_parse_madt_lapic_entries(void)
         if (!cpu_has_apic)
                 return -ENODEV;
 
         /*
          * Note that the LAPIC address is obtained from the MADT (32-bit value)
          * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
          */
 
         count =
-            acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
+            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
                                   acpi_parse_lapic_addr_ovr, 0);
         if (count < 0) {
                 printk(KERN_ERR PREFIX
@@ -800,7 +759,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
 
         mp_register_lapic_address(acpi_lapic_addr);
 
-        count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
+        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
                                       MAX_APICS);
         if (!count) {
                 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
@@ -813,7 +772,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
         }
 
         count =
-            acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
+            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
         if (count < 0) {
                 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
                 /* TBD: Cleanup to allow fallback to MPS */
@@ -842,7 +801,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
                 return -ENODEV;
         }
 
         if (!cpu_has_apic)
                 return -ENODEV;
 
         /*
@@ -855,7 +814,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
         }
 
         count =
-            acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
+            acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
                                   MAX_IO_APICS);
         if (!count) {
                 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
@@ -866,7 +825,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
         }
 
         count =
-            acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
+            acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
                                   NR_IRQ_VECTORS);
         if (count < 0) {
                 printk(KERN_ERR PREFIX
@@ -880,13 +839,13 @@ static int __init acpi_parse_madt_ioapic_entries(void)
          * pretend we got one so we can set the SCI flags.
          */
         if (!acpi_sci_override_gsi)
-                acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
+                acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
 
         /* Fill in identity legacy mapings where no override */
         mp_config_acpi_legacy_irqs();
 
         count =
-            acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
+            acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
                                   NR_IRQ_VECTORS);
         if (count < 0) {
                 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
@@ -908,7 +867,7 @@ static void __init acpi_process_madt(void)
 #ifdef CONFIG_X86_LOCAL_APIC
         int count, error;
 
-        count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
+        count = acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt);
         if (count >= 1) {
 
                 /*
@@ -1195,7 +1154,7 @@ int __init acpi_boot_table_init(void)
         if (acpi_disabled && !acpi_ht)
                 return 1;
 
         /*
          * Initialize the ACPI boot-time table parser.
          */
         error = acpi_table_init();
@@ -1204,7 +1163,7 @@ int __init acpi_boot_table_init(void)
                 return error;
         }
 
-        acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
+        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
 
         /*
          * blacklist may disable ACPI entirely
@@ -1232,19 +1191,19 @@ int __init acpi_boot_init(void)
         if (acpi_disabled && !acpi_ht)
                 return 1;
 
-        acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
+        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
 
         /*
          * set sci_int and PM timer address
          */
-        acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
+        acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
 
         /*
          * Process the Multiple APIC Description Table (MADT), if present
          */
         acpi_process_madt();
 
-        acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
+        acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
 
         return 0;
 }
@@ -1315,15 +1274,41 @@ static int __init setup_acpi_sci(char *s)
         if (!s)
                 return -EINVAL;
         if (!strcmp(s, "edge"))
-                acpi_sci_flags.trigger = 1;
+                acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
+                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
         else if (!strcmp(s, "level"))
-                acpi_sci_flags.trigger = 3;
+                acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
+                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
         else if (!strcmp(s, "high"))
-                acpi_sci_flags.polarity = 1;
+                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
+                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
         else if (!strcmp(s, "low"))
-                acpi_sci_flags.polarity = 3;
+                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
+                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
         else
                 return -EINVAL;
         return 0;
 }
 early_param("acpi_sci", setup_acpi_sci);
+
+int __acpi_acquire_global_lock(unsigned int *lock)
+{
+        unsigned int old, new, val;
+        do {
+                old = *lock;
+                new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+                val = cmpxchg(lock, old, new);
+        } while (unlikely (val != old));
+        return (new < 3) ? -1 : 0;
+}
+
+int __acpi_release_global_lock(unsigned int *lock)
+{
+        unsigned int old, new, val;
+        do {
+                old = *lock;
+                new = old & ~0x3;
+                val = cmpxchg(lock, old, new);
+        } while (unlikely (val != old));
+        return old & 0x1;
+}
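The two helpers added above implement the ACPI global-lock handshake on the FACS lock word: bit 0 is the "pending" flag, bit 1 the "owned" flag. Acquire always sets owned (the + 2) and additionally sets pending when the word was already owned (the (old >> 1) & 0x1 term), so new < 3 holds exactly when a free lock was taken. A sketch of how a caller reads the return values (lock word and caller hypothetical; in the tree these are reached via ACPICA's ACPI_ACQUIRE_GLOBAL_LOCK/ACPI_RELEASE_GLOBAL_LOCK macros):

        static unsigned int facs_lock_word;        /* hypothetical stand-in */

        static void global_lock_example(void)
        {
                if (__acpi_acquire_global_lock(&facs_lock_word)) {
                        /* -1: word was free, we own it now */
                        /* ... touch hardware shared with firmware ... */
                        if (__acpi_release_global_lock(&facs_lock_word))
                                ; /* pending was set: a waiter must be
                                   * signalled via the GBL_RLS bit */
                } else {
                        /* 0: firmware owns it; we marked ourselves
                         * pending and must wait for its release event */
                }
        }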
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 12e937c1ce4b..2d39f55d29a8 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -47,13 +47,13 @@ EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
 
 /* The code below handles cstate entry with monitor-mwait pair on Intel*/
 
-struct cstate_entry_s {
+struct cstate_entry {
         struct {
                 unsigned int eax;
                 unsigned int ecx;
         } states[ACPI_PROCESSOR_MAX_POWER];
 };
-static struct cstate_entry_s *cpu_cstate_entry;        /* per CPU ptr */
+static struct cstate_entry *cpu_cstate_entry;        /* per CPU ptr */
 
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
@@ -71,7 +71,7 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 int acpi_processor_ffh_cstate_probe(unsigned int cpu,
                 struct acpi_processor_cx *cx, struct acpi_power_register *reg)
 {
-        struct cstate_entry_s *percpu_entry;
+        struct cstate_entry *percpu_entry;
         struct cpuinfo_x86 *c = cpu_data + cpu;
 
         cpumask_t saved_mask;
@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
         unsigned int cpu = smp_processor_id();
-        struct cstate_entry_s *percpu_entry;
+        struct cstate_entry *percpu_entry;
 
         percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
         mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
@@ -150,7 +150,7 @@ static int __init ffh_cstate_init(void)
         if (c->x86_vendor != X86_VENDOR_INTEL)
                 return -1;
 
-        cpu_cstate_entry = alloc_percpu(struct cstate_entry_s);
+        cpu_cstate_entry = alloc_percpu(struct cstate_entry);
         return 0;
 }
 
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 4b60af7f91dd..bf86f7662d8b 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -16,7 +16,7 @@
 
 static int nvidia_hpet_detected __initdata;
 
-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
         nvidia_hpet_detected = 1;
         return 0;
@@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
            is enabled. */
         if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
                 nvidia_hpet_detected = 0;
-                acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+                acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
                 if (nvidia_hpet_detected == 0) {
                         acpi_skip_timer_override = 1;
                         printk(KERN_INFO "Nvidia board "
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index b75cff25de4b..199016927541 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -785,7 +785,11 @@ static int apm_do_idle(void)
         polling = !!(current_thread_info()->status & TS_POLLING);
         if (polling) {
                 current_thread_info()->status &= ~TS_POLLING;
-                smp_mb__after_clear_bit();
+                /*
+                 * TS_POLLING-cleared state must be visible before we
+                 * test NEED_RESCHED:
+                 */
+                smp_mb();
         }
         if (!need_resched()) {
                 idled = 1;
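The full smp_mb() is needed because TS_POLLING is cleared here with a plain store, and the ordering against the need_resched() test matters on both CPUs. A schematic of the race the barrier closes (simplified; not the actual scheduler code):

        /*
         *   idle CPU                      waking CPU
         *   --------                      ----------
         *   status &= ~TS_POLLING;        set need_resched flag;
         *   smp_mb();                     smp_mb();
         *   if (!need_resched())          if (status & TS_POLLING)
         *           idle...;                      skip the wakeup IPI;
         *
         * Without the barriers each side can read the other's flag
         * before its own store is visible, and the wakeup is lost.
         */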
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 1b34c56f8123..8a8bbdaaf38a 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -54,7 +54,7 @@ static struct cpu_dev __cpuinitdata default_cpu = {
         .c_init        = default_init,
         .c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu = &default_cpu;
+static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
 
 static int __init cachesize_setup(char *str)
 {
@@ -710,11 +710,8 @@ __cpuinit int init_gdt(int cpu, struct task_struct *idle)
         return 1;
 }
 
-/* Common CPU init for both boot and secondary CPUs */
-static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+void __cpuinit cpu_set_gdt(int cpu)
 {
-        struct tss_struct * t = &per_cpu(init_tss, cpu);
-        struct thread_struct *thread = &curr->thread;
         struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
         /* Reinit these anyway, even if they've already been done (on
@@ -722,6 +719,13 @@ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
            the real ones). */
         load_gdt(cpu_gdt_descr);
         set_kernel_gs();
+}
+
+/* Common CPU init for both boot and secondary CPUs */
+static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+{
+        struct tss_struct * t = &per_cpu(init_tss, cpu);
+        struct thread_struct *thread = &curr->thread;
 
         if (cpu_test_and_set(cpu, cpu_initialized)) {
                 printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
@@ -807,6 +811,7 @@ void __cpuinit cpu_init(void)
                 local_irq_enable();
         }
 
+        cpu_set_gdt(cpu);
         _cpu_init(cpu, curr);
 }
 
812 817
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 18f4715c655d..10baa3501ed3 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -126,27 +126,6 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
         }
 }
 
-static void wrport(u16 port, u8 bit_width, u32 value)
-{
-        if (bit_width <= 8)
-                outb(value, port);
-        else if (bit_width <= 16)
-                outw(value, port);
-        else if (bit_width <= 32)
-                outl(value, port);
-}
-
-static void rdport(u16 port, u8 bit_width, u32 * ret)
-{
-        *ret = 0;
-        if (bit_width <= 8)
-                *ret = inb(port);
-        else if (bit_width <= 16)
-                *ret = inw(port);
-        else if (bit_width <= 32)
-                *ret = inl(port);
-}
-
 struct msr_addr {
         u32 reg;
 };
@@ -177,7 +156,9 @@ static void do_drv_read(struct drv_cmd *cmd)
                 rdmsr(cmd->addr.msr.reg, cmd->val, h);
                 break;
         case SYSTEM_IO_CAPABLE:
-                rdport(cmd->addr.io.port, cmd->addr.io.bit_width, &cmd->val);
+                acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
+                                &cmd->val,
+                                (u32)cmd->addr.io.bit_width);
                 break;
         default:
                 break;
@@ -193,7 +174,9 @@ static void do_drv_write(struct drv_cmd *cmd)
                 wrmsr(cmd->addr.msr.reg, cmd->val, h);
                 break;
         case SYSTEM_IO_CAPABLE:
-                wrport(cmd->addr.io.port, cmd->addr.io.bit_width, cmd->val);
+                acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
+                                cmd->val,
+                                (u32)cmd->addr.io.bit_width);
                 break;
         default:
                 break;
@@ -390,8 +373,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         cpumask_t online_policy_cpus;
         struct drv_cmd cmd;
         unsigned int msr;
-        unsigned int next_state = 0;
-        unsigned int next_perf_state = 0;
+        unsigned int next_state = 0; /* Index into freq_table */
+        unsigned int next_perf_state = 0; /* Index into perf table */
         unsigned int i;
         int result = 0;
 
@@ -437,6 +420,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                 msr =
                     (u32) perf->states[next_perf_state].
                     control & INTEL_MSR_RANGE;
+                cmd.val = get_cur_val(online_policy_cpus);
                 cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
                 break;
         case SYSTEM_IO_CAPABLE:
@@ -456,8 +440,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         else
                 cpu_set(policy->cpu, cmd.mask);
 
-        freqs.old = data->freq_table[perf->state].frequency;
-        freqs.new = data->freq_table[next_perf_state].frequency;
+        freqs.old = perf->states[perf->state].core_frequency * 1000;
+        freqs.new = data->freq_table[next_state].frequency;
         for_each_cpu_mask(i, cmd.mask) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -694,19 +678,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 valid_states++;
         }
         data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+        perf->state = 0;
 
         result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
         if (result)
                 goto err_freqfree;
 
-        switch (data->cpu_feature) {
+        switch (perf->control_register.space_id) {
         case ACPI_ADR_SPACE_SYSTEM_IO:
                 /* Current speed is unknown and not detectable by IO port */
                 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
                 break;
         case ACPI_ADR_SPACE_FIXED_HARDWARE:
                 acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
-                get_cur_freq_on_cpu(cpu);
+                policy->cur = get_cur_freq_on_cpu(cpu);
                 break;
         default:
                 break;
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index c548daad3476..a3db9332d652 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -52,6 +52,10 @@
 #define CPU_EZRA_T        4
 #define CPU_NEHEMIAH        5
 
+/* Flags */
+#define USE_ACPI_C3        (1 << 1)
+#define USE_NORTHBRIDGE        (1 << 2)
+
 static int cpu_model;
 static unsigned int numscales=16;
 static unsigned int fsb;
@@ -68,7 +72,7 @@ static unsigned int minmult, maxmult;
 static int can_scale_voltage;
 static struct acpi_processor *pr = NULL;
 static struct acpi_processor_cx *cx = NULL;
-static int port22_en;
+static u8 longhaul_flags;
 
 /* Module parameters */
 static int scale_voltage;
@@ -80,7 +84,6 @@ static int ignore_latency;
 /* Clock ratios multiplied by 10 */
 static int clock_ratio[32];
 static int eblcr_table[32];
-static unsigned int highest_speed, lowest_speed; /* kHz */
 static int longhaul_version;
 static struct cpufreq_frequency_table *longhaul_table;
 
@@ -178,7 +181,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
         safe_halt();
         /* Change frequency on next halt or sleep */
         wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
-        if (port22_en) {
+        if (!cx_address) {
                 ACPI_FLUSH_CPU_CACHE();
                 /* Invoke C1 */
                 halt();
@@ -187,9 +190,8 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
                 /* Invoke C3 */
                 inb(cx_address);
                 /* Dummy op - must do something useless after P_LVL3 read */
-                t = inl(acpi_fadt.xpm_tmr_blk.address);
+                t = inl(acpi_gbl_FADT.xpm_timer_block.address);
         }
-
         /* Disable bus ratio bit */
         local_irq_disable();
         longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -243,15 +245,13 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
         outb(0xFF,0xA1);        /* Overkill */
         outb(0xFE,0x21);        /* TMR0 only */
 
-        if (pr->flags.bm_control) {
-                /* Disable bus master arbitration */
-                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
-                                  ACPI_MTX_DO_NOT_LOCK);
-        } else if (port22_en) {
+        if (longhaul_flags & USE_NORTHBRIDGE) {
                 /* Disable AGP and PCI arbiters */
                 outb(3, 0x22);
+        } else if ((pr != NULL) && pr->flags.bm_control) {
+                /* Disable bus master arbitration */
+                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
         }
-
         switch (longhaul_version) {
 
         /*
@@ -278,22 +278,23 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
          * to work in practice.
          */
         case TYPE_POWERSAVER:
-                /* Don't allow wakeup */
-                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
-                                  ACPI_MTX_DO_NOT_LOCK);
-                do_powersaver(cx->address, clock_ratio_index);
+                if (longhaul_flags & USE_ACPI_C3) {
+                        /* Don't allow wakeup */
+                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+                        do_powersaver(cx->address, clock_ratio_index);
+                } else {
+                        do_powersaver(0, clock_ratio_index);
+                }
                 break;
         }
 
-        if (pr->flags.bm_control) {
-                /* Enable bus master arbitration */
-                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
-                                  ACPI_MTX_DO_NOT_LOCK);
-        } else if (port22_en) {
+        if (longhaul_flags & USE_NORTHBRIDGE) {
                 /* Enable arbiters */
                 outb(0, 0x22);
+        } else if ((pr != NULL) && pr->flags.bm_control) {
+                /* Enable bus master arbitration */
+                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
         }
-
         outb(pic2_mask,0xA1);        /* restore mask */
         outb(pic1_mask,0x21);
 
@@ -314,12 +315,12 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 
 #define ROUNDING        0xf
 
-static int _guess(int guess)
+static int _guess(int guess, int mult)
 {
         int target;
 
-        target = ((maxmult/10)*guess);
-        if (maxmult%10 != 0)
+        target = ((mult/10)*guess);
+        if (mult%10 != 0)
                 target += (guess/2);
         target += ROUNDING/2;
         target &= ~ROUNDING;
@@ -327,17 +328,17 @@ static int _guess(int guess)
 }
 
 
-static int guess_fsb(void)
+static int guess_fsb(int mult)
 {
         int speed = (cpu_khz/1000);
         int i;
-        int speeds[3] = { 66, 100, 133 };
+        int speeds[] = { 66, 100, 133, 200 };
 
         speed += ROUNDING/2;
         speed &= ~ROUNDING;
 
-        for (i=0; i<3; i++) {
-                if (_guess(speeds[i]) == speed)
+        for (i=0; i<4; i++) {
+                if (_guess(speeds[i], mult) == speed)
                         return speeds[i];
         }
         return 0;
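Passing the multiplier in lets guess_fsb() recover the FSB purely from the measured clock, which is what makes the per-version EBLCR lookup tables removable below. A worked example of the rounding (figures assumed for illustration):

        /*
         * A 9.5x part at ~1263 MHz: mult = 95, cpu_khz ~ 1263000.
         *   speed           = 1263 -> +7, & ~0xf -> 1264
         *   _guess(133, 95) = (95/10)*133 + 133/2 = 1197 + 66 = 1263
         *                          -> +7, & ~0xf -> 1264  == speed
         * so the 133 MHz candidate wins. Masking both sides into
         * 16 MHz buckets absorbs measurement jitter in cpu_khz.
         */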
@@ -354,9 +355,7 @@ static int __init longhaul_get_ranges(void)
                                   130, 150, 160, 140, -1, 155, -1, 145 };
         unsigned int j, k = 0;
         union msr_longhaul longhaul;
-        unsigned long lo, hi;
-        unsigned int eblcr_fsb_table_v1[] = { 66, 133, 100, -1 };
-        unsigned int eblcr_fsb_table_v2[] = { 133, 100, -1, 66 };
+        int mult = 0;
 
         switch (longhaul_version) {
         case TYPE_LONGHAUL_V1:
@@ -364,30 +363,18 @@ static int __init longhaul_get_ranges(void)
                 /* Ugh, Longhaul v1 didn't have the min/max MSRs.
                    Assume min=3.0x & max = whatever we booted at. */
                 minmult = 30;
-                maxmult = longhaul_get_cpu_mult();
-                rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
-                invalue = (lo & (1<<18|1<<19)) >>18;
-                if (cpu_model==CPU_SAMUEL || cpu_model==CPU_SAMUEL2)
-                        fsb = eblcr_fsb_table_v1[invalue];
-                else
-                        fsb = guess_fsb();
+                maxmult = mult = longhaul_get_cpu_mult();
                 break;
 
         case TYPE_POWERSAVER:
                 /* Ezra-T */
                 if (cpu_model==CPU_EZRA_T) {
+                        minmult = 30;
                         rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
                         invalue = longhaul.bits.MaxMHzBR;
                         if (longhaul.bits.MaxMHzBR4)
                                 invalue += 16;
-                        maxmult=ezra_t_multipliers[invalue];
-
-                        invalue = longhaul.bits.MinMHzBR;
-                        if (longhaul.bits.MinMHzBR4 == 1)
-                                minmult = 30;
-                        else
-                                minmult = ezra_t_multipliers[invalue];
-                        fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
+                        maxmult = mult = ezra_t_multipliers[invalue];
                         break;
                 }
 
@@ -407,21 +394,16 @@ static int __init longhaul_get_ranges(void)
          * But it works, so we don't grumble.
          */
                 minmult=40;
-                maxmult=longhaul_get_cpu_mult();
-
-                /* Starting with the 1.2GHz parts, theres a 200MHz bus. */
-                if ((cpu_khz/maxmult) > 13400)
-                        fsb = 200;
-                else
-                        fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
+                maxmult = mult = longhaul_get_cpu_mult();
                 break;
         }
         }
+        fsb = guess_fsb(mult);
 
         dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n",
                  minmult/10, minmult%10, maxmult/10, maxmult%10);
 
-        if (fsb == -1) {
+        if (fsb == 0) {
                 printk (KERN_INFO PFX "Invalid (reserved) FSB!\n");
                 return -EINVAL;
         }
@@ -429,7 +411,7 @@ static int __init longhaul_get_ranges(void)
         highest_speed = calc_speed(maxmult);
         lowest_speed = calc_speed(minmult);
         dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
                  print_speed(lowest_speed/1000),
                  print_speed(highest_speed/1000));
 
         if (lowest_speed == highest_speed) {
@@ -513,7 +495,7 @@ static void __init longhaul_setup_voltagescaling(void)
                maxvid.mV/1000, maxvid.mV%1000,
                minvid.mV/1000, minvid.mV%1000,
                numvscales);
 
         j = 0;
         while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
                 speed = longhaul_table[j].frequency;
@@ -691,27 +673,32 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
         /* Find ACPI data for processor */
         acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
                             &longhaul_walk_callback, NULL, (void *)&pr);
-        if (pr == NULL)
-                goto err_acpi;
 
-        if (longhaul_version == TYPE_POWERSAVER) {
-                /* Check ACPI support for C3 state */
+        /* Check ACPI support for C3 state */
+        if ((pr != NULL) && (longhaul_version == TYPE_POWERSAVER)) {
                 cx = &pr->power.states[ACPI_STATE_C3];
                 if (cx->address > 0 &&
                    (cx->latency <= 1000 || ignore_latency != 0) ) {
+                        longhaul_flags |= USE_ACPI_C3;
                         goto print_support_type;
                 }
         }
+        /* Check if northbridge is friendly */
+        if (enable_arbiter_disable()) {
+                longhaul_flags |= USE_NORTHBRIDGE;
+                goto print_support_type;
+        }
+
+        /* No ACPI C3 or we can't use it */
         /* Check ACPI support for bus master arbiter disable */
-        if (!pr->flags.bm_control) {
-                if (enable_arbiter_disable()) {
-                        port22_en = 1;
-                } else {
-                        goto err_acpi;
-                }
+        if ((pr == NULL) || !(pr->flags.bm_control)) {
+                printk(KERN_ERR PFX
+                       "No ACPI support. Unsupported northbridge.\n");
+                return -ENODEV;
         }
+
 print_support_type:
-        if (!port22_en) {
+        if (!(longhaul_flags & USE_NORTHBRIDGE)) {
                 printk (KERN_INFO PFX "Using ACPI support.\n");
         } else {
                 printk (KERN_INFO PFX "Using northbridge support.\n");
@@ -736,10 +723,6 @@ print_support_type:
         cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
 
         return 0;
-
-err_acpi:
-        printk(KERN_ERR PFX "No ACPI support. Unsupported northbridge. Aborting.\n");
-        return -ENODEV;
 }
 
 static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
@@ -774,8 +757,8 @@ static int __init longhaul_init(void)
 
 #ifdef CONFIG_SMP
         if (num_online_cpus() > 1) {
-                return -ENODEV;
                 printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
+                return -ENODEV;
         }
 #endif
 #ifdef CONFIG_X86_IO_APIC
@@ -787,8 +770,10 @@ static int __init longhaul_init(void)
         switch (c->x86_model) {
         case 6 ... 9:
                 return cpufreq_register_driver(&longhaul_driver);
+        case 10:
+                printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
         default:
-                printk (KERN_INFO PFX "Unknown VIA CPU. Contact davej@codemonkey.org.uk\n");
+                ;;
         }
 
         return -ENODEV;
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index bec50170b75a..4786fedca6eb 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -51,7 +51,6 @@ enum {
 
 
 static int has_N44_O17_errata[NR_CPUS];
-static int has_N60_errata[NR_CPUS];
 static unsigned int stock_freq;
 static struct cpufreq_driver p4clockmod_driver;
 static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -224,12 +223,6 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
         case 0x0f12:
                 has_N44_O17_errata[policy->cpu] = 1;
                 dprintk("has errata -- disabling low frequencies\n");
-                break;
-
-        case 0x0f29:
-                has_N60_errata[policy->cpu] = 1;
-                dprintk("has errata -- disabling frequencies lower than 2ghz\n");
-                break;
         }
 
         /* get max frequency */
@@ -241,8 +234,6 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
241 for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { 234 for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
242 if ((i<2) && (has_N44_O17_errata[policy->cpu])) 235 if ((i<2) && (has_N44_O17_errata[policy->cpu]))
243 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; 236 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
244 else if (has_N60_errata[policy->cpu] && ((stock_freq * i)/8) < 2000000)
245 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
246 else 237 else
247 p4clockmod_table[i].frequency = (stock_freq * i)/8; 238 p4clockmod_table[i].frequency = (stock_freq * i)/8;
248 } 239 }
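
With the N60 errata handling gone, what remains of cpufreq_p4_cpu_init() builds the modulation table as eighths of the stock frequency and marks errata-affected steps CPUFREQ_ENTRY_INVALID. A self-contained sketch of that table construction; the constants and entry struct are simplified stand-ins for the cpufreq ones:

    #include <stdio.h>

    #define CPUFREQ_ENTRY_INVALID   (~0u)
    #define CPUFREQ_TABLE_END       (~1u)

    struct freq_entry { unsigned int index, frequency; };

    int main(void)
    {
        unsigned int stock_freq = 2400000;  /* hypothetical 2.4 GHz, in kHz */
        int has_n44_o17 = 1;                /* pretend the errata applies */
        struct freq_entry table[9];
        int i;

        for (i = 1; i < 8; i++) {
            table[i].index = i;
            if (i < 2 && has_n44_o17)       /* lowest duty cycles unusable */
                table[i].frequency = CPUFREQ_ENTRY_INVALID;
            else
                table[i].frequency = stock_freq * i / 8;
        }
        table[8].frequency = CPUFREQ_TABLE_END;

        for (i = 1; i < 8; i++) {
            if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
                printf("step %d: invalid (errata)\n", i);
            else
                printf("step %d: %u kHz\n", i, table[i].frequency);
        }
        return 0;
    }
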
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 5113e9231634..f43b987f952b 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -533,9 +533,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
533 533
534 /* notify BIOS that we exist */ 534 /* notify BIOS that we exist */
535 acpi_processor_notify_smm(THIS_MODULE); 535 acpi_processor_notify_smm(THIS_MODULE);
536 printk("speedstep-centrino with X86_SPEEDSTEP_CENTRINO_ACPI" 536 printk("speedstep-centrino with X86_SPEEDSTEP_CENTRINO_ACPI "
537 "config is deprecated.\n " 537 "config is deprecated.\n "
538 "Use X86_ACPI_CPUFREQ (acpi-cpufreq instead.\n" ); 538 "Use X86_ACPI_CPUFREQ (acpi-cpufreq) instead.\n" );
539 539
540 return 0; 540 return 0;
541 541
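
The speedstep-centrino change is purely about C's adjacent-string-literal concatenation: the compiler joins the fragments with no separator, so without a trailing space the message reads "...CENTRINO_ACPIconfig...". A two-printf demonstration:

    #include <stdio.h>

    int main(void)
    {
        /* Adjacent literals are concatenated verbatim, so the space
         * must live inside one of the fragments. */
        printf("with X86_SPEEDSTEP_CENTRINO_ACPI"
               "config is deprecated.\n");      /* ...ACPIconfig... */
        printf("with X86_SPEEDSTEP_CENTRINO_ACPI "
               "config is deprecated.\n");      /* correct */
        return 0;
    }
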
diff --git a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
index 2f7d0a92fd7c..f391abcf7da9 100644
--- a/arch/i386/kernel/e820.c
+++ b/arch/i386/kernel/e820.c
@@ -668,7 +668,7 @@ void __init register_bootmem_low_pages(unsigned long max_low_pfn)
668 } 668 }
669} 669}
670 670
671void __init register_memory(void) 671void __init e820_register_memory(void)
672{ 672{
673 unsigned long gapstart, gapsize, round; 673 unsigned long gapstart, gapsize, round;
674 unsigned long long last; 674 unsigned long long last;
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index de34b7fed3c1..5e47683fc63a 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -302,12 +302,16 @@ sysenter_past_esp:
302 pushl $(__USER_CS) 302 pushl $(__USER_CS)
303 CFI_ADJUST_CFA_OFFSET 4 303 CFI_ADJUST_CFA_OFFSET 4
304 /*CFI_REL_OFFSET cs, 0*/ 304 /*CFI_REL_OFFSET cs, 0*/
305#ifndef CONFIG_COMPAT_VDSO
305 /* 306 /*
306 * Push current_thread_info()->sysenter_return to the stack. 307 * Push current_thread_info()->sysenter_return to the stack.
307 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words 308 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
308 * pushed above; +8 corresponds to copy_thread's esp0 setting. 309 * pushed above; +8 corresponds to copy_thread's esp0 setting.
309 */ 310 */
310 pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) 311 pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
312#else
313 pushl $SYSENTER_RETURN
314#endif
311 CFI_ADJUST_CFA_OFFSET 4 315 CFI_ADJUST_CFA_OFFSET 4
312 CFI_REL_OFFSET eip, 0 316 CFI_REL_OFFSET eip, 0
313 317
@@ -979,38 +983,6 @@ ENTRY(spurious_interrupt_bug)
979 jmp error_code 983 jmp error_code
980 CFI_ENDPROC 984 CFI_ENDPROC
981 985
982#ifdef CONFIG_STACK_UNWIND
983ENTRY(arch_unwind_init_running)
984 CFI_STARTPROC
985 movl 4(%esp), %edx
986 movl (%esp), %ecx
987 leal 4(%esp), %eax
988 movl %ebx, PT_EBX(%edx)
989 xorl %ebx, %ebx
990 movl %ebx, PT_ECX(%edx)
991 movl %ebx, PT_EDX(%edx)
992 movl %esi, PT_ESI(%edx)
993 movl %edi, PT_EDI(%edx)
994 movl %ebp, PT_EBP(%edx)
995 movl %ebx, PT_EAX(%edx)
996 movl $__USER_DS, PT_DS(%edx)
997 movl $__USER_DS, PT_ES(%edx)
998 movl $0, PT_GS(%edx)
999 movl %ebx, PT_ORIG_EAX(%edx)
1000 movl %ecx, PT_EIP(%edx)
1001 movl 12(%esp), %ecx
1002 movl $__KERNEL_CS, PT_CS(%edx)
1003 movl %ebx, PT_EFLAGS(%edx)
1004 movl %eax, PT_OLDESP(%edx)
1005 movl 8(%esp), %eax
1006 movl %ecx, 8(%esp)
1007 movl PT_EBX(%edx), %ebx
1008 movl $__KERNEL_DS, PT_OLDSS(%edx)
1009 jmpl *%eax
1010 CFI_ENDPROC
1011ENDPROC(arch_unwind_init_running)
1012#endif
1013
1014ENTRY(kernel_thread_helper) 986ENTRY(kernel_thread_helper)
1015 pushl $0 # fake return address for unwinder 987 pushl $0 # fake return address for unwinder
1016 CFI_STARTPROC 988 CFI_STARTPROC
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 2424cc9c7b3d..6a3875f81a0a 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -1227,26 +1227,32 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
1227 1227
1228static int __assign_irq_vector(int irq) 1228static int __assign_irq_vector(int irq)
1229{ 1229{
1230 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 1230 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1231 int vector; 1231 int vector, offset, i;
1232 1232
1233 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); 1233 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
1234 1234
1235 if (irq_vector[irq] > 0) 1235 if (irq_vector[irq] > 0)
1236 return irq_vector[irq]; 1236 return irq_vector[irq];
1237 1237
1238 current_vector += 8;
1239 if (current_vector == SYSCALL_VECTOR)
1240 current_vector += 8;
1241
1242 if (current_vector >= FIRST_SYSTEM_VECTOR) {
1243 offset++;
1244 if (!(offset % 8))
1245 return -ENOSPC;
1246 current_vector = FIRST_DEVICE_VECTOR + offset;
1247 }
1248
1249 vector = current_vector; 1238 vector = current_vector;
1239 offset = current_offset;
1240next:
1241 vector += 8;
1242 if (vector >= FIRST_SYSTEM_VECTOR) {
1243 offset = (offset + 1) % 8;
1244 vector = FIRST_DEVICE_VECTOR + offset;
1245 }
1246 if (vector == current_vector)
1247 return -ENOSPC;
1248 if (vector == SYSCALL_VECTOR)
1249 goto next;
1250 for (i = 0; i < NR_IRQ_VECTORS; i++)
1251 if (irq_vector[i] == vector)
1252 goto next;
1253
1254 current_vector = vector;
1255 current_offset = offset;
1250 irq_vector[irq] = vector; 1256 irq_vector[irq] = vector;
1251 1257
1252 return vector; 1258 return vector;
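
The rewritten __assign_irq_vector() searches the whole (vector, offset) space with a stride of 8, skipping the syscall vector and vectors already handed out, and only reports exhaustion after wrapping back to its starting point; the old code bumped a counter monotonically and could fail with free vectors remaining. A standalone sketch of the same search, with hypothetical scaled constants rather than the kernel's exact layout:

    #include <stdio.h>

    #define FIRST_DEVICE_VECTOR 0x31
    #define FIRST_SYSTEM_VECTOR 0xef
    #define SYSCALL_VECTOR      0x80
    #define NR_IRQ_VECTORS      224

    static int irq_vector[NR_IRQ_VECTORS];

    static int assign_irq_vector(int irq)
    {
        static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
        int vector = current_vector, offset = current_offset, i;

        if (irq_vector[irq] > 0)
            return irq_vector[irq];
    next:
        vector += 8;                            /* stride of 8 priorities */
        if (vector >= FIRST_SYSTEM_VECTOR) {
            offset = (offset + 1) % 8;          /* wrap into the next column */
            vector = FIRST_DEVICE_VECTOR + offset;
        }
        if (vector == current_vector)           /* full cycle: nothing free */
            return -1;                          /* -ENOSPC in the kernel */
        if (vector == SYSCALL_VECTOR)           /* never hand out int 0x80 */
            goto next;
        for (i = 0; i < NR_IRQ_VECTORS; i++)    /* skip vectors in use */
            if (irq_vector[i] == vector)
                goto next;

        current_vector = vector;
        current_offset = offset;
        irq_vector[irq] = vector;
        return vector;
    }

    int main(void)
    {
        printf("irq 5 -> vector 0x%x\n", assign_irq_vector(5));
        printf("irq 9 -> vector 0x%x\n", assign_irq_vector(9));
        return 0;
    }
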
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 47ffec57c0cb..c8fa13721bcb 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -722,7 +722,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
722 return NOTIFY_OK; 722 return NOTIFY_OK;
723} 723}
724 724
725static struct notifier_block mc_cpu_notifier = { 725static struct notifier_block __cpuinitdata mc_cpu_notifier = {
726 .notifier_call = mc_cpu_callback, 726 .notifier_call = mc_cpu_callback,
727}; 727};
728 728
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 2ce67228dff8..4f5983c98669 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -36,7 +36,7 @@
36 36
37/* Have we found an MP table */ 37/* Have we found an MP table */
38int smp_found_config; 38int smp_found_config;
39unsigned int __initdata maxcpus = NR_CPUS; 39unsigned int __cpuinitdata maxcpus = NR_CPUS;
40 40
41/* 41/*
42 * Various Linux-internal data structures created from the 42 * Various Linux-internal data structures created from the
@@ -102,9 +102,9 @@ static int __init mpf_checksum(unsigned char *mp, int len)
102 */ 102 */
103 103
104static int mpc_record; 104static int mpc_record;
105static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata; 105static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
106 106
107static void __devinit MP_processor_info (struct mpc_config_processor *m) 107static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
108{ 108{
109 int ver, apicid; 109 int ver, apicid;
110 physid_mask_t phys_cpu; 110 physid_mask_t phys_cpu;
@@ -822,7 +822,7 @@ void __init mp_register_lapic_address(u64 address)
822 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); 822 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
823} 823}
824 824
825void __devinit mp_register_lapic (u8 id, u8 enabled) 825void __cpuinit mp_register_lapic (u8 id, u8 enabled)
826{ 826{
827 struct mpc_config_processor processor; 827 struct mpc_config_processor processor;
828 int boot_cpu = 0; 828 int boot_cpu = 0;
@@ -1057,7 +1057,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1057 static int gsi_to_irq[MAX_GSI_NUM]; 1057 static int gsi_to_irq[MAX_GSI_NUM];
1058 1058
1059 /* Don't set up the ACPI SCI because it's already set up */ 1059 /* Don't set up the ACPI SCI because it's already set up */
1060 if (acpi_fadt.sci_int == gsi) 1060 if (acpi_gbl_FADT.sci_interrupt == gsi)
1061 return gsi; 1061 return gsi;
1062 1062
1063 ioapic = mp_find_ioapic(gsi); 1063 ioapic = mp_find_ioapic(gsi);
@@ -1114,7 +1114,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1114 /* 1114 /*
1115 * Don't assign IRQ used by ACPI SCI 1115 * Don't assign IRQ used by ACPI SCI
1116 */ 1116 */
1117 if (gsi == acpi_fadt.sci_int) 1117 if (gsi == acpi_gbl_FADT.sci_interrupt)
1118 gsi = pci_irq++; 1118 gsi = pci_irq++;
1119 gsi_to_irq[irq] = gsi; 1119 gsi_to_irq[irq] = gsi;
1120 } else { 1120 } else {
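
Beyond the ACPICA rename (acpi_fadt.sci_int becomes acpi_gbl_FADT.sci_interrupt), the mp_register_gsi() logic is unchanged: the SCI's GSI is returned as-is, and when new IRQ numbers are handed out the SCI's number is never reused. A loose userspace sketch of that reservation idea; the mapping table and counter are simplifications, not the kernel's sharing logic:

    #include <stdio.h>

    #define MAX_GSI_NUM 64

    /* Hypothetical SCI; the kernel reads acpi_gbl_FADT.sci_interrupt. */
    static const unsigned int sci_interrupt = 9;
    static unsigned int gsi_to_irq[MAX_GSI_NUM];
    static unsigned int next_pci_irq = 16;

    static unsigned int map_gsi(unsigned int gsi)
    {
        if (gsi == sci_interrupt || gsi >= MAX_GSI_NUM)
            return gsi;                     /* the SCI is already set up */
        if (!gsi_to_irq[gsi]) {
            unsigned int irq = next_pci_irq++;

            if (irq == sci_interrupt)       /* never hand the SCI out again */
                irq = next_pci_irq++;
            gsi_to_irq[gsi] = irq;
        }
        return gsi_to_irq[gsi];
    }

    int main(void)
    {
        printf("gsi 9  -> %u (SCI, untouched)\n", map_gsi(9));
        printf("gsi 20 -> %u\n", map_gsi(20));
        return 0;
    }
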
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index a5e34d655965..1a6f8bb8881c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -310,13 +310,7 @@ static int __init setup_nmi_watchdog(char *str)
310 310
311 if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE)) 311 if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
312 return 0; 312 return 0;
313 /* 313
314 * If any other x86 CPU has a local APIC, then
315 * please test the NMI stuff there and send me the
316 * missing bits. Right now Intel P6/P4 and AMD K7 only.
317 */
318 if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
319 return 0; /* no lapic support */
320 nmi_watchdog = nmi; 314 nmi_watchdog = nmi;
321 return 1; 315 return 1;
322} 316}
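
What survives in setup_nmi_watchdog() is the common __setup() pattern: parse the boot parameter, range-check it, store it, and defer the hardware capability check to later code. A userspace sketch of that parser, with hypothetical mode constants (the kernel uses get_option() rather than atoi()):

    #include <stdio.h>
    #include <stdlib.h>

    enum { NMI_NONE, NMI_IO_APIC, NMI_LOCAL_APIC, NMI_INVALID };

    static int nmi_watchdog = NMI_NONE;

    /* Mimics setup_nmi_watchdog(): parse, range-check, store; whether
     * the CPU can actually drive a local-APIC watchdog is checked later. */
    static int setup_nmi_watchdog(const char *str)
    {
        int nmi = atoi(str);

        if (nmi >= NMI_INVALID || nmi < NMI_NONE)
            return 0;
        nmi_watchdog = nmi;
        return 1;
    }

    int main(void)
    {
        setup_nmi_watchdog("2");    /* nmi_watchdog=2 -> NMI_LOCAL_APIC */
        printf("nmi_watchdog mode = %d\n", nmi_watchdog);
        return 0;
    }
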
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 3dceab5828f1..e55fd05da0f5 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -566,4 +566,11 @@ struct paravirt_ops paravirt_ops = {
566 .irq_enable_sysexit = native_irq_enable_sysexit, 566 .irq_enable_sysexit = native_irq_enable_sysexit,
567 .iret = native_iret, 567 .iret = native_iret,
568}; 568};
569EXPORT_SYMBOL(paravirt_ops); 569
570/*
571 * NOTE: CONFIG_PARAVIRT is experimental and the paravirt_ops
572 * semantics are subject to change. Hence we only do this
573 * internal-only export of this, until it gets sorted out and
574 * all lowlevel CPU ops used by modules are separately exported.
575 */
576EXPORT_SYMBOL_GPL(paravirt_ops);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 99308510a17c..c641056233a6 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -102,7 +102,12 @@ void default_idle(void)
102{ 102{
103 if (!hlt_counter && boot_cpu_data.hlt_works_ok) { 103 if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
104 current_thread_info()->status &= ~TS_POLLING; 104 current_thread_info()->status &= ~TS_POLLING;
105 smp_mb__after_clear_bit(); 105 /*
106 * TS_POLLING-cleared state must be visible before we
107 * test NEED_RESCHED:
108 */
109 smp_mb();
110
106 local_irq_disable(); 111 local_irq_disable();
107 if (!need_resched()) 112 if (!need_resched())
108 safe_halt(); /* enables interrupts racelessly */ 113 safe_halt(); /* enables interrupts racelessly */
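
The default_idle() change upgrades smp_mb__after_clear_bit() to a full smp_mb(): the store clearing TS_POLLING must be visible to other CPUs before this CPU loads need_resched(), otherwise a waker that just set NEED_RESCHED could still see TS_POLLING, skip the wakeup IPI, and leave this CPU halted with work pending. A userspace sketch of the same store/fence/load discipline using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int polling = 1;        /* stands in for TS_POLLING */
    static atomic_int need_resched_flag;  /* stands in for TIF_NEED_RESCHED */

    static void idle_step(void)
    {
        atomic_store_explicit(&polling, 0, memory_order_relaxed);
        /* Full barrier: order the polling store before the need_resched
         * load, exactly the job smp_mb() does in default_idle(). */
        atomic_thread_fence(memory_order_seq_cst);
        if (!atomic_load_explicit(&need_resched_flag, memory_order_relaxed))
            puts("would halt (safe: a waker now knows to send an IPI)");
        else
            puts("work pending, skip the halt");
    }

    int main(void)
    {
        idle_step();
        return 0;
    }
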
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index f3f94ac5736a..af8aabe85800 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -45,7 +45,7 @@
45/* 45/*
46 * Offset of eflags on child stack.. 46 * Offset of eflags on child stack..
47 */ 47 */
48#define EFL_OFFSET ((EFL-2)*4-sizeof(struct pt_regs)) 48#define EFL_OFFSET offsetof(struct pt_regs, eflags)
49 49
50static inline struct pt_regs *get_child_regs(struct task_struct *task) 50static inline struct pt_regs *get_child_regs(struct task_struct *task)
51{ 51{
@@ -54,24 +54,24 @@ static inline struct pt_regs *get_child_regs(struct task_struct *task)
54} 54}
55 55
56/* 56/*
57 * this routine will get a word off of the processes privileged stack. 57 * This routine will get a word off of the processes privileged stack.
58 * the offset is how far from the base addr as stored in the TSS. 58 * the offset is bytes into the pt_regs structure on the stack.
59 * this routine assumes that all the privileged stacks are in our 59 * This routine assumes that all the privileged stacks are in our
60 * data space. 60 * data space.
61 */ 61 */
62static inline int get_stack_long(struct task_struct *task, int offset) 62static inline int get_stack_long(struct task_struct *task, int offset)
63{ 63{
64 unsigned char *stack; 64 unsigned char *stack;
65 65
66 stack = (unsigned char *)task->thread.esp0; 66 stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
67 stack += offset; 67 stack += offset;
68 return (*((int *)stack)); 68 return (*((int *)stack));
69} 69}
70 70
71/* 71/*
72 * this routine will put a word on the processes privileged stack. 72 * This routine will put a word on the processes privileged stack.
73 * the offset is how far from the base addr as stored in the TSS. 73 * the offset is bytes into the pt_regs structure on the stack.
74 * this routine assumes that all the privileged stacks are in our 74 * This routine assumes that all the privileged stacks are in our
75 * data space. 75 * data space.
76 */ 76 */
77static inline int put_stack_long(struct task_struct *task, int offset, 77static inline int put_stack_long(struct task_struct *task, int offset,
@@ -79,7 +79,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
79{ 79{
80 unsigned char * stack; 80 unsigned char * stack;
81 81
82 stack = (unsigned char *) task->thread.esp0; 82 stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
83 stack += offset; 83 stack += offset;
84 *(unsigned long *) stack = data; 84 *(unsigned long *) stack = data;
85 return 0; 85 return 0;
@@ -114,7 +114,7 @@ static int putreg(struct task_struct *child,
114 } 114 }
115 if (regno > ES*4) 115 if (regno > ES*4)
116 regno -= 1*4; 116 regno -= 1*4;
117 put_stack_long(child, regno - sizeof(struct pt_regs), value); 117 put_stack_long(child, regno, value);
118 return 0; 118 return 0;
119} 119}
120 120
@@ -137,7 +137,6 @@ static unsigned long getreg(struct task_struct *child,
137 default: 137 default:
138 if (regno > ES*4) 138 if (regno > ES*4)
139 regno -= 1*4; 139 regno -= 1*4;
140 regno = regno - sizeof(struct pt_regs);
141 retval &= get_stack_long(child, regno); 140 retval &= get_stack_long(child, regno);
142 } 141 }
143 return retval; 142 return retval;
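
With EFL_OFFSET defined via offsetof(), the ptrace helpers now take offsets measured from the start of pt_regs, and get_stack_long()/put_stack_long() subtract sizeof(struct pt_regs) from esp0 themselves instead of every caller compensating. A userspace sketch of the addressing, using a toy register frame rather than the real i386 pt_regs layout:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    /* Toy stand-in for pt_regs; not the real i386 layout. */
    struct pt_regs { long ebx, ecx, edx, eflags; };

    #define EFL_OFFSET offsetof(struct pt_regs, eflags)

    /* The register frame sits at the top of the kernel stack, just
     * below esp0, so the helper subtracts sizeof(struct pt_regs) once. */
    static long get_stack_long(const char *esp0, size_t offset)
    {
        const char *stack = esp0 - sizeof(struct pt_regs);
        long v;

        memcpy(&v, stack + offset, sizeof(v));
        return v;
    }

    int main(void)
    {
        char kstack[256];
        struct pt_regs regs = { .eflags = 0x246 };
        char *esp0 = kstack + sizeof(kstack);

        memcpy(esp0 - sizeof(regs), &regs, sizeof(regs));
        printf("eflags = %#lx\n", get_stack_long(esp0, EFL_OFFSET));
        return 0;
    }
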
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 79df6e612dbd..4b31ad70c1ac 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -77,7 +77,7 @@ extern struct resource code_resource;
77extern struct resource data_resource; 77extern struct resource data_resource;
78 78
79/* cpu data as detected by the assembly code in head.S */ 79/* cpu data as detected by the assembly code in head.S */
80struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 80struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
81/* common cpu data for all cpus */ 81/* common cpu data for all cpus */
82struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 82struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
83EXPORT_SYMBOL(boot_cpu_data); 83EXPORT_SYMBOL(boot_cpu_data);
@@ -495,7 +495,7 @@ static void set_mca_bus(int x) { }
495#endif 495#endif
496 496
497/* Overridden in paravirt.c if CONFIG_PARAVIRT */ 497/* Overridden in paravirt.c if CONFIG_PARAVIRT */
498char * __attribute__((weak)) memory_setup(void) 498char * __init __attribute__((weak)) memory_setup(void)
499{ 499{
500 return machine_specific_memory_setup(); 500 return machine_specific_memory_setup();
501} 501}
@@ -639,7 +639,7 @@ void __init setup_arch(char **cmdline_p)
639 get_smp_config(); 639 get_smp_config();
640#endif 640#endif
641 641
642 register_memory(); 642 e820_register_memory();
643 643
644#ifdef CONFIG_VT 644#ifdef CONFIG_VT
645#if defined(CONFIG_VGA_CONSOLE) 645#if defined(CONFIG_VGA_CONSOLE)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index aef39be81361..8c6c8c52b95c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -159,7 +159,7 @@ void __init smp_alloc_memory(void)
159 * a given CPU 159 * a given CPU
160 */ 160 */
161 161
162static void __devinit smp_store_cpu_info(int id) 162static void __cpuinit smp_store_cpu_info(int id)
163{ 163{
164 struct cpuinfo_x86 *c = cpu_data + id; 164 struct cpuinfo_x86 *c = cpu_data + id;
165 165
@@ -227,7 +227,7 @@ static struct {
227 atomic_t count_start; 227 atomic_t count_start;
228 atomic_t count_stop; 228 atomic_t count_stop;
229 unsigned long long values[NR_CPUS]; 229 unsigned long long values[NR_CPUS];
230} tsc __initdata = { 230} tsc __cpuinitdata = {
231 .start_flag = ATOMIC_INIT(0), 231 .start_flag = ATOMIC_INIT(0),
232 .count_start = ATOMIC_INIT(0), 232 .count_start = ATOMIC_INIT(0),
233 .count_stop = ATOMIC_INIT(0), 233 .count_stop = ATOMIC_INIT(0),
@@ -332,7 +332,7 @@ static void __init synchronize_tsc_bp(void)
332 printk("passed.\n"); 332 printk("passed.\n");
333} 333}
334 334
335static void __init synchronize_tsc_ap(void) 335static void __cpuinit synchronize_tsc_ap(void)
336{ 336{
337 int i; 337 int i;
338 338
@@ -364,7 +364,7 @@ extern void calibrate_delay(void);
364 364
365static atomic_t init_deasserted; 365static atomic_t init_deasserted;
366 366
367static void __devinit smp_callin(void) 367static void __cpuinit smp_callin(void)
368{ 368{
369 int cpuid, phys_id; 369 int cpuid, phys_id;
370 unsigned long timeout; 370 unsigned long timeout;
@@ -538,7 +538,7 @@ set_cpu_sibling_map(int cpu)
538/* 538/*
539 * Activate a secondary processor. 539 * Activate a secondary processor.
540 */ 540 */
541static void __devinit start_secondary(void *unused) 541static void __cpuinit start_secondary(void *unused)
542{ 542{
543 /* 543 /*
544 * Don't put *anything* before secondary_cpu_init(), SMP 544 * Don't put *anything* before secondary_cpu_init(), SMP
@@ -596,6 +596,12 @@ static void __devinit start_secondary(void *unused)
596void __devinit initialize_secondary(void) 596void __devinit initialize_secondary(void)
597{ 597{
598 /* 598 /*
599 * switch to the per CPU GDT we already set up
600 * in do_boot_cpu()
601 */
602 cpu_set_gdt(current_thread_info()->cpu);
603
604 /*
599 * We don't actually need to load the full TSS, 605 * We don't actually need to load the full TSS,
600 * basically just the stack pointer and the eip. 606 * basically just the stack pointer and the eip.
601 */ 607 */
@@ -931,7 +937,7 @@ static inline struct task_struct * alloc_idle_task(int cpu)
931#define alloc_idle_task(cpu) fork_idle(cpu) 937#define alloc_idle_task(cpu) fork_idle(cpu)
932#endif 938#endif
933 939
934static int __devinit do_boot_cpu(int apicid, int cpu) 940static int __cpuinit do_boot_cpu(int apicid, int cpu)
935/* 941/*
936 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 942 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
937 * (ie clustered apic addressing mode), this is a LOGICAL apic ID. 943 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -972,9 +978,6 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
972 /* Stack for startup_32 can be just as for start_secondary onwards */ 978 /* Stack for startup_32 can be just as for start_secondary onwards */
973 stack_start.esp = (void *) idle->thread.esp; 979 stack_start.esp = (void *) idle->thread.esp;
974 980
975 start_pda = cpu_pda(cpu);
976 cpu_gdt_descr = per_cpu(cpu_gdt_descr, cpu);
977
978 irq_ctx_init(cpu); 981 irq_ctx_init(cpu);
979 982
980 x86_cpu_to_apicid[cpu] = apicid; 983 x86_cpu_to_apicid[cpu] = apicid;
@@ -1432,7 +1435,7 @@ void __cpu_die(unsigned int cpu)
1432} 1435}
1433#endif /* CONFIG_HOTPLUG_CPU */ 1436#endif /* CONFIG_HOTPLUG_CPU */
1434 1437
1435int __devinit __cpu_up(unsigned int cpu) 1438int __cpuinit __cpu_up(unsigned int cpu)
1436{ 1439{
1437#ifdef CONFIG_HOTPLUG_CPU 1440#ifdef CONFIG_HOTPLUG_CPU
1438 int ret=0; 1441 int ret=0;
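
Most of the smpboot churn, like the microcode, mpparse, and setup changes above, is re-annotation to __cpuinit/__cpuinitdata, so CPU bring-up code is discarded after boot only when hotplug support is compiled out; the trampoline.S hunk further down applies the same idea to assembly with a conditional .section directive. A simplified model of how such an annotation reduces to section placement (the real definitions in <linux/init.h> are more involved):

    /* Simplified model of __cpuinit: with CPU hotplug the code must stay
     * resident, since a CPU can come up long after boot; without it, the
     * code can join the init sections and be freed once booting finishes. */
    #ifdef CONFIG_HOTPLUG_CPU
    #define __cpuinit
    #else
    #define __cpuinit __attribute__((__section__(".init.text")))
    #endif

    static void __cpuinit store_cpu_info_sketch(int id)
    {
        (void)id;   /* per-CPU setup would happen here */
    }

    int main(void)
    {
        store_cpu_info_sketch(0);
        return 0;
    }
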
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index f7e735c077c3..2a8713ec0f9a 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -62,19 +62,19 @@ extern void * boot_ioremap(unsigned long, unsigned long);
62/* Identify CPU proximity domains */ 62/* Identify CPU proximity domains */
63static void __init parse_cpu_affinity_structure(char *p) 63static void __init parse_cpu_affinity_structure(char *p)
64{ 64{
65 struct acpi_table_processor_affinity *cpu_affinity = 65 struct acpi_srat_cpu_affinity *cpu_affinity =
66 (struct acpi_table_processor_affinity *) p; 66 (struct acpi_srat_cpu_affinity *) p;
67 67
68 if (!cpu_affinity->flags.enabled) 68 if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
69 return; /* empty entry */ 69 return; /* empty entry */
70 70
71 /* mark this node as "seen" in node bitmap */ 71 /* mark this node as "seen" in node bitmap */
72 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain); 72 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);
73 73
74 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain; 74 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;
75 75
76 printk("CPU 0x%02X in proximity domain 0x%02X\n", 76 printk("CPU 0x%02X in proximity domain 0x%02X\n",
77 cpu_affinity->apic_id, cpu_affinity->proximity_domain); 77 cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
78} 78}
79 79
80/* 80/*
@@ -84,28 +84,27 @@ static void __init parse_cpu_affinity_structure(char *p)
84static void __init parse_memory_affinity_structure (char *sratp) 84static void __init parse_memory_affinity_structure (char *sratp)
85{ 85{
86 unsigned long long paddr, size; 86 unsigned long long paddr, size;
87 unsigned long start_pfn, end_pfn; 87 unsigned long start_pfn, end_pfn;
88 u8 pxm; 88 u8 pxm;
89 struct node_memory_chunk_s *p, *q, *pend; 89 struct node_memory_chunk_s *p, *q, *pend;
90 struct acpi_table_memory_affinity *memory_affinity = 90 struct acpi_srat_mem_affinity *memory_affinity =
91 (struct acpi_table_memory_affinity *) sratp; 91 (struct acpi_srat_mem_affinity *) sratp;
92 92
93 if (!memory_affinity->flags.enabled) 93 if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
94 return; /* empty entry */ 94 return; /* empty entry */
95 95
96 pxm = memory_affinity->proximity_domain & 0xff;
97
96 /* mark this node as "seen" in node bitmap */ 98 /* mark this node as "seen" in node bitmap */
97 BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain); 99 BMAP_SET(pxm_bitmap, pxm);
98 100
99 /* calculate info for memory chunk structure */ 101 /* calculate info for memory chunk structure */
100 paddr = memory_affinity->base_addr_hi; 102 paddr = memory_affinity->base_address;
101 paddr = (paddr << 32) | memory_affinity->base_addr_lo; 103 size = memory_affinity->length;
102 size = memory_affinity->length_hi; 104
103 size = (size << 32) | memory_affinity->length_lo;
104
105 start_pfn = paddr >> PAGE_SHIFT; 105 start_pfn = paddr >> PAGE_SHIFT;
106 end_pfn = (paddr + size) >> PAGE_SHIFT; 106 end_pfn = (paddr + size) >> PAGE_SHIFT;
107 107
108 pxm = memory_affinity->proximity_domain;
109 108
110 if (num_memory_chunks >= MAXCHUNKS) { 109 if (num_memory_chunks >= MAXCHUNKS) {
111 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n", 110 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
@@ -132,8 +131,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
132 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n", 131 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
133 start_pfn, end_pfn, 132 start_pfn, end_pfn,
134 memory_affinity->memory_type, 133 memory_affinity->memory_type,
135 memory_affinity->proximity_domain, 134 pxm,
136 (memory_affinity->flags.hot_pluggable ? 135 ((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
137 "enabled and removable" : "enabled" ) ); 136 "enabled and removable" : "enabled" ) );
138} 137}
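
The SRAT parsing now matches ACPICA 2.0's raw view of the table: flags are tested with explicit masks instead of bitfields, and base address and length are native 64-bit fields, so the old hi/lo reassembly disappears. A sketch contrasting the two styles; the old struct's field names are hypothetical:

    #include <stdio.h>

    #define ACPI_SRAT_MEM_ENABLED (1 << 0)   /* flag bit, mask-tested */

    /* Old-style split fields (hypothetical names for illustration). */
    struct old_mem_affinity { unsigned int base_lo, base_hi, len_lo, len_hi; };
    /* New-style native 64-bit fields. */
    struct new_mem_affinity {
        unsigned long long base_address, length;
        unsigned int flags;
    };

    int main(void)
    {
        struct old_mem_affinity o = { 0x00000000, 0x1, 0x40000000, 0x0 };
        struct new_mem_affinity n = { 0x100000000ULL, 0x40000000ULL,
                                      ACPI_SRAT_MEM_ENABLED };

        /* Old code reassembled 64-bit values from two 32-bit halves. */
        unsigned long long paddr =
            ((unsigned long long)o.base_hi << 32) | o.base_lo;
        unsigned long long size =
            ((unsigned long long)o.len_hi << 32) | o.len_lo;
        printf("old: %llx + %llx\n", paddr, size);

        /* New code tests flags with a mask and reads fields directly. */
        if (n.flags & ACPI_SRAT_MEM_ENABLED)
            printf("new: %llx + %llx\n", n.base_address, n.length);
        return 0;
    }
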
139 138
@@ -185,10 +184,10 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
185 num_memory_chunks = 0; 184 num_memory_chunks = 0;
186 while (p < end) { 185 while (p < end) {
187 switch (*p) { 186 switch (*p) {
188 case ACPI_SRAT_PROCESSOR_AFFINITY: 187 case ACPI_SRAT_TYPE_CPU_AFFINITY:
189 parse_cpu_affinity_structure(p); 188 parse_cpu_affinity_structure(p);
190 break; 189 break;
191 case ACPI_SRAT_MEMORY_AFFINITY: 190 case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
192 parse_memory_affinity_structure(p); 191 parse_memory_affinity_structure(p);
193 break; 192 break;
194 default: 193 default:
@@ -262,31 +261,30 @@ out_fail:
262 return 0; 261 return 0;
263} 262}
264 263
264struct acpi_static_rsdt {
265 struct acpi_table_rsdt table;
266 u32 padding[7]; /* Allow for 7 more table entries */
267};
268
265int __init get_memcfg_from_srat(void) 269int __init get_memcfg_from_srat(void)
266{ 270{
267 struct acpi_table_header *header = NULL; 271 struct acpi_table_header *header = NULL;
268 struct acpi_table_rsdp *rsdp = NULL; 272 struct acpi_table_rsdp *rsdp = NULL;
269 struct acpi_table_rsdt *rsdt = NULL; 273 struct acpi_table_rsdt *rsdt = NULL;
270 struct acpi_pointer *rsdp_address = NULL; 274 acpi_native_uint rsdp_address = 0;
271 struct acpi_table_rsdt saved_rsdt; 275 struct acpi_static_rsdt saved_rsdt;
272 int tables = 0; 276 int tables = 0;
273 int i = 0; 277 int i = 0;
274 278
275 if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, 279 rsdp_address = acpi_find_rsdp();
276 rsdp_address))) { 280 if (!rsdp_address) {
277 printk("%s: System description tables not found\n", 281 printk("%s: System description tables not found\n",
278 __FUNCTION__); 282 __FUNCTION__);
279 goto out_err; 283 goto out_err;
280 } 284 }
281 285
282 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { 286 printk("%s: assigning address to rsdp\n", __FUNCTION__);
283 printk("%s: assigning address to rsdp\n", __FUNCTION__); 287 rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
284 rsdp = (struct acpi_table_rsdp *)
285 (u32)rsdp_address->pointer.physical;
286 } else {
287 printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
288 goto out_err;
289 }
290 if (!rsdp) { 288 if (!rsdp) {
291 printk("%s: Didn't find ACPI root!\n", __FUNCTION__); 289 printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
292 goto out_err; 290 goto out_err;
@@ -295,13 +293,13 @@ int __init get_memcfg_from_srat(void)
295 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision, 293 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
296 rsdp->oem_id); 294 rsdp->oem_id);
297 295
298 if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) { 296 if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
299 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__); 297 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
300 goto out_err; 298 goto out_err;
301 } 299 }
302 300
303 rsdt = (struct acpi_table_rsdt *) 301 rsdt = (struct acpi_table_rsdt *)
304 boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt)); 302 boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
305 303
306 if (!rsdt) { 304 if (!rsdt) {
307 printk(KERN_WARNING 305 printk(KERN_WARNING
@@ -310,9 +308,9 @@ int __init get_memcfg_from_srat(void)
310 goto out_err; 308 goto out_err;
311 } 309 }
312 310
313 header = & rsdt->header; 311 header = &rsdt->header;
314 312
315 if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) { 313 if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
316 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n"); 314 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
317 goto out_err; 315 goto out_err;
318 } 316 }
@@ -330,9 +328,9 @@ int __init get_memcfg_from_srat(void)
330 328
331 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt)); 329 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
332 330
333 if (saved_rsdt.header.length > sizeof(saved_rsdt)) { 331 if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
334 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n", 332 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
335 saved_rsdt.header.length); 333 saved_rsdt.table.header.length);
336 goto out_err; 334 goto out_err;
337 } 335 }
338 336
@@ -341,15 +339,15 @@ int __init get_memcfg_from_srat(void)
341 for (i = 0; i < tables; i++) { 339 for (i = 0; i < tables; i++) {
342 /* Map in header, then map in full table length. */ 340 /* Map in header, then map in full table length. */
343 header = (struct acpi_table_header *) 341 header = (struct acpi_table_header *)
344 boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header)); 342 boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
345 if (!header) 343 if (!header)
346 break; 344 break;
347 header = (struct acpi_table_header *) 345 header = (struct acpi_table_header *)
348 boot_ioremap(saved_rsdt.entry[i], header->length); 346 boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
349 if (!header) 347 if (!header)
350 break; 348 break;
351 349
352 if (strncmp((char *) &header->signature, "SRAT", 4)) 350 if (strncmp((char *) &header->signature, ACPI_SIG_SRAT, 4))
353 continue; 351 continue;
354 352
355 /* we've found the srat table. don't need to look at any more tables */ 353 /* we've found the srat table. don't need to look at any more tables */
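
The acpi_static_rsdt wrapper exists because struct acpi_table_rsdt in ACPICA 2.0 declares only a single table_offset_entry; padding the copy buffer leaves room for up to eight entries, and the length check bails out on anything larger. A toy sketch of that reserve-then-bound-check pattern:

    #include <stdio.h>
    #include <string.h>

    struct table_header { unsigned int length; };
    struct rsdt {
        struct table_header header;
        unsigned int table_offset_entry[1];
    };
    /* Reserve room for 7 entries beyond the one the struct declares. */
    struct static_rsdt { struct rsdt table; unsigned int padding[7]; };

    int main(void)
    {
        unsigned int raw[16] = { 0 };
        struct rsdt *src = (struct rsdt *)raw;
        struct static_rsdt saved;

        src->header.length =
            sizeof(struct table_header) + 3 * sizeof(unsigned int);
        if (src->header.length > sizeof(saved)) {   /* too big to copy */
            fprintf(stderr, "RSDT too large\n");
            return 1;
        }
        memcpy(&saved, src, sizeof(saved));
        printf("copied a %u-byte RSDT into a %zu-byte buffer\n",
               src->header.length, sizeof(saved));
        return 0;
    }
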
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 7de9117b5a3a..5da744204d10 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -79,11 +79,6 @@ int __init sysenter_setup(void)
79#ifdef CONFIG_COMPAT_VDSO 79#ifdef CONFIG_COMPAT_VDSO
80 __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY); 80 __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
81 printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO)); 81 printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
82#else
83 /*
84 * In the non-compat case the ELF coredumping code needs the fixmap:
85 */
86 __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
87#endif 82#endif
88 83
89 if (!boot_cpu_has(X86_FEATURE_SEP)) { 84 if (!boot_cpu_has(X86_FEATURE_SEP)) {
@@ -100,6 +95,7 @@ int __init sysenter_setup(void)
100 return 0; 95 return 0;
101} 96}
102 97
98#ifndef CONFIG_COMPAT_VDSO
103static struct page *syscall_nopage(struct vm_area_struct *vma, 99static struct page *syscall_nopage(struct vm_area_struct *vma,
104 unsigned long adr, int *type) 100 unsigned long adr, int *type)
105{ 101{
@@ -146,6 +142,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
146 vma->vm_end = addr + PAGE_SIZE; 142 vma->vm_end = addr + PAGE_SIZE;
147 /* MAYWRITE to allow gdb to COW and set breakpoints */ 143 /* MAYWRITE to allow gdb to COW and set breakpoints */
148 vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE; 144 vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
145 /*
146 * Make sure the vDSO gets into every core dump.
147 * Dumping its contents makes post-mortem fully interpretable later
148 * without matching up the same kernel and hardware config to see
149 * what PC values meant.
150 */
151 vma->vm_flags |= VM_ALWAYSDUMP;
149 vma->vm_flags |= mm->def_flags; 152 vma->vm_flags |= mm->def_flags;
150 vma->vm_page_prot = protection_map[vma->vm_flags & 7]; 153 vma->vm_page_prot = protection_map[vma->vm_flags & 7];
151 vma->vm_ops = &syscall_vm_ops; 154 vma->vm_ops = &syscall_vm_ops;
@@ -187,3 +190,4 @@ int in_gate_area_no_task(unsigned long addr)
187{ 190{
188 return 0; 191 return 0;
189} 192}
193#endif
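
The sysenter.c change drops the PAGE_KERNEL_RO fixmap that existed only so core dumps could reach the vDSO, and instead tags the per-process vDSO vma with VM_ALWAYSDUMP, as the new comment explains. The effect is observable from userspace: the [vdso] mapping that must now land in every core file is the one listed in /proc/self/maps (assuming the kernel labels it that way):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (strstr(line, "[vdso]"))     /* the page VM_ALWAYSDUMP covers */
                fputs(line, stdout);
        fclose(f);
        return 0;
    }
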
diff --git a/arch/i386/kernel/trampoline.S b/arch/i386/kernel/trampoline.S
index fcce0e61b0e7..2f1814c5cfd7 100644
--- a/arch/i386/kernel/trampoline.S
+++ b/arch/i386/kernel/trampoline.S
@@ -38,6 +38,11 @@
38 38
39.data 39.data
40 40
41/* We can free up trampoline after bootup if cpu hotplug is not supported. */
42#ifndef CONFIG_HOTPLUG_CPU
43.section ".init.data","aw",@progbits
44#endif
45
41.code16 46.code16
42 47
43ENTRY(trampoline_data) 48ENTRY(trampoline_data)
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 2b30dbf8d117..0efad8aeb41a 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -94,11 +94,6 @@ asmlinkage void spurious_interrupt_bug(void);
94asmlinkage void machine_check(void); 94asmlinkage void machine_check(void);
95 95
96int kstack_depth_to_print = 24; 96int kstack_depth_to_print = 24;
97#ifdef CONFIG_STACK_UNWIND
98static int call_trace = 1;
99#else
100#define call_trace (-1)
101#endif
102ATOMIC_NOTIFIER_HEAD(i386die_chain); 97ATOMIC_NOTIFIER_HEAD(i386die_chain);
103 98
104int register_die_notifier(struct notifier_block *nb) 99int register_die_notifier(struct notifier_block *nb)
@@ -152,33 +147,6 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
152 return ebp; 147 return ebp;
153} 148}
154 149
155struct ops_and_data {
156 struct stacktrace_ops *ops;
157 void *data;
158};
159
160static asmlinkage int
161dump_trace_unwind(struct unwind_frame_info *info, void *data)
162{
163 struct ops_and_data *oad = (struct ops_and_data *)data;
164 int n = 0;
165 unsigned long sp = UNW_SP(info);
166
167 if (arch_unw_user_mode(info))
168 return -1;
169 while (unwind(info) == 0 && UNW_PC(info)) {
170 n++;
171 oad->ops->address(oad->data, UNW_PC(info));
172 if (arch_unw_user_mode(info))
173 break;
174 if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
175 && sp > UNW_SP(info))
176 break;
177 sp = UNW_SP(info);
178 }
179 return n;
180}
181
182#define MSG(msg) ops->warning(data, msg) 150#define MSG(msg) ops->warning(data, msg)
183 151
184void dump_trace(struct task_struct *task, struct pt_regs *regs, 152void dump_trace(struct task_struct *task, struct pt_regs *regs,
@@ -190,41 +158,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
190 if (!task) 158 if (!task)
191 task = current; 159 task = current;
192 160
193 if (call_trace >= 0) {
194 int unw_ret = 0;
195 struct unwind_frame_info info;
196 struct ops_and_data oad = { .ops = ops, .data = data };
197
198 if (regs) {
199 if (unwind_init_frame_info(&info, task, regs) == 0)
200 unw_ret = dump_trace_unwind(&info, &oad);
201 } else if (task == current)
202 unw_ret = unwind_init_running(&info, dump_trace_unwind,
203 &oad);
204 else {
205 if (unwind_init_blocked(&info, task) == 0)
206 unw_ret = dump_trace_unwind(&info, &oad);
207 }
208 if (unw_ret > 0) {
209 if (call_trace == 1 && !arch_unw_user_mode(&info)) {
210 ops->warning_symbol(data,
211 "DWARF2 unwinder stuck at %s",
212 UNW_PC(&info));
213 if (UNW_SP(&info) >= PAGE_OFFSET) {
214 MSG("Leftover inexact backtrace:");
215 stack = (void *)UNW_SP(&info);
216 if (!stack)
217 return;
218 ebp = UNW_FP(&info);
219 } else
220 MSG("Full inexact backtrace again:");
221 } else if (call_trace >= 1)
222 return;
223 else
224 MSG("Full inexact backtrace again:");
225 } else
226 MSG("Inexact backtrace:");
227 }
228 if (!stack) { 161 if (!stack) {
229 unsigned long dummy; 162 unsigned long dummy;
230 stack = &dummy; 163 stack = &dummy;
@@ -1258,19 +1191,3 @@ static int __init kstack_setup(char *s)
1258 return 1; 1191 return 1;
1259} 1192}
1260__setup("kstack=", kstack_setup); 1193__setup("kstack=", kstack_setup);
1261
1262#ifdef CONFIG_STACK_UNWIND
1263static int __init call_trace_setup(char *s)
1264{
1265 if (strcmp(s, "old") == 0)
1266 call_trace = -1;
1267 else if (strcmp(s, "both") == 0)
1268 call_trace = 0;
1269 else if (strcmp(s, "newfallback") == 0)
1270 call_trace = 1;
1271 else if (strcmp(s, "new") == 2)
1272 call_trace = 2;
1273 return 1;
1274}
1275__setup("call_trace=", call_trace_setup);
1276#endif
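
With CONFIG_STACK_UNWIND and the DWARF2 unwinder gone, dump_trace() always uses the frame-pointer walk in print_context_stack(): follow the saved %ebp chain, reporting the return address stored one word above each saved frame pointer. A userspace sketch of that walk; it assumes frame pointers are present (build with -O0 or -fno-omit-frame-pointer) and is illustrative rather than portable:

    #include <stdio.h>
    #include <stdint.h>

    /* Walk the saved frame-pointer chain, like print_context_stack():
     * each frame stores [saved fp][return address] at *fp. */
    static void dump_stack_fp(void)
    {
        uintptr_t *fp = __builtin_frame_address(0);
        int depth = 0;

        while (fp && depth++ < 16) {
            uintptr_t ret = fp[1];          /* return address above saved fp */
            uintptr_t next = fp[0];

            if (!ret)
                break;
            printf(" [<%#lx>]\n", (unsigned long)ret);
            if ((uintptr_t *)next <= fp)    /* stacks grow down: need progress */
                break;
            fp = (uintptr_t *)next;
        }
    }

    static void __attribute__((noinline)) leaf(void)   { dump_stack_fp(); }
    static void __attribute__((noinline)) middle(void) { leaf(); }

    int main(void)
    {
        middle();
        return 0;
    }
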
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 1bbe45dca7a0..2cfc7b09b925 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -24,7 +24,7 @@
24 */ 24 */
25unsigned int tsc_khz; 25unsigned int tsc_khz;
26 26
27int tsc_disable __cpuinitdata = 0; 27int tsc_disable;
28 28
29#ifdef CONFIG_X86_TSC 29#ifdef CONFIG_X86_TSC
30static int __init tsc_setup(char *str) 30static int __init tsc_setup(char *str)