author     Paul Mackerras <paulus@samba.org>  2006-12-03 23:59:07 -0500
committer  Paul Mackerras <paulus@samba.org>  2006-12-03 23:59:07 -0500
commit     79acbb3ff2d8095b692e1502b9eb2ccec348de26
tree       6ab773e5a8f9de2cd6443362b21d0d6fffe3b35e  /arch/i386/kernel
parent     19a79859e168640f8e16d7b216d211c1c52b687a
parent     2b5f6dcce5bf94b9b119e9ed8d537098ec61c3d2
Merge branch 'linux-2.6' into for-linus
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/acpi/boot.c         20
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c    8
-rw-r--r--  arch/i386/kernel/alternative.c        4
-rw-r--r--  arch/i386/kernel/apm.c               39
-rw-r--r--  arch/i386/kernel/cpuid.c             20
-rw-r--r--  arch/i386/kernel/head.S               2
-rw-r--r--  arch/i386/kernel/i8253.c              2
-rw-r--r--  arch/i386/kernel/i8259.c              7
-rw-r--r--  arch/i386/kernel/io_apic.c          113
-rw-r--r--  arch/i386/kernel/irq.c                2
-rw-r--r--  arch/i386/kernel/kprobes.c           22
-rw-r--r--  arch/i386/kernel/microcode.c          2
-rw-r--r--  arch/i386/kernel/msr.c               20
-rw-r--r--  arch/i386/kernel/nmi.c               10
-rw-r--r--  arch/i386/kernel/pci-dma.c            4
-rw-r--r--  arch/i386/kernel/process.c           10
-rw-r--r--  arch/i386/kernel/setup.c              2
-rw-r--r--  arch/i386/kernel/traps.c             10
-rw-r--r--  arch/i386/kernel/tsc.c                6
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S        9
20 files changed, 209 insertions, 103 deletions
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index ab974ff9707..c8f96cff07c 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -70,7 +70,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
 
 #define PREFIX			"ACPI: "
 
-int acpi_noirq __initdata;	/* skip ACPI IRQ initialization */
+int acpi_noirq;			/* skip ACPI IRQ initialization */
 int acpi_pci_disabled __initdata;	/* skip ACPI PCI scan and IRQ initialization */
 int acpi_ht __initdata = 1;	/* enable HT */
 
@@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict);
 acpi_interrupt_flags acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
+int acpi_use_timer_override __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -332,7 +333,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
 /*
  * Parse Interrupt Source Override for the ACPI SCI
  */
-static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
+static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 {
 	if (trigger == 0)	/* compatible SCI trigger is level */
 		trigger = 3;
@@ -352,13 +353,13 @@ static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigge
 	 * If GSI is < 16, this will update its flags,
 	 * else it will create a new mp_irqs[] entry.
 	 */
-	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+	mp_override_legacy_irq(gsi, polarity, trigger, gsi);
 
 	/*
 	 * stash over-ride to indicate we've been here
 	 * and for later update of acpi_fadt
 	 */
-	acpi_sci_override_gsi = bus_irq;
+	acpi_sci_override_gsi = gsi;
 	return;
 }
 
@@ -376,7 +377,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_header * header,
 	acpi_table_print_madt_entry(header);
 
 	if (intsrc->bus_irq == acpi_fadt.sci_int) {
-		acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
+		acpi_sci_ioapic_setup(intsrc->global_irq,
 				      intsrc->flags.polarity,
 				      intsrc->flags.trigger);
 		return 0;
@@ -879,7 +880,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
 	 * pretend we got one so we can set the SCI flags.
 	 */
 	if (!acpi_sci_override_gsi)
-		acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
+		acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
 
 	/* Fill in identity legacy mapings where no override */
 	mp_config_acpi_legacy_irqs();
@@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg)
 	return 0;
 }
 early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
+
+static int __init parse_acpi_use_timer_override(char *arg)
+{
+	acpi_use_timer_override = 1;
+	return 0;
+}
+early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
 #endif /* CONFIG_X86_IO_APIC */
 
 static int __init setup_acpi_sci(char *s)
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index fe799b11ac0..c9841692bb7 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device)
 #ifdef CONFIG_ACPI
 	/* According to Nvidia all timer overrides are bogus unless HPET
 	   is enabled. */
-	if (vendor == PCI_VENDOR_ID_NVIDIA) {
+	if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
 		nvidia_hpet_detected = 0;
 		acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
 		if (nvidia_hpet_detected == 0) {
 			acpi_skip_timer_override = 1;
+			printk(KERN_INFO "Nvidia board "
+			       "detected. Ignoring ACPI "
+			       "timer override.\n");
+			printk(KERN_INFO "If you got timer trouble "
+			       "try acpi_use_timer_override\n");
+
 		}
 	}
 #endif
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 28ab8064976..583c238e17f 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -344,6 +344,7 @@ void alternatives_smp_switch(int smp)
 
 void __init alternative_instructions(void)
 {
+	unsigned long flags;
 	if (no_replacement) {
 		printk(KERN_INFO "(SMP-)alternatives turned off\n");
 		free_init_pages("SMP alternatives",
@@ -351,6 +352,8 @@ void __init alternative_instructions(void)
 				(unsigned long)__smp_alt_end);
 		return;
 	}
+
+	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 	/* switch to patch-once-at-boottime-only mode and free the
@@ -386,4 +389,5 @@ void __init alternative_instructions(void)
 		alternatives_smp_switch(0);
 	}
 #endif
+	local_irq_restore(flags);
 }
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index b42f2d914af..a60358fe9a4 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -198,7 +198,7 @@
  * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
  *
  * [This document is available from Microsoft at:
- *    http://www.microsoft.com/hwdev/busbios/amp_12.htm]
+ *    http://www.microsoft.com/whdc/archive/amp_12.mspx]
  */
 
 #include <linux/module.h>
@@ -540,11 +540,30 @@ static inline void apm_restore_cpus(cpumask_t mask)
  * Also, we KNOW that for the non error case of apm_bios_call, there
  * is no useful data returned in the low order 8 bits of eax.
  */
-#define APM_DO_CLI	\
-	if (apm_info.allow_ints) \
-		local_irq_enable(); \
-	else \
+
+static inline unsigned long __apm_irq_save(void)
+{
+	unsigned long flags;
+	local_save_flags(flags);
+	if (apm_info.allow_ints) {
+		if (irqs_disabled_flags(flags))
+			local_irq_enable();
+	} else
+		local_irq_disable();
+
+	return flags;
+}
+
+#define apm_irq_save(flags) \
+	do { flags = __apm_irq_save(); } while (0)
+
+static inline void apm_irq_restore(unsigned long flags)
+{
+	if (irqs_disabled_flags(flags))
 		local_irq_disable();
+	else if (irqs_disabled())
+		local_irq_enable();
+}
 
 #ifdef APM_ZERO_SEGS
 #	define APM_DECL_SEGS \
@@ -596,12 +615,11 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 	save_desc_40 = gdt[0x40 / 8];
 	gdt[0x40 / 8] = bad_bios_desc;
 
-	local_save_flags(flags);
-	APM_DO_CLI;
+	apm_irq_save(flags);
 	APM_DO_SAVE_SEGS;
 	apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
 	APM_DO_RESTORE_SEGS;
-	local_irq_restore(flags);
+	apm_irq_restore(flags);
 	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
@@ -640,12 +658,11 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 	save_desc_40 = gdt[0x40 / 8];
 	gdt[0x40 / 8] = bad_bios_desc;
 
-	local_save_flags(flags);
-	APM_DO_CLI;
+	apm_irq_save(flags);
 	APM_DO_SAVE_SEGS;
 	error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
 	APM_DO_RESTORE_SEGS;
-	local_irq_restore(flags);
+	apm_irq_restore(flags);
 	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index fde8bea85ce..ab0c327e79d 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -156,14 +156,14 @@ static struct file_operations cpuid_fops = {
 	.open = cpuid_open,
 };
 
-static int cpuid_class_device_create(int i)
+static int cpuid_device_create(int i)
 {
 	int err = 0;
-	struct class_device *class_err;
+	struct device *dev;
 
-	class_err = class_device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i);
-	if (IS_ERR(class_err))
-		err = PTR_ERR(class_err);
+	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i);
+	if (IS_ERR(dev))
+		err = PTR_ERR(dev);
 	return err;
 }
 
@@ -174,10 +174,10 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac
 
 	switch (action) {
 	case CPU_ONLINE:
-		cpuid_class_device_create(cpu);
+		cpuid_device_create(cpu);
 		break;
 	case CPU_DEAD:
-		class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 		break;
 	}
 	return NOTIFY_OK;
@@ -206,7 +206,7 @@ static int __init cpuid_init(void)
 		goto out_chrdev;
 	}
 	for_each_online_cpu(i) {
-		err = cpuid_class_device_create(i);
+		err = cpuid_device_create(i);
 		if (err != 0)
 			goto out_class;
 	}
@@ -218,7 +218,7 @@ static int __init cpuid_init(void)
 out_class:
 	i = 0;
 	for_each_online_cpu(i) {
-		class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
+		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
 	}
 	class_destroy(cpuid_class);
 out_chrdev:
@@ -232,7 +232,7 @@ static void __exit cpuid_exit(void)
 	int cpu = 0;
 
 	for_each_online_cpu(cpu)
-		class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 	class_destroy(cpuid_class);
 	unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
 	unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index be9d883c62c..ca31f18d277 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -317,7 +317,7 @@ is386:	movl $2,%ecx		# set MP
 	movl %eax,%gs
 	lldt %ax
 	cld			# gcc2 wants the direction flag cleared at all times
-	pushl %eax		# fake return address
+	pushl $0		# fake return address for unwinder
 #ifdef CONFIG_SMP
 	movb ready, %cl
 	movb $1, ready
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 477b24daff5..9a0060b92e3 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -109,7 +109,7 @@ static struct clocksource clocksource_pit = {
 
 static int __init init_pit_clocksource(void)
 {
-	if (num_possible_cpus() > 4) /* PIT does not scale! */
+	if (num_possible_cpus() > 1) /* PIT does not scale! */
 		return 0;
 
 	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index d53eafb6daa..62996cd1708 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -113,7 +113,8 @@ void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
 	io_apic_irqs &= ~(1<<irq);
-	set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+	set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+				      "XT");
 	enable_irq(irq);
 }
 
@@ -369,8 +370,8 @@ void __init init_ISA_irqs (void)
 			/*
 			 * 16 old-style INTA-cycle interrupts:
 			 */
-			set_irq_chip_and_handler(i, &i8259A_chip,
-						 handle_level_irq);
+			set_irq_chip_and_handler_name(i, &i8259A_chip,
+						      handle_level_irq, "XT");
 		} else {
 			/*
 			 * 'high' PCI IRQs filled in on demand
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 27bceaf5ce4..3b7a63e0ed1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -91,6 +91,46 @@ static struct irq_pin_list {
 	int apic, pin, next;
 } irq_2_pin[PIN_MAP_SIZE];
 
+struct io_apic {
+	unsigned int index;
+	unsigned int unused[3];
+	unsigned int data;
+};
+
+static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
+{
+	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
+		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
+}
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+	struct io_apic __iomem *io_apic = io_apic_base(apic);
+	writel(reg, &io_apic->index);
+	return readl(&io_apic->data);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	struct io_apic __iomem *io_apic = io_apic_base(apic);
+	writel(reg, &io_apic->index);
+	writel(value, &io_apic->data);
+}
+
+/*
+ * Re-write a value: to be used for read-modify-write
+ * cycles where the read already set up the index register.
+ *
+ * Older SiS APIC requires we rewrite the index register
+ */
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	volatile struct io_apic *io_apic = io_apic_base(apic);
+	if (sis_apic_bug)
+		writel(reg, &io_apic->index);
+	writel(value, &io_apic->data);
+}
+
 union entry_union {
 	struct { u32 w1, w2; };
 	struct IO_APIC_route_entry entry;
@@ -107,12 +147,34 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
 	return eu.entry;
 }
 
+/*
+ * When we write a new IO APIC routing entry, we need to write the high
+ * word first! If the mask bit in the low word is clear, we will enable
+ * the interrupt, and we need to make sure the entry is fully populated
+ * before that happens.
+ */
 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
 	union entry_union eu;
 	eu.entry = e;
 	spin_lock_irqsave(&ioapic_lock, flags);
+	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+/*
+ * When we mask an IO APIC routing entry, we need to write the low
+ * word first, in order to set the mask bit before we change the
+ * high bits!
+ */
+static void ioapic_mask_entry(int apic, int pin)
+{
+	unsigned long flags;
+	union entry_union eu = { .entry.mask = 1 };
+
+	spin_lock_irqsave(&ioapic_lock, flags);
 	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -234,9 +296,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 	/*
 	 * Disable it in the IO-APIC irq-routing table:
 	 */
-	memset(&entry, 0, sizeof(entry));
-	entry.mask = 1;
-	ioapic_write_entry(apic, pin, entry);
+	ioapic_mask_entry(apic, pin);
 }
 
 static void clear_IO_APIC (void)
@@ -1225,11 +1285,13 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 {
 	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 			trigger == IOAPIC_LEVEL)
-		set_irq_chip_and_handler(irq, &ioapic_chip,
-					 handle_fasteoi_irq);
-	else
-		set_irq_chip_and_handler(irq, &ioapic_chip,
-					 handle_edge_irq);
+		set_irq_chip_and_handler_name(irq, &ioapic_chip,
+					 handle_fasteoi_irq, "fasteoi");
+	else {
+		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
+		set_irq_chip_and_handler_name(irq, &ioapic_chip,
+					 handle_edge_irq, "edge");
+	}
 	set_intr_gate(vector, interrupt[irq]);
 }
 
@@ -2235,7 +2297,8 @@ static inline void check_timer(void)
 	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 
 	disable_8259A_irq(0);
-	set_irq_chip_and_handler(0, &lapic_chip, handle_fasteoi_irq);
+	set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
+				      "fasteio");
 	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
 	enable_8259A_irq(0);
 
@@ -2541,7 +2604,8 @@ int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
 
 	write_msi_msg(irq, &msg);
 
-	set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
+	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
+				      "edge");
 
 	return 0;
 }
@@ -2562,18 +2626,16 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 static void target_ht_irq(unsigned int irq, unsigned int dest)
 {
-	u32 low, high;
-	low = read_ht_irq_low(irq);
-	high = read_ht_irq_high(irq);
+	struct ht_irq_msg msg;
+	fetch_ht_irq_msg(irq, &msg);
 
-	low &= ~(HT_IRQ_LOW_DEST_ID_MASK);
-	high &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+	msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
+	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
 
-	low |= HT_IRQ_LOW_DEST_ID(dest);
-	high |= HT_IRQ_HIGH_DEST_ID(dest);
+	msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
+	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
 
-	write_ht_irq_low(irq, low);
-	write_ht_irq_high(irq, high);
+	write_ht_irq_msg(irq, &msg);
 }
 
 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
@@ -2611,7 +2673,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 
 	vector = assign_irq_vector(irq);
 	if (vector >= 0) {
-		u32 low, high;
+		struct ht_irq_msg msg;
 		unsigned dest;
 		cpumask_t tmp;
 
@@ -2619,9 +2681,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 		cpu_set(vector >> 8, tmp);
 		dest = cpu_mask_to_apicid(tmp);
 
-		high = HT_IRQ_HIGH_DEST_ID(dest);
+		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
-		low = HT_IRQ_LOW_BASE |
+		msg.address_lo =
+			HT_IRQ_LOW_BASE |
 			HT_IRQ_LOW_DEST_ID(dest) |
 			HT_IRQ_LOW_VECTOR(vector) |
 			((INT_DEST_MODE == 0) ?
@@ -2633,10 +2696,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 			HT_IRQ_LOW_MT_ARBITRATED) |
 			HT_IRQ_LOW_IRQ_MASKED;
 
-		write_ht_irq_low(irq, low);
-		write_ht_irq_high(irq, high);
+		write_ht_irq_msg(irq, &msg);
 
-		set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
+		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+					      handle_edge_irq, "edge");
 	}
 	return vector;
 }
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 8cfc7dbec7b..3201d421090 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
+		seq_printf(p, "-%-8s", irq_desc[i].name);
 		seq_printf(p, " %s", action->name);
 
 		for (action=action->next; action; action = action->next)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index d98e44b16fe..fc79e1e859c 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -361,8 +361,11 @@ no_kprobe:
 	asm volatile ( ".global kretprobe_trampoline\n"
 			"kretprobe_trampoline: \n"
 			"	pushf\n"
-			/* skip cs, eip, orig_eax, es, ds */
-			"	subl $20, %esp\n"
+			/* skip cs, eip, orig_eax */
+			"	subl $12, %esp\n"
+			"	pushl %gs\n"
+			"	pushl %ds\n"
+			"	pushl %es\n"
 			"	pushl %eax\n"
 			"	pushl %ebp\n"
 			"	pushl %edi\n"
@@ -373,10 +376,10 @@ no_kprobe:
 			"	movl %esp, %eax\n"
 			"	call trampoline_handler\n"
 			/* move eflags to cs */
-			"	movl 48(%esp), %edx\n"
-			"	movl %edx, 44(%esp)\n"
+			"	movl 52(%esp), %edx\n"
+			"	movl %edx, 48(%esp)\n"
 			/* save true return address on eflags */
-			"	movl %eax, 48(%esp)\n"
+			"	movl %eax, 52(%esp)\n"
 			"	popl %ebx\n"
 			"	popl %ecx\n"
 			"	popl %edx\n"
@@ -384,8 +387,8 @@ no_kprobe:
 			"	popl %edi\n"
 			"	popl %ebp\n"
 			"	popl %eax\n"
-			/* skip eip, orig_eax, es, ds */
-			"	addl $16, %esp\n"
+			/* skip eip, orig_eax, es, ds, gs */
+			"	addl $20, %esp\n"
 			"	popf\n"
 			"	ret\n");
 }
@@ -404,6 +407,10 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 	INIT_HLIST_HEAD(&empty_rp);
 	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
+	/* fixup registers */
+	regs->xcs = __KERNEL_CS;
+	regs->eip = trampoline_address;
+	regs->orig_eax = 0xffffffff;
 
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -425,6 +432,7 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 
 		if (ri->rp && ri->rp->handler){
 			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->rp->handler(ri, regs);
 			__get_cpu_var(current_kprobe) = NULL;
 		}
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index c4d0291b519..23f5984d065 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -577,7 +577,7 @@ static void microcode_init_cpu(int cpu)
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
-	if (uci->valid)
+	if (uci->valid && system_state == SYSTEM_RUNNING)
 		cpu_request_microcode(cpu);
 	mutex_unlock(&microcode_mutex);
 	set_cpus_allowed(current, old);
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index d535cdbbfd2..a773f776c9e 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -239,14 +239,14 @@ static struct file_operations msr_fops = {
 	.open = msr_open,
 };
 
-static int msr_class_device_create(int i)
+static int msr_device_create(int i)
 {
 	int err = 0;
-	struct class_device *class_err;
+	struct device *dev;
 
-	class_err = class_device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i);
-	if (IS_ERR(class_err))
-		err = PTR_ERR(class_err);
+	dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), "msr%d",i);
+	if (IS_ERR(dev))
+		err = PTR_ERR(dev);
 	return err;
 }
 
@@ -258,10 +258,10 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_ONLINE:
-		msr_class_device_create(cpu);
+		msr_device_create(cpu);
 		break;
 	case CPU_DEAD:
-		class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+		device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
 		break;
 	}
 	return NOTIFY_OK;
@@ -290,7 +290,7 @@ static int __init msr_init(void)
 		goto out_chrdev;
 	}
 	for_each_online_cpu(i) {
-		err = msr_class_device_create(i);
+		err = msr_device_create(i);
 		if (err != 0)
 			goto out_class;
 	}
@@ -302,7 +302,7 @@ static int __init msr_init(void)
 out_class:
 	i = 0;
 	for_each_online_cpu(i)
-		class_device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
+		device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
 	class_destroy(msr_class);
 out_chrdev:
 	unregister_chrdev(MSR_MAJOR, "cpu/msr");
@@ -314,7 +314,7 @@ static void __exit msr_exit(void)
 {
 	int cpu = 0;
 	for_each_online_cpu(cpu)
-		class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+		device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
 	class_destroy(msr_class);
 	unregister_chrdev(MSR_MAJOR, "cpu/msr");
 	unregister_hotcpu_notifier(&msr_class_cpu_notifier);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 3e8e3adb048..eaafe233a5d 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -219,11 +219,11 @@ static int __init check_nmi_watchdog(void)
 	int cpu;
 
 	/* Enable NMI watchdog for newer systems.
-	   Actually it should be safe for most systems before 2004 too except
-	   for some IBM systems that corrupt registers when NMI happens
-	   during SMM. Unfortunately we don't have more exact information
-	   on these and use this coarse check. */
-	if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004)
+	   Probably safe on most older systems too, but let's be careful.
+	   IBM ThinkPads use INT10 inside SMM and that allows early NMI inside SMM
+	   which hangs the system. Disable watchdog for all thinkpads */
+	if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004 &&
+		!dmi_name_in_vendors("ThinkPad"))
 		nmi_watchdog = NMI_LOCAL_APIC;
 
 	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
index 25fe6685393..5c8c6ef1fc5 100644
--- a/arch/i386/kernel/pci-dma.c
+++ b/arch/i386/kernel/pci-dma.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(dma_free_coherent);
 int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 				dma_addr_t device_addr, size_t size, int flags)
 {
-	void __iomem *mem_base;
+	void __iomem *mem_base = NULL;
 	int pages = size >> PAGE_SHIFT;
 	int bitmap_size = (pages + 31)/32;
 
@@ -114,6 +114,8 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 free1_out:
 	kfree(dev->dma_mem->bitmap);
 out:
+	if (mem_base)
+		iounmap(mem_base);
 	return 0;
 }
 EXPORT_SYMBOL(dma_declare_coherent_memory);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 57d375900af..dd53c58f64f 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -205,7 +205,7 @@ void cpu_idle(void)
 void cpu_idle_wait(void)
 {
 	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map;
+	cpumask_t map, tmp = current->cpus_allowed;
 
 	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
 	put_cpu();
@@ -227,6 +227,8 @@ void cpu_idle_wait(void)
 		}
 		cpus_and(map, map, cpu_online_map);
 	} while (!cpus_empty(map));
+
+	set_cpus_allowed(current, tmp);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -336,7 +338,6 @@ extern void kernel_thread_helper(void);
 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 {
 	struct pt_regs regs;
-	int err;
 
 	memset(&regs, 0, sizeof(regs));
 
@@ -351,10 +352,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
 
 	/* Ok, create the new process.. */
-	err = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-	if (err == 0) /* terminate kernel stack */
-		task_pt_regs(current)->eip = 0;
-	return err;
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
 EXPORT_SYMBOL(kernel_thread);
 
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 519e63c3c13..141041dde74 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -846,7 +846,7 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
 static int __init
 efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
 {
-	memory_present(0, start, end);
+	memory_present(0, PFN_UP(start), PFN_DOWN(end));
 	return 0;
 }
 
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 00489b706d2..fe9c5e8e7e6 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -129,15 +129,19 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
 
 #ifdef	CONFIG_FRAME_POINTER
 	while (valid_stack_ptr(tinfo, (void *)ebp)) {
+		unsigned long new_ebp;
 		addr = *(unsigned long *)(ebp + 4);
 		ops->address(data, addr);
 		/*
 		 * break out of recursive entries (such as
-		 * end_of_stack_stop_unwind_function):
+		 * end_of_stack_stop_unwind_function). Also,
+		 * we can never allow a frame pointer to
+		 * move downwards!
 		 */
-		if (ebp == *(unsigned long *)ebp)
+		new_ebp = *(unsigned long *)ebp;
+		if (new_ebp <= ebp)
 			break;
-		ebp = *(unsigned long *)ebp;
+		ebp = new_ebp;
 	}
 #else
 	while (valid_stack_ptr(tinfo, stack)) {
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index b8fa0a8b2e4..fbc95828cd7 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -349,8 +349,8 @@ static int tsc_update_callback(void)
 	int change = 0;
 
 	/* check to see if we should switch to the safe clocksource: */
-	if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
-		clocksource_tsc.rating = 50;
+	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
+		clocksource_tsc.rating = 0;
 		clocksource_reselect();
 		change = 1;
 	}
@@ -461,7 +461,7 @@ static int __init init_tsc_clocksource(void)
 						clocksource_tsc.shift);
 	/* lower the rating if we already know its unstable: */
 	if (check_tsc_unstable())
-		clocksource_tsc.rating = 50;
+		clocksource_tsc.rating = 0;
 
 	init_timer(&verify_tsc_freq_timer);
 	verify_tsc_freq_timer.function = verify_tsc_freq;
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 1e7ac1c44dd..c6f84a0322b 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -51,6 +51,7 @@ SECTIONS
   __tracedata_end = .;
 
   /* writeable */
+  . = ALIGN(4096);
   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
 	*(.data)
 	CONSTRUCTORS
@@ -126,13 +127,7 @@ SECTIONS
   __setup_end = .;
   __initcall_start = .;
   .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
-	*(.initcall1.init)
-	*(.initcall2.init)
-	*(.initcall3.init)
-	*(.initcall4.init)
-	*(.initcall5.init)
-	*(.initcall6.init)
-	*(.initcall7.init)
+	INITCALLS
   }
   __initcall_end = .;
   __con_initcall_start = .;