author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-26 12:44:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-26 12:44:32 -0400
commit	b82287587ef9917afbea5fcbf7aa63424b6f3719 (patch)
tree	4b27b46c41ea9d26ee44f1f0f7e8ddcd3a17f6a2 /arch
parent	2a8a2719be1397c64e726ccd1c0933a11dc493d0 (diff)
parent	5afca33a43786408ce74540b54855973dde32bab (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86-misc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86-misc: (28 commits)
  x86: section mismatch fixes, #3
  x86: section mismatch fixes, #2
  x86: pgtable_32.h - prototype and section mismatch fixes
  x86: unlock_ExtINT_logic() - fix section mismatch warnings
  x86: uniq_ioapic_id - fix section mismatch warning
  x86: trampoline_32.S - switch to .cpuinit.data
  x86: use get_bios_ebda()
  x86: remove duplicate get_bios_ebda() from rio.h
  x86: get_bios_ebda() requires asm/io.h
  x86: use cpumask function for present, possible, and online cpus
  x86: cleanup div_sc() usage
  x86: cleanup clocksource_hz2mult usage
  x86: remove unnecessary memset and NULL check after alloc_bootmem()
  x86: use bitmap library for pin_programmed
  x86: use MP_intsrc_info()
  x86: use BUILD_BUG_ON() for the size of struct intel_mp_floating
  x86_64 ia32 ptrace: convert to compat_arch_ptrace
  x86_64 ia32 ptrace: use compat_ptrace_request for siginfo
  x86 signals: lift set_fs
  x86 signals: lift flags diddling code
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/Kconfig.cpu | 4
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 10
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 4
-rw-r--r--  arch/x86/kernel/apic_32.c | 3
-rw-r--r--  arch/x86/kernel/apic_64.c | 3
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 18
-rw-r--r--  arch/x86/kernel/cpu/nexgen.c | 59
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 14
-rw-r--r--  arch/x86/kernel/genapic_64.c | 2
-rw-r--r--  arch/x86/kernel/head64.c | 5
-rw-r--r--  arch/x86/kernel/hpet.c | 2
-rw-r--r--  arch/x86/kernel/i8253.c | 6
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 2
-rw-r--r--  arch/x86/kernel/io_apic_64.c | 2
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/mfgpt_32.c | 3
-rw-r--r--  arch/x86/kernel/mpparse.c | 39
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 1
-rw-r--r--  arch/x86/kernel/ptrace.c | 95
-rw-r--r--  arch/x86/kernel/setup_32.c | 4
-rw-r--r--  arch/x86/kernel/setup_64.c | 2
-rw-r--r--  arch/x86/kernel/signal_32.c | 35
-rw-r--r--  arch/x86/kernel/signal_64.c | 30
-rw-r--r--  arch/x86/kernel/smpboot.c | 4
-rw-r--r--  arch/x86/kernel/summit_32.c | 5
-rw-r--r--  arch/x86/kernel/tlb_64.c | 4
-rw-r--r--  arch/x86/kernel/trampoline_32.S | 2
-rw-r--r--  arch/x86/kernel/traps_32.c | 2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 10
-rw-r--r--  arch/x86/mm/init_32.c | 6
-rw-r--r--  arch/x86/xen/smp.c | 2
34 files changed, 107 insertions, 282 deletions
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 57072f2716f9..4da3cdb9c1b1 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -21,8 +21,8 @@ config M386
21 21
22 Here are the settings recommended for greatest speed: 22 Here are the settings recommended for greatest speed:
23 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI 23 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
24 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels 24 486DLC/DLC2, and UMC 486SX-S. Only "386" kernels will run on a 386
25 will run on a 386 class machine. 25 class machine.
26 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or 26 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
27 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S. 27 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
28 - "586" for generic Pentium CPUs lacking the TSC 28 - "586" for generic Pentium CPUs lacking the TSC
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 05e155d3fb6c..bbed3a26ce55 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -499,11 +499,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
499 regs->cs = __USER32_CS; 499 regs->cs = __USER32_CS;
500 regs->ss = __USER32_DS; 500 regs->ss = __USER32_DS;
501 501
502 set_fs(USER_DS);
503 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
504 if (test_thread_flag(TIF_SINGLESTEP))
505 ptrace_notify(SIGTRAP);
506
507#if DEBUG_SIG 502#if DEBUG_SIG
508 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", 503 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
509 current->comm, current->pid, frame, regs->ip, frame->pretcode); 504 current->comm, current->pid, frame, regs->ip, frame->pretcode);
@@ -599,11 +594,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
599 regs->cs = __USER32_CS; 594 regs->cs = __USER32_CS;
600 regs->ss = __USER32_DS; 595 regs->ss = __USER32_DS;
601 596
602 set_fs(USER_DS);
603 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
604 if (test_thread_flag(TIF_SINGLESTEP))
605 ptrace_notify(SIGTRAP);
606
607#if DEBUG_SIG 597#if DEBUG_SIG
608 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", 598 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
609 current->comm, current->pid, frame, regs->ip, frame->pretcode); 599 current->comm, current->pid, frame, regs->ip, frame->pretcode);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index ae7158bce4d6..b5e329da166c 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -430,7 +430,7 @@ ia32_sys_call_table:
430 .quad sys_setuid16 430 .quad sys_setuid16
431 .quad sys_getuid16 431 .quad sys_getuid16
432 .quad compat_sys_stime /* stime */ /* 25 */ 432 .quad compat_sys_stime /* stime */ /* 25 */
433 .quad sys32_ptrace /* ptrace */ 433 .quad compat_sys_ptrace /* ptrace */
434 .quad sys_alarm 434 .quad sys_alarm
435 .quad sys_fstat /* (old)fstat */ 435 .quad sys_fstat /* (old)fstat */
436 .quad sys_pause 436 .quad sys_pause
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 057ccf1d5ad4..977ed5cdeaa3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -697,10 +697,6 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
697#define HPET_RESOURCE_NAME_SIZE 9 697#define HPET_RESOURCE_NAME_SIZE 9
698 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); 698 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
699 699
700 if (!hpet_res)
701 return 0;
702
703 memset(hpet_res, 0, sizeof(*hpet_res));
704 hpet_res->name = (void *)&hpet_res[1]; 700 hpet_res->name = (void *)&hpet_res[1];
705 hpet_res->flags = IORESOURCE_MEM; 701 hpet_res->flags = IORESOURCE_MEM;
706 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", 702 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
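Note on the hunk above: the removed NULL check and memset() relied on the bootmem allocator's contract, which is that alloc_bootmem() panics instead of returning NULL and hands back zeroed memory. A minimal sketch of the resulting call site, restating that assumption in comments:

	/* sketch: bootmem contract as relied on above -- alloc_bootmem() never
	 * returns NULL (it panics on failure) and the memory is already zeroed */
	hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
	hpet_res->name  = (void *)&hpet_res[1];	/* name storage follows the struct */
	hpet_res->flags = IORESOURCE_MEM;	/* remaining fields stay zero */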
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 8317401170b8..4b99b1bdeb6c 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -451,7 +451,8 @@ void __init setup_boot_APIC_clock(void)
451 } 451 }
452 452
453 /* Calculate the scaled math multiplication factor */ 453 /* Calculate the scaled math multiplication factor */
454 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, 32); 454 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
455 lapic_clockevent.shift);
455 lapic_clockevent.max_delta_ns = 456 lapic_clockevent.max_delta_ns =
456 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); 457 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
457 lapic_clockevent.min_delta_ns = 458 lapic_clockevent.min_delta_ns =
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index bf83157337e4..5910020c3f24 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -360,7 +360,8 @@ static void __init calibrate_APIC_clock(void)
360 result / 1000 / 1000, result / 1000 % 1000); 360 result / 1000 / 1000, result / 1000 % 1000);
361 361
362 /* Calculate the scaled math multiplication factor */ 362 /* Calculate the scaled math multiplication factor */
363 lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32); 363 lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
364 lapic_clockevent.shift);
364 lapic_clockevent.max_delta_ns = 365 lapic_clockevent.max_delta_ns =
365 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); 366 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
366 lapic_clockevent.min_delta_ns = 367 lapic_clockevent.min_delta_ns =
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee7c45235e54..a0c6f8190887 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_X86_32) += cyrix.o
11obj-$(CONFIG_X86_32) += centaur.o 11obj-$(CONFIG_X86_32) += centaur.o
12obj-$(CONFIG_X86_32) += transmeta.o 12obj-$(CONFIG_X86_32) += transmeta.o
13obj-$(CONFIG_X86_32) += intel.o 13obj-$(CONFIG_X86_32) += intel.o
14obj-$(CONFIG_X86_32) += nexgen.o
15obj-$(CONFIG_X86_32) += umc.o 14obj-$(CONFIG_X86_32) += umc.o
16 15
17obj-$(CONFIG_X86_MCE) += mcheck/ 16obj-$(CONFIG_X86_MCE) += mcheck/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0173065dc3b7..245866828294 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -343,10 +343,4 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
343 .c_size_cache = amd_size_cache, 343 .c_size_cache = amd_size_cache,
344}; 344};
345 345
346int __init amd_init_cpu(void)
347{
348 cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
349 return 0;
350}
351
352cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); 346cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 9a699ed03598..e07e8c068ae0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -49,7 +49,7 @@ static int banks;
49static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL }; 49static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
50static unsigned long notify_user; 50static unsigned long notify_user;
51static int rip_msr; 51static int rip_msr;
52static int mce_bootlog = 1; 52static int mce_bootlog = -1;
53static atomic_t mce_events; 53static atomic_t mce_events;
54 54
55static char trigger[128]; 55static char trigger[128];
@@ -471,13 +471,15 @@ static void mce_init(void *dummy)
471static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 471static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
472{ 472{
473 /* This should be disabled by the BIOS, but isn't always */ 473 /* This should be disabled by the BIOS, but isn't always */
474 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { 474 if (c->x86_vendor == X86_VENDOR_AMD) {
475 /* disable GART TBL walk error reporting, which trips off 475 if(c->x86 == 15)
476 incorrectly with the IOMMU & 3ware & Cerberus. */ 476 /* disable GART TBL walk error reporting, which trips off
477 clear_bit(10, &bank[4]); 477 incorrectly with the IOMMU & 3ware & Cerberus. */
478 /* Lots of broken BIOS around that don't clear them 478 clear_bit(10, &bank[4]);
479 by default and leave crap in there. Don't log. */ 479 if(c->x86 <= 17 && mce_bootlog < 0)
480 mce_bootlog = 0; 480 /* Lots of broken BIOS around that don't clear them
481 by default and leave crap in there. Don't log. */
482 mce_bootlog = 0;
481 } 483 }
482 484
483} 485}
diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c
deleted file mode 100644
index 5d5e1c134123..000000000000
--- a/arch/x86/kernel/cpu/nexgen.c
+++ /dev/null
@@ -1,59 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/string.h>
4#include <asm/processor.h>
5
6#include "cpu.h"
7
8/*
9 * Detect a NexGen CPU running without BIOS hypercode new enough
10 * to have CPUID. (Thanks to Herbert Oppmann)
11 */
12
13static int __cpuinit deep_magic_nexgen_probe(void)
14{
15 int ret;
16
17 __asm__ __volatile__ (
18 " movw $0x5555, %%ax\n"
19 " xorw %%dx,%%dx\n"
20 " movw $2, %%cx\n"
21 " divw %%cx\n"
22 " movl $0, %%eax\n"
23 " jnz 1f\n"
24 " movl $1, %%eax\n"
25 "1:\n"
26 : "=a" (ret) : : "cx", "dx");
27 return ret;
28}
29
30static void __cpuinit init_nexgen(struct cpuinfo_x86 *c)
31{
32 c->x86_cache_size = 256; /* A few had 1 MB... */
33}
34
35static void __cpuinit nexgen_identify(struct cpuinfo_x86 *c)
36{
37 /* Detect NexGen with old hypercode */
38 if (deep_magic_nexgen_probe())
39 strcpy(c->x86_vendor_id, "NexGenDriven");
40}
41
42static struct cpu_dev nexgen_cpu_dev __cpuinitdata = {
43 .c_vendor = "Nexgen",
44 .c_ident = { "NexGenDriven" },
45 .c_models = {
46 { .vendor = X86_VENDOR_NEXGEN,
47 .family = 5,
48 .model_names = { [1] = "Nx586" }
49 },
50 },
51 .c_init = init_nexgen,
52 .c_identify = nexgen_identify,
53};
54
55int __init nexgen_init_cpu(void)
56{
57 cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
58 return 0;
59}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index b943e10ad814..f9ae93adffe5 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -614,16 +614,6 @@ static struct wd_ops intel_arch_wd_ops __read_mostly = {
614 .evntsel = MSR_ARCH_PERFMON_EVENTSEL1, 614 .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
615}; 615};
616 616
617static struct wd_ops coreduo_wd_ops = {
618 .reserve = single_msr_reserve,
619 .unreserve = single_msr_unreserve,
620 .setup = setup_intel_arch_watchdog,
621 .rearm = p6_rearm,
622 .stop = single_msr_stop_watchdog,
623 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
624 .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
625};
626
627static void probe_nmi_watchdog(void) 617static void probe_nmi_watchdog(void)
628{ 618{
629 switch (boot_cpu_data.x86_vendor) { 619 switch (boot_cpu_data.x86_vendor) {
@@ -637,8 +627,8 @@ static void probe_nmi_watchdog(void)
637 /* Work around Core Duo (Yonah) errata AE49 where perfctr1 627 /* Work around Core Duo (Yonah) errata AE49 where perfctr1
638 doesn't have a working enable bit. */ 628 doesn't have a working enable bit. */
639 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) { 629 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
640 wd_ops = &coreduo_wd_ops; 630 intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
641 break; 631 intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
642 } 632 }
643 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 633 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
644 wd_ops = &intel_arch_wd_ops; 634 wd_ops = &intel_arch_wd_ops;
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 9546ef408b92..021624c83583 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -51,7 +51,7 @@ void __init setup_apic_routing(void)
51 else 51 else
52#endif 52#endif
53 53
54 if (cpus_weight(cpu_possible_map) <= 8) 54 if (num_possible_cpus() <= 8)
55 genapic = &apic_flat; 55 genapic = &apic_flat;
56 else 56 else
57 genapic = &apic_physflat; 57 genapic = &apic_physflat;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 993c76773256..d31d6b72d60d 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -22,6 +22,7 @@
22#include <asm/sections.h> 22#include <asm/sections.h>
23#include <asm/kdebug.h> 23#include <asm/kdebug.h>
24#include <asm/e820.h> 24#include <asm/e820.h>
25#include <asm/bios_ebda.h>
25 26
26static void __init zap_identity_mappings(void) 27static void __init zap_identity_mappings(void)
27{ 28{
@@ -49,7 +50,6 @@ static void __init copy_bootdata(char *real_mode_data)
49 } 50 }
50} 51}
51 52
52#define BIOS_EBDA_SEGMENT 0x40E
53#define BIOS_LOWMEM_KILOBYTES 0x413 53#define BIOS_LOWMEM_KILOBYTES 0x413
54 54
55/* 55/*
@@ -80,8 +80,7 @@ static void __init reserve_ebda_region(void)
80 lowmem <<= 10; 80 lowmem <<= 10;
81 81
82 /* start of EBDA area */ 82 /* start of EBDA area */
83 ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT); 83 ebda_addr = get_bios_ebda();
84 ebda_addr <<= 4;
85 84
86 /* Fixup: bios puts an EBDA in the top 64K segment */ 85 /* Fixup: bios puts an EBDA in the top 64K segment */
87 /* of conventional memory, but does not adjust lowmem. */ 86 /* of conventional memory, but does not adjust lowmem. */
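get_bios_ebda() replaces the open-coded BIOS_EBDA_SEGMENT reads here and in setup_32.c and summit_32.c below. The helper itself lives in the new <asm/bios_ebda.h>, which this diff only includes; a minimal sketch of what such a helper looks like (it needs <asm/io.h> for phys_to_virt(), per the shortlog entry):

	/* sketch of a get_bios_ebda()-style helper; the real one is in
	 * include/asm-x86/bios_ebda.h, which is not part of this diff */
	static inline unsigned int get_bios_ebda(void)
	{
		/* the word at physical 0x40E holds the EBDA segment */
		unsigned int address = *(unsigned short *)phys_to_virt(0x40E);

		address <<= 4;		/* segment -> physical address */
		return address;
	}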
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 36652ea1a265..9007f9ea64ee 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -218,7 +218,7 @@ static void hpet_legacy_clockevent_register(void)
218 hpet_freq = 1000000000000000ULL; 218 hpet_freq = 1000000000000000ULL;
219 do_div(hpet_freq, hpet_period); 219 do_div(hpet_freq, hpet_period);
220 hpet_clockevent.mult = div_sc((unsigned long) hpet_freq, 220 hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
221 NSEC_PER_SEC, 32); 221 NSEC_PER_SEC, hpet_clockevent.shift);
222 /* Calculate the min / max delta */ 222 /* Calculate the min / max delta */
223 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, 223 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
224 &hpet_clockevent); 224 &hpet_clockevent);
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 8540abe86ade..c1b5e3ece1f2 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -115,7 +115,8 @@ void __init setup_pit_timer(void)
115 * IO_APIC has been initialized. 115 * IO_APIC has been initialized.
116 */ 116 */
117 pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); 117 pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32); 118 pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
119 pit_clockevent.shift);
119 pit_clockevent.max_delta_ns = 120 pit_clockevent.max_delta_ns =
120 clockevent_delta2ns(0x7FFF, &pit_clockevent); 121 clockevent_delta2ns(0x7FFF, &pit_clockevent);
121 pit_clockevent.min_delta_ns = 122 pit_clockevent.min_delta_ns =
@@ -224,7 +225,8 @@ static int __init init_pit_clocksource(void)
224 pit_clockevent.mode != CLOCK_EVT_MODE_PERIODIC) 225 pit_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
225 return 0; 226 return 0;
226 227
227 clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); 228 clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE,
229 clocksource_pit.shift);
228 return clocksource_register(&clocksource_pit); 230 return clocksource_register(&clocksource_pit);
229} 231}
230arch_initcall(init_pit_clocksource); 232arch_initcall(init_pit_clocksource);
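The div_sc()/clocksource_hz2mult() changes in the apic, hpet and pit hunks above (and the mfgpt_32.c one below) all make the same point: the multiplier has to be computed against the shift value the clockevent/clocksource structure actually declares, not a hard-coded 32 or 20. Roughly, the helper scales as follows (a paraphrase, not copied from the headers):

	/* rough shape of div_sc(): yields mult such that
	 * device ticks ~= (ns * mult) >> shift */
	static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
					   int shift)
	{
		u64 tmp = (u64)ticks << shift;

		do_div(tmp, nsec);
		return (unsigned long)tmp;
	}

	/* which is why the call sites now pass the structure's own .shift: */
	pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
				     pit_clockevent.shift);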
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 2e2f42074e18..696b8e4e66bb 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -2068,7 +2068,7 @@ static void __init setup_nmi(void)
2068 * cycles as some i82489DX-based boards have glue logic that keeps the 2068 * cycles as some i82489DX-based boards have glue logic that keeps the
2069 * 8259A interrupt line asserted until INTA. --macro 2069 * 8259A interrupt line asserted until INTA. --macro
2070 */ 2070 */
2071static inline void unlock_ExtINT_logic(void) 2071static inline void __init unlock_ExtINT_logic(void)
2072{ 2072{
2073 int apic, pin, i; 2073 int apic, pin, i;
2074 struct IO_APIC_route_entry entry0, entry1; 2074 struct IO_APIC_route_entry entry0, entry1;
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 9ba11d07920f..ef1a8dfcc529 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1599,7 +1599,7 @@ static void __init setup_nmi(void)
1599 * cycles as some i82489DX-based boards have glue logic that keeps the 1599 * cycles as some i82489DX-based boards have glue logic that keeps the
1600 * 8259A interrupt line asserted until INTA. --macro 1600 * 8259A interrupt line asserted until INTA. --macro
1601 */ 1601 */
1602static inline void unlock_ExtINT_logic(void) 1602static inline void __init unlock_ExtINT_logic(void)
1603{ 1603{
1604 int apic, pin, i; 1604 int apic, pin, i;
1605 struct IO_APIC_route_entry entry0, entry1; 1605 struct IO_APIC_route_entry entry0, entry1;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 6ea67b76a214..00bda7bcda63 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -134,7 +134,7 @@ unsigned int do_IRQ(struct pt_regs *regs)
134 : "=a" (arg1), "=d" (arg2), "=b" (bx) 134 : "=a" (arg1), "=d" (arg2), "=b" (bx)
135 : "0" (irq), "1" (desc), "2" (isp), 135 : "0" (irq), "1" (desc), "2" (isp),
136 "D" (desc->handle_irq) 136 "D" (desc->handle_irq)
137 : "memory", "cc" 137 : "memory", "cc", "ecx"
138 ); 138 );
139 } else 139 } else
140#endif 140#endif
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index b402c0f3f192..cfc2648d25ff 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -364,7 +364,8 @@ int __init mfgpt_timer_setup(void)
364 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val); 364 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
365 365
366 /* Set up the clock event */ 366 /* Set up the clock event */
367 mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, 32); 367 mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
368 mfgpt_clockevent.shift);
368 mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF, 369 mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
369 &mfgpt_clockevent); 370 &mfgpt_clockevent);
370 mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE, 371 mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 70744e344fa1..3e2c54dc8b29 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -686,13 +686,11 @@ void __init get_smp_config(void)
686static int __init smp_scan_config(unsigned long base, unsigned long length, 686static int __init smp_scan_config(unsigned long base, unsigned long length,
687 unsigned reserve) 687 unsigned reserve)
688{ 688{
689 extern void __bad_mpf_size(void);
690 unsigned int *bp = phys_to_virt(base); 689 unsigned int *bp = phys_to_virt(base);
691 struct intel_mp_floating *mpf; 690 struct intel_mp_floating *mpf;
692 691
693 Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length); 692 Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
694 if (sizeof(*mpf) != 16) 693 BUILD_BUG_ON(sizeof(*mpf) != 16);
695 __bad_mpf_size();
696 694
697 while (length > 0) { 695 while (length > 0) {
698 mpf = (struct intel_mp_floating *)bp; 696 mpf = (struct intel_mp_floating *)bp;
@@ -801,7 +799,6 @@ void __init find_smp_config(void)
801#ifdef CONFIG_X86_IO_APIC 799#ifdef CONFIG_X86_IO_APIC
802 800
803#define MP_ISA_BUS 0 801#define MP_ISA_BUS 0
804#define MP_MAX_IOAPIC_PIN 127
805 802
806extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; 803extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
807 804
@@ -820,7 +817,7 @@ static int mp_find_ioapic(int gsi)
820 return -1; 817 return -1;
821} 818}
822 819
823static u8 uniq_ioapic_id(u8 id) 820static u8 __init uniq_ioapic_id(u8 id)
824{ 821{
825#ifdef CONFIG_X86_32 822#ifdef CONFIG_X86_32
826 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 823 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
@@ -909,14 +906,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
909 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ 906 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
910 intsrc.mpc_dstirq = pin; /* INTIN# */ 907 intsrc.mpc_dstirq = pin; /* INTIN# */
911 908
912 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", 909 MP_intsrc_info(&intsrc);
913 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
914 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
915 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
916
917 mp_irqs[mp_irq_entries] = intsrc;
918 if (++mp_irq_entries == MAX_IRQ_SOURCES)
919 panic("Max # of irq sources exceeded!\n");
920} 910}
921 911
922int es7000_plat; 912int es7000_plat;
@@ -985,23 +975,14 @@ void __init mp_config_acpi_legacy_irqs(void)
985 intsrc.mpc_srcbusirq = i; /* Identity mapped */ 975 intsrc.mpc_srcbusirq = i; /* Identity mapped */
986 intsrc.mpc_dstirq = i; 976 intsrc.mpc_dstirq = i;
987 977
988 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " 978 MP_intsrc_info(&intsrc);
989 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
990 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
991 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
992 intsrc.mpc_dstirq);
993
994 mp_irqs[mp_irq_entries] = intsrc;
995 if (++mp_irq_entries == MAX_IRQ_SOURCES)
996 panic("Max # of irq sources exceeded!\n");
997 } 979 }
998} 980}
999 981
1000int mp_register_gsi(u32 gsi, int triggering, int polarity) 982int mp_register_gsi(u32 gsi, int triggering, int polarity)
1001{ 983{
1002 int ioapic = -1; 984 int ioapic;
1003 int ioapic_pin = 0; 985 int ioapic_pin;
1004 int idx, bit = 0;
1005#ifdef CONFIG_X86_32 986#ifdef CONFIG_X86_32
1006#define MAX_GSI_NUM 4096 987#define MAX_GSI_NUM 4096
1007#define IRQ_COMPRESSION_START 64 988#define IRQ_COMPRESSION_START 64
@@ -1041,15 +1022,13 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1041 * with redundant pin->gsi mappings (but unique PCI devices); 1022 * with redundant pin->gsi mappings (but unique PCI devices);
1042 * we only program the IOAPIC on the first. 1023 * we only program the IOAPIC on the first.
1043 */ 1024 */
1044 bit = ioapic_pin % 32; 1025 if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
1045 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1046 if (idx > 3) {
1047 printk(KERN_ERR "Invalid reference to IOAPIC pin " 1026 printk(KERN_ERR "Invalid reference to IOAPIC pin "
1048 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 1027 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1049 ioapic_pin); 1028 ioapic_pin);
1050 return gsi; 1029 return gsi;
1051 } 1030 }
1052 if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { 1031 if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
1053 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", 1032 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1054 mp_ioapic_routing[ioapic].apic_id, ioapic_pin); 1033 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1055#ifdef CONFIG_X86_32 1034#ifdef CONFIG_X86_32
@@ -1059,7 +1038,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1059#endif 1038#endif
1060 } 1039 }
1061 1040
1062 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit); 1041 set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed);
1063#ifdef CONFIG_X86_32 1042#ifdef CONFIG_X86_32
1064 /* 1043 /*
1065 * For GSI >= 64, use IRQ compression 1044 * For GSI >= 64, use IRQ compression
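The pin_programmed change above swaps the open-coded word/bit arithmetic (idx = pin / 32, bit = pin % 32) for the generic bitmap helpers, with MP_MAX_IOAPIC_PIN now coming from a shared header instead of the local #define that was removed. A minimal sketch of the assumed field shape and the equivalence (the real struct mp_ioapic_routing is declared in a header not shown here):

	#define MP_MAX_IOAPIC_PIN 127		/* assumed value, matching the old local define */

	struct mp_ioapic_routing_sketch {	/* hypothetical stand-in for illustration */
		int apic_id;
		DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
	};

	/* old: r->pin_programmed[pin / 32] & (1 << (pin % 32))
	 * new: the same test/set via the bitmap library */
	static int pin_seen_before(struct mp_ioapic_routing_sketch *r, int pin)
	{
		if (test_bit(pin, r->pin_programmed))
			return 1;
		set_bit(pin, r->pin_programmed);
		return 0;
	}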
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 2edee22e9c30..e28ec497e142 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -43,6 +43,7 @@
43#include <asm/system.h> 43#include <asm/system.h>
44#include <asm/dma.h> 44#include <asm/dma.h>
45#include <asm/rio.h> 45#include <asm/rio.h>
46#include <asm/bios_ebda.h>
46 47
47#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT 48#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
48int use_calgary __read_mostly = 1; 49int use_calgary __read_mostly = 1;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 559c1b027417..fb03ef380f0e 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1207,97 +1207,16 @@ static int genregs32_set(struct task_struct *target,
1207 return ret; 1207 return ret;
1208} 1208}
1209 1209
1210static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data) 1210long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1211 compat_ulong_t caddr, compat_ulong_t cdata)
1211{ 1212{
1212 siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t)); 1213 unsigned long addr = caddr;
1213 compat_siginfo_t __user *si32 = compat_ptr(data); 1214 unsigned long data = cdata;
1214 siginfo_t ssi;
1215 int ret;
1216
1217 if (request == PTRACE_SETSIGINFO) {
1218 memset(&ssi, 0, sizeof(siginfo_t));
1219 ret = copy_siginfo_from_user32(&ssi, si32);
1220 if (ret)
1221 return ret;
1222 if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
1223 return -EFAULT;
1224 }
1225 ret = sys_ptrace(request, pid, addr, (unsigned long)si);
1226 if (ret)
1227 return ret;
1228 if (request == PTRACE_GETSIGINFO) {
1229 if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
1230 return -EFAULT;
1231 ret = copy_siginfo_to_user32(si32, &ssi);
1232 }
1233 return ret;
1234}
1235
1236asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
1237{
1238 struct task_struct *child;
1239 struct pt_regs *childregs;
1240 void __user *datap = compat_ptr(data); 1215 void __user *datap = compat_ptr(data);
1241 int ret; 1216 int ret;
1242 __u32 val; 1217 __u32 val;
1243 1218
1244 switch (request) { 1219 switch (request) {
1245 case PTRACE_TRACEME:
1246 case PTRACE_ATTACH:
1247 case PTRACE_KILL:
1248 case PTRACE_CONT:
1249 case PTRACE_SINGLESTEP:
1250 case PTRACE_SINGLEBLOCK:
1251 case PTRACE_DETACH:
1252 case PTRACE_SYSCALL:
1253 case PTRACE_OLDSETOPTIONS:
1254 case PTRACE_SETOPTIONS:
1255 case PTRACE_SET_THREAD_AREA:
1256 case PTRACE_GET_THREAD_AREA:
1257#ifdef X86_BTS
1258 case PTRACE_BTS_CONFIG:
1259 case PTRACE_BTS_STATUS:
1260 case PTRACE_BTS_SIZE:
1261 case PTRACE_BTS_GET:
1262 case PTRACE_BTS_CLEAR:
1263 case PTRACE_BTS_DRAIN:
1264#endif
1265 return sys_ptrace(request, pid, addr, data);
1266
1267 default:
1268 return -EINVAL;
1269
1270 case PTRACE_PEEKTEXT:
1271 case PTRACE_PEEKDATA:
1272 case PTRACE_POKEDATA:
1273 case PTRACE_POKETEXT:
1274 case PTRACE_POKEUSR:
1275 case PTRACE_PEEKUSR:
1276 case PTRACE_GETREGS:
1277 case PTRACE_SETREGS:
1278 case PTRACE_SETFPREGS:
1279 case PTRACE_GETFPREGS:
1280 case PTRACE_SETFPXREGS:
1281 case PTRACE_GETFPXREGS:
1282 case PTRACE_GETEVENTMSG:
1283 break;
1284
1285 case PTRACE_SETSIGINFO:
1286 case PTRACE_GETSIGINFO:
1287 return ptrace32_siginfo(request, pid, addr, data);
1288 }
1289
1290 child = ptrace_get_task_struct(pid);
1291 if (IS_ERR(child))
1292 return PTR_ERR(child);
1293
1294 ret = ptrace_check_attach(child, request == PTRACE_KILL);
1295 if (ret < 0)
1296 goto out;
1297
1298 childregs = task_pt_regs(child);
1299
1300 switch (request) {
1301 case PTRACE_PEEKUSR: 1220 case PTRACE_PEEKUSR:
1302 ret = getreg32(child, addr, &val); 1221 ret = getreg32(child, addr, &val);
1303 if (ret == 0) 1222 if (ret == 0)
@@ -1343,12 +1262,14 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
1343 sizeof(struct user32_fxsr_struct), 1262 sizeof(struct user32_fxsr_struct),
1344 datap); 1263 datap);
1345 1264
1265 case PTRACE_GET_THREAD_AREA:
1266 case PTRACE_SET_THREAD_AREA:
1267 return arch_ptrace(child, request, addr, data);
1268
1346 default: 1269 default:
1347 return compat_ptrace_request(child, request, addr, data); 1270 return compat_ptrace_request(child, request, addr, data);
1348 } 1271 }
1349 1272
1350 out:
1351 put_task_struct(child);
1352 return ret; 1273 return ret;
1353} 1274}
1354 1275
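Converting sys32_ptrace() into compat_arch_ptrace() lets the generic compat ptrace entry point own the task lookup, the attach check and the siginfo conversion (via compat_ptrace_request()), so only the genuinely 32-on-64 register accesses remain in this file. The calling shape is roughly the following (a paraphrase of the generic code, not an exact copy):

	/* rough calling shape of the generic compat entry point */
	asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
					  compat_long_t addr, compat_long_t data)
	{
		struct task_struct *child;
		long ret;

		/* PTRACE_TRACEME / PTRACE_ATTACH are handled before this point */
		child = ptrace_get_task_struct(pid);
		if (IS_ERR(child))
			return PTR_ERR(child);

		ret = ptrace_check_attach(child, request == PTRACE_KILL);
		if (!ret)
			ret = compat_arch_ptrace(child, request, addr, data);

		put_task_struct(child);
		return ret;
	}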
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 455d3c80960b..44cc9b933932 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -389,7 +389,6 @@ unsigned long __init find_max_low_pfn(void)
389 return max_low_pfn; 389 return max_low_pfn;
390} 390}
391 391
392#define BIOS_EBDA_SEGMENT 0x40E
393#define BIOS_LOWMEM_KILOBYTES 0x413 392#define BIOS_LOWMEM_KILOBYTES 0x413
394 393
395/* 394/*
@@ -420,8 +419,7 @@ static void __init reserve_ebda_region(void)
420 lowmem <<= 10; 419 lowmem <<= 10;
421 420
422 /* start of EBDA area */ 421 /* start of EBDA area */
423 ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT); 422 ebda_addr = get_bios_ebda();
424 ebda_addr <<= 4;
425 423
426 /* Fixup: bios puts an EBDA in the top 64K segment */ 424 /* Fixup: bios puts an EBDA in the top 64K segment */
427 /* of conventional memory, but does not adjust lowmem. */ 425 /* of conventional memory, but does not adjust lowmem. */
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index c2ec3dcb6b99..17bdf2343095 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -116,7 +116,7 @@ extern int root_mountflags;
116 116
117char __initdata command_line[COMMAND_LINE_SIZE]; 117char __initdata command_line[COMMAND_LINE_SIZE];
118 118
119struct resource standard_io_resources[] = { 119static struct resource standard_io_resources[] = {
120 { .name = "dma1", .start = 0x00, .end = 0x1f, 120 { .name = "dma1", .start = 0x00, .end = 0x1f,
121 .flags = IORESOURCE_BUSY | IORESOURCE_IO }, 121 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
122 { .name = "pic1", .start = 0x20, .end = 0x21, 122 { .name = "pic1", .start = 0x20, .end = 0x21,
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index f1b117930837..8e05e7f7bd40 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -413,16 +413,6 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
413 regs->ss = __USER_DS; 413 regs->ss = __USER_DS;
414 regs->cs = __USER_CS; 414 regs->cs = __USER_CS;
415 415
416 /*
417 * Clear TF when entering the signal handler, but
418 * notify any tracer that was single-stepping it.
419 * The tracer may want to single-step inside the
420 * handler too.
421 */
422 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
423 if (test_thread_flag(TIF_SINGLESTEP))
424 ptrace_notify(SIGTRAP);
425
426 return 0; 416 return 0;
427 417
428give_sigsegv: 418give_sigsegv:
@@ -501,16 +491,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
501 regs->ss = __USER_DS; 491 regs->ss = __USER_DS;
502 regs->cs = __USER_CS; 492 regs->cs = __USER_CS;
503 493
504 /*
505 * Clear TF when entering the signal handler, but
506 * notify any tracer that was single-stepping it.
507 * The tracer may want to single-step inside the
508 * handler too.
509 */
510 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
511 if (test_thread_flag(TIF_SINGLESTEP))
512 ptrace_notify(SIGTRAP);
513
514 return 0; 494 return 0;
515 495
516give_sigsegv: 496give_sigsegv:
@@ -566,6 +546,21 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
566 if (ret) 546 if (ret)
567 return ret; 547 return ret;
568 548
549 /*
550 * Clear the direction flag as per the ABI for function entry.
551 */
552 regs->flags &= ~X86_EFLAGS_DF;
553
554 /*
555 * Clear TF when entering the signal handler, but
556 * notify any tracer that was single-stepping it.
557 * The tracer may want to single-step inside the
558 * handler too.
559 */
560 regs->flags &= ~X86_EFLAGS_TF;
561 if (test_thread_flag(TIF_SINGLESTEP))
562 ptrace_notify(SIGTRAP);
563
569 spin_lock_irq(&current->sighand->siglock); 564 spin_lock_irq(&current->sighand->siglock);
570 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 565 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
571 if (!(ka->sa.sa_flags & SA_NODEFER)) 566 if (!(ka->sa.sa_flags & SA_NODEFER))
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 827179c5b32a..ccb2a4560c2d 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -285,14 +285,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
285 even if the handler happens to be interrupting 32-bit code. */ 285 even if the handler happens to be interrupting 32-bit code. */
286 regs->cs = __USER_CS; 286 regs->cs = __USER_CS;
287 287
288 /* This, by contrast, has nothing to do with segment registers -
289 see include/asm-x86_64/uaccess.h for details. */
290 set_fs(USER_DS);
291
292 regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
293 if (test_thread_flag(TIF_SINGLESTEP))
294 ptrace_notify(SIGTRAP);
295
296 return 0; 288 return 0;
297 289
298give_sigsegv: 290give_sigsegv:
@@ -380,6 +372,28 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
380 ret = setup_rt_frame(sig, ka, info, oldset, regs); 372 ret = setup_rt_frame(sig, ka, info, oldset, regs);
381 373
382 if (ret == 0) { 374 if (ret == 0) {
375 /*
376 * This has nothing to do with segment registers,
377 * despite the name. This magic affects uaccess.h
378 * macros' behavior. Reset it to the normal setting.
379 */
380 set_fs(USER_DS);
381
382 /*
383 * Clear the direction flag as per the ABI for function entry.
384 */
385 regs->flags &= ~X86_EFLAGS_DF;
386
387 /*
388 * Clear TF when entering the signal handler, but
389 * notify any tracer that was single-stepping it.
390 * The tracer may want to single-step inside the
391 * handler too.
392 */
393 regs->flags &= ~X86_EFLAGS_TF;
394 if (test_thread_flag(TIF_SINGLESTEP))
395 ptrace_notify(SIGTRAP);
396
383 spin_lock_irq(&current->sighand->siglock); 397 spin_lock_irq(&current->sighand->siglock);
384 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 398 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
385 if (!(ka->sa.sa_flags & SA_NODEFER)) 399 if (!(ka->sa.sa_flags & SA_NODEFER))
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index eef79e84145f..04c662ba18f1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1058,7 +1058,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
1058 check_tsc_sync_source(cpu); 1058 check_tsc_sync_source(cpu);
1059 local_irq_restore(flags); 1059 local_irq_restore(flags);
1060 1060
1061 while (!cpu_isset(cpu, cpu_online_map)) { 1061 while (!cpu_online(cpu)) {
1062 cpu_relax(); 1062 cpu_relax();
1063 touch_nmi_watchdog(); 1063 touch_nmi_watchdog();
1064 } 1064 }
@@ -1168,7 +1168,7 @@ static void __init smp_cpu_index_default(void)
1168 int i; 1168 int i;
1169 struct cpuinfo_x86 *c; 1169 struct cpuinfo_x86 *c;
1170 1170
1171 for_each_cpu_mask(i, cpu_possible_map) { 1171 for_each_possible_cpu(i) {
1172 c = &cpu_data(i); 1172 c = &cpu_data(i);
1173 /* mark all to hotplug */ 1173 /* mark all to hotplug */
1174 c->cpu_index = NR_CPUS; 1174 c->cpu_index = NR_CPUS;
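The cpu_isset()/cpus_weight()/for_each_cpu_mask() conversions here and in genapic_64.c above, and in tlb_64.c, voyager_smp.c and xen/smp.c below, switch to the named cpumask accessors. Those are thin wrappers over the same maps; roughly (paraphrasing <linux/cpumask.h> of this era):

	/* rough equivalences behind the accessors used in these hunks */
	#define num_possible_cpus()		cpus_weight(cpu_possible_map)
	#define cpu_online(cpu)			cpu_isset((cpu), cpu_online_map)
	#define cpu_possible(cpu)		cpu_isset((cpu), cpu_possible_map)
	#define for_each_possible_cpu(cpu)	for_each_cpu_mask((cpu), cpu_possible_map)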
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 6878a9c2df5d..ae751094eba9 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -29,6 +29,7 @@
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/bios_ebda.h>
32#include <asm/mach-summit/mach_mpparse.h> 33#include <asm/mach-summit/mach_mpparse.h>
33 34
34static struct rio_table_hdr *rio_table_hdr __initdata; 35static struct rio_table_hdr *rio_table_hdr __initdata;
@@ -140,8 +141,8 @@ void __init setup_summit(void)
140 int i, next_wpeg, next_bus = 0; 141 int i, next_wpeg, next_bus = 0;
141 142
142 /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */ 143 /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
143 ptr = *(unsigned short *)phys_to_virt(0x40Eul); 144 ptr = get_bios_ebda();
144 ptr = (unsigned long)phys_to_virt(ptr << 4); 145 ptr = (unsigned long)phys_to_virt(ptr);
145 146
146 rio_table_hdr = NULL; 147 rio_table_hdr = NULL;
147 offset = 0x180; 148 offset = 0x180;
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index df224a8774cb..a1f07d793202 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -195,9 +195,9 @@ static int __cpuinit init_smp_flush(void)
195{ 195{
196 int i; 196 int i;
197 197
198 for_each_cpu_mask(i, cpu_possible_map) { 198 for_each_possible_cpu(i)
199 spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); 199 spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
200 } 200
201 return 0; 201 return 0;
202} 202}
203core_initcall(init_smp_flush); 203core_initcall(init_smp_flush);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index 64580679861e..d8ccc3c6552f 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -33,7 +33,7 @@
33 33
34/* We can free up trampoline after bootup if cpu hotplug is not supported. */ 34/* We can free up trampoline after bootup if cpu hotplug is not supported. */
35#ifndef CONFIG_HOTPLUG_CPU 35#ifndef CONFIG_HOTPLUG_CPU
36.section ".init.data","aw",@progbits 36.section ".cpuinit.data","aw",@progbits
37#else 37#else
38.section .rodata,"a",@progbits 38.section .rodata,"a",@progbits
39#endif 39#endif
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 471e694d6713..bde6f63e15d5 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -602,7 +602,7 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
602DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 602DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
603DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 603DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
604DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0) 604DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
605DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1) 605DO_ERROR_INFO(32, SIGILL, "iret exception", iret_error, ILL_BADSTK, 0, 1)
606 606
607void __kprobes do_general_protection(struct pt_regs *regs, long error_code) 607void __kprobes do_general_protection(struct pt_regs *regs, long error_code)
608{ 608{
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 6e2c4efce0ef..8acbf0cdf1a5 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -113,7 +113,7 @@ static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
113 for_each_online_cpu(cpu) { 113 for_each_online_cpu(cpu) {
114 if (cpuset & (1 << cpu)) { 114 if (cpuset & (1 << cpu)) {
115#ifdef VOYAGER_DEBUG 115#ifdef VOYAGER_DEBUG
116 if (!cpu_isset(cpu, cpu_online_map)) 116 if (!cpu_online(cpu))
117 VDEBUG(("CPU%d sending cpi %d to CPU%d not in " 117 VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
118 "cpu_online_map\n", 118 "cpu_online_map\n",
119 hard_smp_processor_id(), cpi, cpu)); 119 hard_smp_processor_id(), cpi, cpu));
@@ -683,9 +683,9 @@ void __init smp_boot_cpus(void)
683 * Code added from smpboot.c */ 683 * Code added from smpboot.c */
684 { 684 {
685 unsigned long bogosum = 0; 685 unsigned long bogosum = 0;
686 for (i = 0; i < NR_CPUS; i++) 686
687 if (cpu_isset(i, cpu_online_map)) 687 for_each_online_cpu(i)
688 bogosum += cpu_data(i).loops_per_jiffy; 688 bogosum += cpu_data(i).loops_per_jiffy;
689 printk(KERN_INFO "Total of %d processors activated " 689 printk(KERN_INFO "Total of %d processors activated "
690 "(%lu.%02lu BogoMIPS).\n", 690 "(%lu.%02lu BogoMIPS).\n",
691 cpucount + 1, bogosum / (500000 / HZ), 691 cpucount + 1, bogosum / (500000 / HZ),
@@ -1838,7 +1838,7 @@ static int __cpuinit voyager_cpu_up(unsigned int cpu)
1838 return -EIO; 1838 return -EIO;
1839 /* Unleash the CPU! */ 1839 /* Unleash the CPU! */
1840 cpu_set(cpu, smp_commenced_mask); 1840 cpu_set(cpu, smp_commenced_mask);
1841 while (!cpu_isset(cpu, cpu_online_map)) 1841 while (!cpu_online(cpu))
1842 mb(); 1842 mb();
1843 return 0; 1843 return 0;
1844} 1844}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index baf7c4f643c8..4a4761892951 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -566,9 +566,9 @@ void __init paging_init(void)
566 566
567/* 567/*
568 * Test if the WP bit works in supervisor mode. It isn't supported on 386's 568 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
569 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This 569 * and also on some strange 486's. All 586+'s are OK. This used to involve
570 * used to involve black magic jumps to work around some nasty CPU bugs, 570 * black magic jumps to work around some nasty CPU bugs, but fortunately the
571 * but fortunately the switch to using exceptions got rid of all that. 571 * switch to using exceptions got rid of all that.
572 */ 572 */
573static void __init test_wp_bit(void) 573static void __init test_wp_bit(void)
574{ 574{
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 92dd3dbf3ffb..94e69000f982 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -193,7 +193,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
193 193
194 /* Restrict the possible_map according to max_cpus. */ 194 /* Restrict the possible_map according to max_cpus. */
195 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { 195 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
196 for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--) 196 for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
197 continue; 197 continue;
198 cpu_clear(cpu, cpu_possible_map); 198 cpu_clear(cpu, cpu_possible_map);
199 } 199 }