aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/kernel/process.c8
-rw-r--r--arch/alpha/kernel/smp.c12
-rw-r--r--arch/arm/configs/at91sam9260ek_defconfig2
-rw-r--r--arch/arm/configs/at91sam9261ek_defconfig2
-rw-r--r--arch/arm/configs/at91sam9263ek_defconfig2
-rw-r--r--arch/arm/configs/at91sam9rlek_defconfig2
-rw-r--r--arch/arm/configs/qil-a9260_defconfig2
-rw-r--r--arch/arm/kernel/elf.c4
-rw-r--r--arch/arm/mach-at91/at91cap9_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c2
-rw-r--r--arch/arm/mach-at91/gpio.c15
-rw-r--r--arch/arm/mach-at91/include/mach/board.h1
-rw-r--r--arch/arm/mach-davinci/board-evm.c6
-rw-r--r--arch/arm/mach-davinci/clock.c5
-rw-r--r--arch/arm/mach-davinci/usb.c1
-rw-r--r--arch/arm/mach-ep93xx/include/mach/gesbc9312.h3
-rw-r--r--arch/arm/mach-ep93xx/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-kirkwood/irq.c2
-rw-r--r--arch/arm/mach-mv78xx0/irq.c2
-rw-r--r--arch/arm/mach-omap2/clock.c16
-rw-r--r--arch/arm/mach-orion5x/irq.c2
-rw-r--r--arch/arm/mach-rpc/riscpc.c6
-rw-r--r--arch/arm/mm/mmu.c3
-rw-r--r--arch/arm/plat-orion/gpio.c73
-rw-r--r--arch/arm/plat-orion/include/plat/gpio.h3
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h1
-rw-r--r--arch/ia64/Kconfig18
-rw-r--r--arch/ia64/configs/xen_domu_defconfig1601
-rw-r--r--arch/ia64/include/asm/kvm.h4
-rw-r--r--arch/ia64/include/asm/mmzone.h4
-rw-r--r--arch/ia64/include/asm/sn/bte.h2
-rw-r--r--arch/ia64/kernel/iosapic.c2
-rw-r--r--arch/ia64/kernel/smpboot.c5
-rw-r--r--arch/ia64/kernel/unwind.c2
-rw-r--r--arch/ia64/kvm/kvm-ia64.c4
-rw-r--r--arch/ia64/kvm/process.c17
-rw-r--r--arch/ia64/mm/numa.c4
-rw-r--r--arch/ia64/sn/kernel/bte.c7
-rw-r--r--arch/ia64/xen/Kconfig3
-rw-r--r--arch/ia64/xen/xen_pv_ops.c4
-rw-r--r--arch/m68k/atari/ataints.c16
-rw-r--r--arch/m68k/atari/atakeyb.c4
-rw-r--r--arch/m68k/atari/config.c2
-rw-r--r--arch/m68k/atari/debug.c22
-rw-r--r--arch/m68k/atari/time.c8
-rw-r--r--arch/m68k/include/asm/atarihw.h4
-rw-r--r--arch/m68k/include/asm/atariints.h6
-rw-r--r--arch/mips/Kconfig9
-rw-r--r--arch/mips/alchemy/common/time.c6
-rw-r--r--arch/mips/include/asm/seccomp.h1
-rw-r--r--arch/mips/kernel/irq.c1
-rw-r--r--arch/mips/kernel/linux32.c69
-rw-r--r--arch/mips/kernel/scall32-o32.S4
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S28
-rw-r--r--arch/mips/kernel/scall64-o32.S40
-rw-r--r--arch/mips/kernel/signal.c5
-rw-r--r--arch/mips/kernel/signal32.c28
-rw-r--r--arch/mips/kernel/syscall.c26
-rw-r--r--arch/mips/mm/cache.c5
-rw-r--r--arch/mn10300/Kconfig1
-rw-r--r--arch/mn10300/unit-asb2305/pci.c2
-rw-r--r--arch/powerpc/include/asm/compat.h5
-rw-r--r--arch/powerpc/include/asm/pgtable-4k.h2
-rw-r--r--arch/powerpc/include/asm/pgtable-64k.h2
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h3
-rw-r--r--arch/powerpc/include/asm/seccomp.h4
-rw-r--r--arch/powerpc/kernel/align.c36
-rw-r--r--arch/powerpc/kvm/powerpc.c4
-rw-r--r--arch/powerpc/lib/copyuser_64.S38
-rw-r--r--arch/powerpc/lib/memcpy_64.S26
-rw-r--r--arch/powerpc/mm/numa.c5
-rw-r--r--arch/powerpc/platforms/ps3/mm.c2
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.c17
-rw-r--r--arch/s390/include/asm/cputime.h2
-rw-r--r--arch/s390/include/asm/setup.h2
-rw-r--r--arch/s390/kernel/setup.c9
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/sh/boards/board-ap325rxa.c53
-rw-r--r--arch/sh/kernel/cpu/sh2a/clock-sh7201.c4
-rw-r--r--arch/sparc/include/asm/compat.h5
-rw-r--r--arch/sparc/include/asm/seccomp.h6
-rw-r--r--arch/sparc/kernel/chmc.c1
-rw-r--r--arch/um/drivers/vde_user.c6
-rw-r--r--arch/x86/Kconfig63
-rw-r--r--arch/x86/Kconfig.debug24
-rw-r--r--arch/x86/boot/Makefile1
-rw-r--r--arch/x86/boot/a20.c6
-rw-r--r--arch/x86/boot/boot.h3
-rw-r--r--arch/x86/boot/compressed/Makefile21
-rw-r--r--arch/x86/boot/compressed/head_32.S8
-rw-r--r--arch/x86/boot/compressed/head_64.S10
-rw-r--r--arch/x86/boot/compressed/misc.c118
-rw-r--r--arch/x86/boot/copy.S40
-rw-r--r--arch/x86/boot/header.S2
-rw-r--r--arch/x86/boot/main.c5
-rw-r--r--arch/x86/boot/pmjump.S16
-rw-r--r--arch/x86/boot/voyager.c40
-rw-r--r--arch/x86/configs/i386_defconfig7
-rw-r--r--arch/x86/configs/x86_64_defconfig7
-rw-r--r--arch/x86/ia32/ia32_signal.c70
-rw-r--r--arch/x86/include/asm/apic.h414
-rw-r--r--arch/x86/include/asm/arch_hooks.h26
-rw-r--r--arch/x86/include/asm/boot.h16
-rw-r--r--arch/x86/include/asm/fixmap.h149
-rw-r--r--arch/x86/include/asm/fixmap_32.h115
-rw-r--r--arch/x86/include/asm/fixmap_64.h79
-rw-r--r--arch/x86/include/asm/genapic.h264
-rw-r--r--arch/x86/include/asm/i8259.h4
-rw-r--r--arch/x86/include/asm/io.h11
-rw-r--r--arch/x86/include/asm/iomap.h3
-rw-r--r--arch/x86/include/asm/ipi.h2
-rw-r--r--arch/x86/include/asm/irq_vectors.h2
-rw-r--r--arch/x86/include/asm/kvm.h7
-rw-r--r--arch/x86/include/asm/linkage.h64
-rw-r--r--arch/x86/include/asm/mach-voyager/do_timer.h17
-rw-r--r--arch/x86/include/asm/mach-voyager/entry_arch.h26
-rw-r--r--arch/x86/include/asm/mach-voyager/setup_arch.h12
-rw-r--r--arch/x86/include/asm/mmzone_32.h2
-rw-r--r--arch/x86/include/asm/mmzone_64.h2
-rw-r--r--arch/x86/include/asm/mpspec.h2
-rw-r--r--arch/x86/include/asm/numa_32.h6
-rw-r--r--arch/x86/include/asm/page_32_types.h2
-rw-r--r--arch/x86/include/asm/page_64_types.h2
-rw-r--r--arch/x86/include/asm/page_types.h6
-rw-r--r--arch/x86/include/asm/pat.h3
-rw-r--r--arch/x86/include/asm/pgtable-2level_types.h2
-rw-r--r--arch/x86/include/asm/pgtable-3level_types.h2
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h1
-rw-r--r--arch/x86/include/asm/pgtable_types.h6
-rw-r--r--arch/x86/include/asm/processor.h16
-rw-r--r--arch/x86/include/asm/seccomp_32.h6
-rw-r--r--arch/x86/include/asm/seccomp_64.h8
-rw-r--r--arch/x86/include/asm/setup.h16
-rw-r--r--arch/x86/include/asm/syscalls.h2
-rw-r--r--arch/x86/include/asm/system.h3
-rw-r--r--arch/x86/include/asm/timer.h2
-rw-r--r--arch/x86/include/asm/uaccess_64.h10
-rw-r--r--arch/x86/include/asm/uv/uv.h3
-rw-r--r--arch/x86/include/asm/vic.h61
-rw-r--r--arch/x86/include/asm/voyager.h571
-rw-r--r--arch/x86/kernel/Makefile35
-rw-r--r--arch/x86/kernel/acpi/boot.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.S4
-rw-r--r--arch/x86/kernel/acpi/wakeup_32.S2
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S34
-rw-r--r--arch/x86/kernel/alternative.c6
-rw-r--r--arch/x86/kernel/apic/Makefile19
-rw-r--r--arch/x86/kernel/apic/apic.c (renamed from arch/x86/kernel/apic.c)92
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c (renamed from arch/x86/kernel/genapic_flat_64.c)24
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c (renamed from arch/x86/kernel/bigsmp_32.c)95
-rw-r--r--arch/x86/kernel/apic/es7000_32.c (renamed from arch/x86/kernel/es7000_32.c)617
-rw-r--r--arch/x86/kernel/apic/io_apic.c (renamed from arch/x86/kernel/io_apic.c)2
-rw-r--r--arch/x86/kernel/apic/ipi.c (renamed from arch/x86/kernel/ipi.c)0
-rw-r--r--arch/x86/kernel/apic/nmi.c (renamed from arch/x86/kernel/nmi.c)0
-rw-r--r--arch/x86/kernel/apic/numaq_32.c (renamed from arch/x86/kernel/numaq_32.c)299
-rw-r--r--arch/x86/kernel/apic/probe_32.c (renamed from arch/x86/kernel/probe_32.c)217
-rw-r--r--arch/x86/kernel/apic/probe_64.c (renamed from arch/x86/kernel/genapic_64.c)37
-rw-r--r--arch/x86/kernel/apic/summit_32.c (renamed from arch/x86/kernel/summit_32.c)125
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c (renamed from arch/x86/kernel/genx2apic_cluster.c)20
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c (renamed from arch/x86/kernel/genx2apic_phys.c)25
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c (renamed from arch/x86/kernel/genx2apic_uv_x.c)54
-rw-r--r--arch/x86/kernel/apm_32.c4
-rw-r--r--arch/x86/kernel/cpu/addon_cpuid_features.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/cpu/common.c8
-rw-r--r--arch/x86/kernel/cpu/cpufreq/e_powersaver.c6
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c12
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c6
-rw-r--r--arch/x86/kernel/cpu/intel.c5
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_64.c7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd_64.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel_64.c10
-rw-r--r--arch/x86/kernel/cpu/mcheck/p4.c4
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c2
-rw-r--r--arch/x86/kernel/crash.c2
-rw-r--r--arch/x86/kernel/e820.c3
-rw-r--r--arch/x86/kernel/efi_stub_32.S3
-rw-r--r--arch/x86/kernel/efi_stub_64.S7
-rw-r--r--arch/x86/kernel/entry_32.S4
-rw-r--r--arch/x86/kernel/entry_64.S25
-rw-r--r--arch/x86/kernel/head_32.S4
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/i8259.c1
-rw-r--r--arch/x86/kernel/ioport.c11
-rw-r--r--arch/x86/kernel/irq_32.c1
-rw-r--r--arch/x86/kernel/irqinit_32.c13
-rw-r--r--arch/x86/kernel/kgdb.c2
-rw-r--r--arch/x86/kernel/kvmclock.c1
-rw-r--r--arch/x86/kernel/machine_kexec_32.c2
-rw-r--r--arch/x86/kernel/mca_32.c5
-rw-r--r--arch/x86/kernel/mpparse.c17
-rw-r--r--arch/x86/kernel/paravirt.c1
-rw-r--r--arch/x86/kernel/process.c191
-rw-r--r--arch/x86/kernel/process_32.c193
-rw-r--r--arch/x86/kernel/process_64.c188
-rw-r--r--arch/x86/kernel/ptrace.c4
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/relocate_kernel_32.S2
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S4
-rw-r--r--arch/x86/kernel/setup.c115
-rw-r--r--arch/x86/kernel/signal.c117
-rw-r--r--arch/x86/kernel/smp.c2
-rw-r--r--arch/x86/kernel/smpboot.c37
-rw-r--r--arch/x86/kernel/time_32.c6
-rw-r--r--arch/x86/kernel/time_64.c2
-rw-r--r--arch/x86/kernel/tlb_uv.c4
-rw-r--r--arch/x86/kernel/trampoline_32.S2
-rw-r--r--arch/x86/kernel/trampoline_64.S4
-rw-r--r--arch/x86/kernel/traps.c52
-rw-r--r--arch/x86/kernel/visws_quirks.c8
-rw-r--r--arch/x86/kernel/vmi_32.c4
-rw-r--r--arch/x86/kernel/vmiclock_32.c7
-rw-r--r--arch/x86/kernel/vmlinux_32.lds.S2
-rw-r--r--arch/x86/kernel/vmlinux_64.lds.S2
-rw-r--r--arch/x86/kernel/vsmp_64.c12
-rw-r--r--arch/x86/kvm/i8254.c2
-rw-r--r--arch/x86/kvm/irq.c7
-rw-r--r--arch/x86/kvm/irq.h1
-rw-r--r--arch/x86/kvm/lapic.c66
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.c9
-rw-r--r--arch/x86/kvm/svm.c1
-rw-r--r--arch/x86/kvm/vmx.c5
-rw-r--r--arch/x86/kvm/x86.c10
-rw-r--r--arch/x86/lguest/Kconfig1
-rw-r--r--arch/x86/lguest/boot.c17
-rw-r--r--arch/x86/lib/getuser.S2
-rw-r--r--arch/x86/mach-voyager/Makefile8
-rw-r--r--arch/x86/mach-voyager/setup.c119
-rw-r--r--arch/x86/mach-voyager/voyager_basic.c317
-rw-r--r--arch/x86/mach-voyager/voyager_cat.c1197
-rw-r--r--arch/x86/mach-voyager/voyager_smp.c1805
-rw-r--r--arch/x86/mach-voyager/voyager_thread.c128
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/fault.c1078
-rw-r--r--arch/x86/mm/highmem_32.c34
-rw-r--r--arch/x86/mm/init.c49
-rw-r--r--arch/x86/mm/init_32.c61
-rw-r--r--arch/x86/mm/init_64.c39
-rw-r--r--arch/x86/mm/iomap_32.c11
-rw-r--r--arch/x86/mm/memtest.c156
-rw-r--r--arch/x86/mm/numa_32.c28
-rw-r--r--arch/x86/mm/numa_64.c2
-rw-r--r--arch/x86/mm/pageattr.c22
-rw-r--r--arch/x86/mm/pat.c48
-rw-r--r--arch/x86/mm/pgtable.c18
-rw-r--r--arch/x86/mm/pgtable_32.c18
-rw-r--r--arch/x86/mm/srat_64.c2
-rw-r--r--arch/x86/mm/tlb.c1
-rw-r--r--arch/x86/oprofile/op_model_ppro.c14
-rw-r--r--arch/x86/pci/numaq_32.c2
-rw-r--r--arch/x86/power/hibernate_asm_32.S2
-rw-r--r--arch/x86/power/hibernate_asm_64.S2
-rw-r--r--arch/x86/vdso/vma.c4
-rw-r--r--arch/x86/xen/Kconfig2
-rw-r--r--arch/x86/xen/enlighten.c22
-rw-r--r--arch/x86/xen/xen-head.S2
261 files changed, 4895 insertions, 8037 deletions
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index f238370c907d..8d0097f10208 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -93,8 +93,8 @@ common_shutdown_1(void *generic_ptr)
93 if (cpuid != boot_cpuid) { 93 if (cpuid != boot_cpuid) {
94 flags |= 0x00040000UL; /* "remain halted" */ 94 flags |= 0x00040000UL; /* "remain halted" */
95 *pflags = flags; 95 *pflags = flags;
96 cpu_clear(cpuid, cpu_present_map); 96 set_cpu_present(cpuid, false);
97 cpu_clear(cpuid, cpu_possible_map); 97 set_cpu_possible(cpuid, false);
98 halt(); 98 halt();
99 } 99 }
100#endif 100#endif
@@ -120,8 +120,8 @@ common_shutdown_1(void *generic_ptr)
120 120
121#ifdef CONFIG_SMP 121#ifdef CONFIG_SMP
122 /* Wait for the secondaries to halt. */ 122 /* Wait for the secondaries to halt. */
123 cpu_clear(boot_cpuid, cpu_present_map); 123 set_cpu_present(boot_cpuid, false);
124 cpu_clear(boot_cpuid, cpu_possible_map); 124 set_cpu_possible(boot_cpuid, false);
125 while (cpus_weight(cpu_present_map)) 125 while (cpus_weight(cpu_present_map))
126 barrier(); 126 barrier();
127#endif 127#endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 00f1dc3dfd5f..b1fe5674c3a1 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -120,12 +120,12 @@ void __cpuinit
120smp_callin(void) 120smp_callin(void)
121{ 121{
122 int cpuid = hard_smp_processor_id(); 122 int cpuid = hard_smp_processor_id();
123 cpumask_t mask = cpu_online_map;
124 123
125 if (cpu_test_and_set(cpuid, mask)) { 124 if (cpu_online(cpuid)) {
126 printk("??, cpu 0x%x already present??\n", cpuid); 125 printk("??, cpu 0x%x already present??\n", cpuid);
127 BUG(); 126 BUG();
128 } 127 }
128 set_cpu_online(cpuid, true);
129 129
130 /* Turn on machine checks. */ 130 /* Turn on machine checks. */
131 wrmces(7); 131 wrmces(7);
@@ -436,8 +436,8 @@ setup_smp(void)
436 ((char *)cpubase + i*hwrpb->processor_size); 436 ((char *)cpubase + i*hwrpb->processor_size);
437 if ((cpu->flags & 0x1cc) == 0x1cc) { 437 if ((cpu->flags & 0x1cc) == 0x1cc) {
438 smp_num_probed++; 438 smp_num_probed++;
439 cpu_set(i, cpu_possible_map); 439 set_cpu_possible(i, true);
440 cpu_set(i, cpu_present_map); 440 set_cpu_present(i, true);
441 cpu->pal_revision = boot_cpu_palrev; 441 cpu->pal_revision = boot_cpu_palrev;
442 } 442 }
443 443
@@ -470,8 +470,8 @@ smp_prepare_cpus(unsigned int max_cpus)
470 470
471 /* Nothing to do on a UP box, or when told not to. */ 471 /* Nothing to do on a UP box, or when told not to. */
472 if (smp_num_probed == 1 || max_cpus == 0) { 472 if (smp_num_probed == 1 || max_cpus == 0) {
473 cpu_possible_map = cpumask_of_cpu(boot_cpuid); 473 init_cpu_possible(cpumask_of(boot_cpuid));
474 cpu_present_map = cpumask_of_cpu(boot_cpuid); 474 init_cpu_present(cpumask_of(boot_cpuid));
475 printk(KERN_INFO "SMP mode deactivated.\n"); 475 printk(KERN_INFO "SMP mode deactivated.\n");
476 return; 476 return;
477 } 477 }
diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260ek_defconfig
index e0ee7060f9aa..98e2f3de4bc5 100644
--- a/arch/arm/configs/at91sam9260ek_defconfig
+++ b/arch/arm/configs/at91sam9260ek_defconfig
@@ -608,7 +608,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y
608# Watchdog Device Drivers 608# Watchdog Device Drivers
609# 609#
610# CONFIG_SOFT_WATCHDOG is not set 610# CONFIG_SOFT_WATCHDOG is not set
611CONFIG_AT91SAM9_WATCHDOG=y 611CONFIG_AT91SAM9X_WATCHDOG=y
612 612
613# 613#
614# USB-based Watchdog Cards 614# USB-based Watchdog Cards
diff --git a/arch/arm/configs/at91sam9261ek_defconfig b/arch/arm/configs/at91sam9261ek_defconfig
index 01d1ef97d8be..149456142392 100644
--- a/arch/arm/configs/at91sam9261ek_defconfig
+++ b/arch/arm/configs/at91sam9261ek_defconfig
@@ -700,7 +700,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y
700# Watchdog Device Drivers 700# Watchdog Device Drivers
701# 701#
702# CONFIG_SOFT_WATCHDOG is not set 702# CONFIG_SOFT_WATCHDOG is not set
703CONFIG_AT91SAM9_WATCHDOG=y 703CONFIG_AT91SAM9X_WATCHDOG=y
704 704
705# 705#
706# USB-based Watchdog Cards 706# USB-based Watchdog Cards
diff --git a/arch/arm/configs/at91sam9263ek_defconfig b/arch/arm/configs/at91sam9263ek_defconfig
index 036a126725c1..21599f3c6275 100644
--- a/arch/arm/configs/at91sam9263ek_defconfig
+++ b/arch/arm/configs/at91sam9263ek_defconfig
@@ -710,7 +710,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y
710# Watchdog Device Drivers 710# Watchdog Device Drivers
711# 711#
712# CONFIG_SOFT_WATCHDOG is not set 712# CONFIG_SOFT_WATCHDOG is not set
713CONFIG_AT91SAM9_WATCHDOG=y 713CONFIG_AT91SAM9X_WATCHDOG=y
714 714
715# 715#
716# USB-based Watchdog Cards 716# USB-based Watchdog Cards
diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rlek_defconfig
index 237a2a6a8517..e2df81a3e804 100644
--- a/arch/arm/configs/at91sam9rlek_defconfig
+++ b/arch/arm/configs/at91sam9rlek_defconfig
@@ -606,7 +606,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y
606# Watchdog Device Drivers 606# Watchdog Device Drivers
607# 607#
608# CONFIG_SOFT_WATCHDOG is not set 608# CONFIG_SOFT_WATCHDOG is not set
609CONFIG_AT91SAM9_WATCHDOG=y 609CONFIG_AT91SAM9X_WATCHDOG=y
610 610
611# 611#
612# Sonics Silicon Backplane 612# Sonics Silicon Backplane
diff --git a/arch/arm/configs/qil-a9260_defconfig b/arch/arm/configs/qil-a9260_defconfig
index cd1d717903ac..9b32d0eb89ba 100644
--- a/arch/arm/configs/qil-a9260_defconfig
+++ b/arch/arm/configs/qil-a9260_defconfig
@@ -727,7 +727,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y
727# Watchdog Device Drivers 727# Watchdog Device Drivers
728# 728#
729# CONFIG_SOFT_WATCHDOG is not set 729# CONFIG_SOFT_WATCHDOG is not set
730# CONFIG_AT91SAM9_WATCHDOG is not set 730# CONFIG_AT91SAM9X_WATCHDOG is not set
731 731
732# 732#
733# USB-based Watchdog Cards 733# USB-based Watchdog Cards
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 84849098c8e8..d4a0da1e48f4 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -74,9 +74,9 @@ EXPORT_SYMBOL(elf_set_personality);
74 */ 74 */
75int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack) 75int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
76{ 76{
77 if (executable_stack != EXSTACK_ENABLE_X) 77 if (executable_stack != EXSTACK_DISABLE_X)
78 return 1; 78 return 1;
79 if (cpu_architecture() <= CPU_ARCH_ARMv6) 79 if (cpu_architecture() < CPU_ARCH_ARMv6)
80 return 1; 80 return 1;
81 return 0; 81 return 0;
82} 82}
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index 9eca2209cde6..412aa49ad2fb 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -697,7 +697,7 @@ static void __init at91_add_device_rtt(void)
697 * Watchdog 697 * Watchdog
698 * -------------------------------------------------------------------- */ 698 * -------------------------------------------------------------------- */
699 699
700#if defined(CONFIG_AT91SAM9_WATCHDOG) || defined(CONFIG_AT91SAM9_WATCHDOG_MODULE) 700#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
701static struct platform_device at91cap9_wdt_device = { 701static struct platform_device at91cap9_wdt_device = {
702 .name = "at91_wdt", 702 .name = "at91_wdt",
703 .id = -1, 703 .id = -1,
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index fdde1ea21b07..d74c9ac007e7 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -643,7 +643,7 @@ static void __init at91_add_device_rtt(void)
643 * Watchdog 643 * Watchdog
644 * -------------------------------------------------------------------- */ 644 * -------------------------------------------------------------------- */
645 645
646#if defined(CONFIG_AT91SAM9_WATCHDOG) || defined(CONFIG_AT91SAM9_WATCHDOG_MODULE) 646#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
647static struct platform_device at91sam9260_wdt_device = { 647static struct platform_device at91sam9260_wdt_device = {
648 .name = "at91_wdt", 648 .name = "at91_wdt",
649 .id = -1, 649 .id = -1,
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 17289756f80f..59fc48311fb0 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -621,7 +621,7 @@ static void __init at91_add_device_rtt(void)
621 * Watchdog 621 * Watchdog
622 * -------------------------------------------------------------------- */ 622 * -------------------------------------------------------------------- */
623 623
624#if defined(CONFIG_AT91SAM9_WATCHDOG) || defined(CONFIG_AT91SAM9_WATCHDOG_MODULE) 624#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
625static struct platform_device at91sam9261_wdt_device = { 625static struct platform_device at91sam9261_wdt_device = {
626 .name = "at91_wdt", 626 .name = "at91_wdt",
627 .id = -1, 627 .id = -1,
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index b753cb879d8e..134af97ff340 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -854,7 +854,7 @@ static void __init at91_add_device_rtt(void)
854 * Watchdog 854 * Watchdog
855 * -------------------------------------------------------------------- */ 855 * -------------------------------------------------------------------- */
856 856
857#if defined(CONFIG_AT91SAM9_WATCHDOG) || defined(CONFIG_AT91SAM9_WATCHDOG_MODULE) 857#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
858static struct platform_device at91sam9263_wdt_device = { 858static struct platform_device at91sam9263_wdt_device = {
859 .name = "at91_wdt", 859 .name = "at91_wdt",
860 .id = -1, 860 .id = -1,
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 145324f4ec56..728186515cdf 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -609,7 +609,7 @@ static void __init at91_add_device_rtt(void)
609 * Watchdog 609 * Watchdog
610 * -------------------------------------------------------------------- */ 610 * -------------------------------------------------------------------- */
611 611
612#if defined(CONFIG_AT91SAM9_WATCHDOG) || defined(CONFIG_AT91SAM9_WATCHDOG_MODULE) 612#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
613static struct platform_device at91sam9rl_wdt_device = { 613static struct platform_device at91sam9rl_wdt_device = {
614 .name = "at91_wdt", 614 .name = "at91_wdt",
615 .id = -1, 615 .id = -1,
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
index 9b0447c3d59b..2f7d4977dce9 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -490,7 +490,8 @@ postcore_initcall(at91_gpio_debugfs_init);
490 490
491/*--------------------------------------------------------------------------*/ 491/*--------------------------------------------------------------------------*/
492 492
493/* This lock class tells lockdep that GPIO irqs are in a different 493/*
494 * This lock class tells lockdep that GPIO irqs are in a different
494 * category than their parents, so it won't report false recursion. 495 * category than their parents, so it won't report false recursion.
495 */ 496 */
496static struct lock_class_key gpio_lock_class; 497static struct lock_class_key gpio_lock_class;
@@ -509,9 +510,6 @@ void __init at91_gpio_irq_setup(void)
509 unsigned id = this->id; 510 unsigned id = this->id;
510 unsigned i; 511 unsigned i;
511 512
512 /* enable PIO controller's clock */
513 clk_enable(this->clock);
514
515 __raw_writel(~0, this->regbase + PIO_IDR); 513 __raw_writel(~0, this->regbase + PIO_IDR);
516 514
517 for (i = 0, pin = this->chipbase; i < 32; i++, pin++) { 515 for (i = 0, pin = this->chipbase; i < 32; i++, pin++) {
@@ -556,7 +554,14 @@ void __init at91_gpio_init(struct at91_gpio_bank *data, int nr_banks)
556 data->chipbase = PIN_BASE + i * 32; 554 data->chipbase = PIN_BASE + i * 32;
557 data->regbase = data->offset + (void __iomem *)AT91_VA_BASE_SYS; 555 data->regbase = data->offset + (void __iomem *)AT91_VA_BASE_SYS;
558 556
559 /* AT91SAM9263_ID_PIOCDE groups PIOC, PIOD, PIOE */ 557 /* enable PIO controller's clock */
558 clk_enable(data->clock);
559
560 /*
561 * Some processors share peripheral ID between multiple GPIO banks.
562 * SAM9263 (PIOC, PIOD, PIOE)
563 * CAP9 (PIOA, PIOB, PIOC, PIOD)
564 */
560 if (last && last->id == data->id) 565 if (last && last->id == data->id)
561 last->next = data; 566 last->next = data;
562 } 567 }
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index fb51f0e0a83f..0b3ae21b4565 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -93,6 +93,7 @@ struct atmel_nand_data {
93 u8 enable_pin; /* chip enable */ 93 u8 enable_pin; /* chip enable */
94 u8 det_pin; /* card detect */ 94 u8 det_pin; /* card detect */
95 u8 rdy_pin; /* ready/busy */ 95 u8 rdy_pin; /* ready/busy */
96 u8 rdy_pin_active_low; /* rdy_pin value is inverted */
96 u8 ale; /* address line number connected to ALE */ 97 u8 ale; /* address line number connected to ALE */
97 u8 cle; /* address line number connected to CLE */ 98 u8 cle; /* address line number connected to CLE */
98 u8 bus_width_16; /* buswidth is 16 bit */ 99 u8 bus_width_16; /* buswidth is 16 bit */
diff --git a/arch/arm/mach-davinci/board-evm.c b/arch/arm/mach-davinci/board-evm.c
index a957d239a683..38b6a9ce2a93 100644
--- a/arch/arm/mach-davinci/board-evm.c
+++ b/arch/arm/mach-davinci/board-evm.c
@@ -311,6 +311,9 @@ evm_u35_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c)
311 gpio_request(gpio + 7, "nCF_SEL"); 311 gpio_request(gpio + 7, "nCF_SEL");
312 gpio_direction_output(gpio + 7, 1); 312 gpio_direction_output(gpio + 7, 1);
313 313
314 /* irlml6401 sustains over 3A, switches 5V in under 8 msec */
315 setup_usb(500, 8);
316
314 return 0; 317 return 0;
315} 318}
316 319
@@ -417,9 +420,6 @@ static __init void davinci_evm_init(void)
417 platform_add_devices(davinci_evm_devices, 420 platform_add_devices(davinci_evm_devices,
418 ARRAY_SIZE(davinci_evm_devices)); 421 ARRAY_SIZE(davinci_evm_devices));
419 evm_init_i2c(); 422 evm_init_i2c();
420
421 /* irlml6401 sustains over 3A, switches 5V in under 8 msec */
422 setup_usb(500, 8);
423} 423}
424 424
425static __init void davinci_evm_irq_init(void) 425static __init void davinci_evm_irq_init(void)
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 28f6dbc95bd7..abb92b7eca0c 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -231,6 +231,11 @@ static struct clk davinci_clks[] = {
231 .lpsc = DAVINCI_LPSC_GPIO, 231 .lpsc = DAVINCI_LPSC_GPIO,
232 }, 232 },
233 { 233 {
234 .name = "usb",
235 .rate = &commonrate,
236 .lpsc = DAVINCI_LPSC_USB,
237 },
238 {
234 .name = "AEMIFCLK", 239 .name = "AEMIFCLK",
235 .rate = &commonrate, 240 .rate = &commonrate,
236 .lpsc = DAVINCI_LPSC_AEMIF, 241 .lpsc = DAVINCI_LPSC_AEMIF,
diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c
index 867ead2559ad..69680784448a 100644
--- a/arch/arm/mach-davinci/usb.c
+++ b/arch/arm/mach-davinci/usb.c
@@ -47,6 +47,7 @@ static struct musb_hdrc_platform_data usb_data = {
47#elif defined(CONFIG_USB_MUSB_HOST) 47#elif defined(CONFIG_USB_MUSB_HOST)
48 .mode = MUSB_HOST, 48 .mode = MUSB_HOST,
49#endif 49#endif
50 .clock = "usb",
50 .config = &musb_config, 51 .config = &musb_config,
51}; 52};
52 53
diff --git a/arch/arm/mach-ep93xx/include/mach/gesbc9312.h b/arch/arm/mach-ep93xx/include/mach/gesbc9312.h
deleted file mode 100644
index 21fe2b922aa5..000000000000
--- a/arch/arm/mach-ep93xx/include/mach/gesbc9312.h
+++ /dev/null
@@ -1,3 +0,0 @@
1/*
2 * arch/arm/mach-ep93xx/include/mach/gesbc9312.h
3 */
diff --git a/arch/arm/mach-ep93xx/include/mach/hardware.h b/arch/arm/mach-ep93xx/include/mach/hardware.h
index 529807d182bf..2866297310b7 100644
--- a/arch/arm/mach-ep93xx/include/mach/hardware.h
+++ b/arch/arm/mach-ep93xx/include/mach/hardware.h
@@ -10,7 +10,6 @@
10 10
11#include "platform.h" 11#include "platform.h"
12 12
13#include "gesbc9312.h"
14#include "ts72xx.h" 13#include "ts72xx.h"
15 14
16#endif 15#endif
diff --git a/arch/arm/mach-kirkwood/irq.c b/arch/arm/mach-kirkwood/irq.c
index efb86b700276..06083b23bb44 100644
--- a/arch/arm/mach-kirkwood/irq.c
+++ b/arch/arm/mach-kirkwood/irq.c
@@ -42,7 +42,7 @@ void __init kirkwood_init_irq(void)
42 writel(0, GPIO_EDGE_CAUSE(32)); 42 writel(0, GPIO_EDGE_CAUSE(32));
43 43
44 for (i = IRQ_KIRKWOOD_GPIO_START; i < NR_IRQS; i++) { 44 for (i = IRQ_KIRKWOOD_GPIO_START; i < NR_IRQS; i++) {
45 set_irq_chip(i, &orion_gpio_irq_level_chip); 45 set_irq_chip(i, &orion_gpio_irq_chip);
46 set_irq_handler(i, handle_level_irq); 46 set_irq_handler(i, handle_level_irq);
47 irq_desc[i].status |= IRQ_LEVEL; 47 irq_desc[i].status |= IRQ_LEVEL;
48 set_irq_flags(i, IRQF_VALID); 48 set_irq_flags(i, IRQF_VALID);
diff --git a/arch/arm/mach-mv78xx0/irq.c b/arch/arm/mach-mv78xx0/irq.c
index e273418797b4..30b7e4bcdbc7 100644
--- a/arch/arm/mach-mv78xx0/irq.c
+++ b/arch/arm/mach-mv78xx0/irq.c
@@ -40,7 +40,7 @@ void __init mv78xx0_init_irq(void)
40 writel(0, GPIO_EDGE_CAUSE(0)); 40 writel(0, GPIO_EDGE_CAUSE(0));
41 41
42 for (i = IRQ_MV78XX0_GPIO_START; i < NR_IRQS; i++) { 42 for (i = IRQ_MV78XX0_GPIO_START; i < NR_IRQS; i++) {
43 set_irq_chip(i, &orion_gpio_irq_level_chip); 43 set_irq_chip(i, &orion_gpio_irq_chip);
44 set_irq_handler(i, handle_level_irq); 44 set_irq_handler(i, handle_level_irq);
45 irq_desc[i].status |= IRQ_LEVEL; 45 irq_desc[i].status |= IRQ_LEVEL;
46 set_irq_flags(i, IRQF_VALID); 46 set_irq_flags(i, IRQF_VALID);
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index ad721e0cbf7a..ce4d46a4a838 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -565,7 +565,7 @@ u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val)
565 * 565 *
566 * Given a struct clk of a rate-selectable clksel clock, and a clock divisor, 566 * Given a struct clk of a rate-selectable clksel clock, and a clock divisor,
567 * find the corresponding register field value. The return register value is 567 * find the corresponding register field value. The return register value is
568 * the value before left-shifting. Returns 0xffffffff on error 568 * the value before left-shifting. Returns ~0 on error
569 */ 569 */
570u32 omap2_divisor_to_clksel(struct clk *clk, u32 div) 570u32 omap2_divisor_to_clksel(struct clk *clk, u32 div)
571{ 571{
@@ -577,7 +577,7 @@ u32 omap2_divisor_to_clksel(struct clk *clk, u32 div)
577 577
578 clks = omap2_get_clksel_by_parent(clk, clk->parent); 578 clks = omap2_get_clksel_by_parent(clk, clk->parent);
579 if (clks == NULL) 579 if (clks == NULL)
580 return 0; 580 return ~0;
581 581
582 for (clkr = clks->rates; clkr->div; clkr++) { 582 for (clkr = clks->rates; clkr->div; clkr++) {
583 if ((clkr->flags & cpu_mask) && (clkr->div == div)) 583 if ((clkr->flags & cpu_mask) && (clkr->div == div))
@@ -588,7 +588,7 @@ u32 omap2_divisor_to_clksel(struct clk *clk, u32 div)
588 printk(KERN_ERR "clock: Could not find divisor %d for " 588 printk(KERN_ERR "clock: Could not find divisor %d for "
589 "clock %s parent %s\n", div, clk->name, 589 "clock %s parent %s\n", div, clk->name,
590 clk->parent->name); 590 clk->parent->name);
591 return 0; 591 return ~0;
592 } 592 }
593 593
594 return clkr->val; 594 return clkr->val;
@@ -708,7 +708,7 @@ static u32 omap2_clksel_get_src_field(void __iomem **src_addr,
708 return 0; 708 return 0;
709 709
710 for (clkr = clks->rates; clkr->div; clkr++) { 710 for (clkr = clks->rates; clkr->div; clkr++) {
711 if (clkr->flags & (cpu_mask | DEFAULT_RATE)) 711 if (clkr->flags & cpu_mask && clkr->flags & DEFAULT_RATE)
712 break; /* Found the default rate for this platform */ 712 break; /* Found the default rate for this platform */
713 } 713 }
714 714
@@ -746,7 +746,7 @@ int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
746 return -EINVAL; 746 return -EINVAL;
747 747
748 if (clk->usecount > 0) 748 if (clk->usecount > 0)
749 _omap2_clk_disable(clk); 749 omap2_clk_disable(clk);
750 750
751 /* Set new source value (previous dividers if any in effect) */ 751 /* Set new source value (previous dividers if any in effect) */
752 reg_val = __raw_readl(src_addr) & ~field_mask; 752 reg_val = __raw_readl(src_addr) & ~field_mask;
@@ -759,11 +759,11 @@ int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
759 wmb(); 759 wmb();
760 } 760 }
761 761
762 if (clk->usecount > 0)
763 _omap2_clk_enable(clk);
764
765 clk->parent = new_parent; 762 clk->parent = new_parent;
766 763
764 if (clk->usecount > 0)
765 omap2_clk_enable(clk);
766
767 /* CLKSEL clocks follow their parents' rates, divided by a divisor */ 767 /* CLKSEL clocks follow their parents' rates, divided by a divisor */
768 clk->rate = new_parent->rate; 768 clk->rate = new_parent->rate;
769 769
diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c
index 0caae43301e5..e03f7b45cb0d 100644
--- a/arch/arm/mach-orion5x/irq.c
+++ b/arch/arm/mach-orion5x/irq.c
@@ -44,7 +44,7 @@ void __init orion5x_init_irq(void)
44 * User can use set_type() if he wants to use edge types handlers. 44 * User can use set_type() if he wants to use edge types handlers.
45 */ 45 */
46 for (i = IRQ_ORION5X_GPIO_START; i < NR_IRQS; i++) { 46 for (i = IRQ_ORION5X_GPIO_START; i < NR_IRQS; i++) {
47 set_irq_chip(i, &orion_gpio_irq_level_chip); 47 set_irq_chip(i, &orion_gpio_irq_chip);
48 set_irq_handler(i, handle_level_irq); 48 set_irq_handler(i, handle_level_irq);
49 irq_desc[i].status |= IRQ_LEVEL; 49 irq_desc[i].status |= IRQ_LEVEL;
50 set_irq_flags(i, IRQF_VALID); 50 set_irq_flags(i, IRQF_VALID);
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index e88d417736af..c7fc01e9d1f6 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -19,6 +19,7 @@
19#include <linux/serial_8250.h> 19#include <linux/serial_8250.h>
20#include <linux/ata_platform.h> 20#include <linux/ata_platform.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/i2c.h>
22 23
23#include <asm/elf.h> 24#include <asm/elf.h>
24#include <asm/mach-types.h> 25#include <asm/mach-types.h>
@@ -201,8 +202,13 @@ static struct platform_device *devs[] __initdata = {
201 &pata_device, 202 &pata_device,
202}; 203};
203 204
205static struct i2c_board_info i2c_rtc = {
206 I2C_BOARD_INFO("pcf8583", 0x50)
207};
208
204static int __init rpc_init(void) 209static int __init rpc_init(void)
205{ 210{
211 i2c_register_board_info(0, &i2c_rtc, 1);
206 return platform_add_devices(devs, ARRAY_SIZE(devs)); 212 return platform_add_devices(devs, ARRAY_SIZE(devs));
207} 213}
208 214
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9b36c5cb5e9f..d4d082c5c2d4 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -693,7 +693,8 @@ static void __init sanity_check_meminfo(void)
693 * Check whether this memory bank would entirely overlap 693 * Check whether this memory bank would entirely overlap
694 * the vmalloc area. 694 * the vmalloc area.
695 */ 695 */
696 if (__va(bank->start) >= VMALLOC_MIN) { 696 if (__va(bank->start) >= VMALLOC_MIN ||
697 __va(bank->start) < PAGE_OFFSET) {
697 printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " 698 printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
698 "(vmalloc region overlap).\n", 699 "(vmalloc region overlap).\n",
699 bank->start, bank->start + bank->size - 1); 700 bank->start, bank->start + bank->size - 1);
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 967186425ca1..0d12c2164766 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -265,51 +265,36 @@ EXPORT_SYMBOL(orion_gpio_set_blink);
265 * polarity LEVEL mask 265 * polarity LEVEL mask
266 * 266 *
267 ****************************************************************************/ 267 ****************************************************************************/
268static void gpio_irq_edge_ack(u32 irq)
269{
270 int pin = irq_to_gpio(irq);
271
272 writel(~(1 << (pin & 31)), GPIO_EDGE_CAUSE(pin));
273}
274
275static void gpio_irq_edge_mask(u32 irq)
276{
277 int pin = irq_to_gpio(irq);
278 u32 u;
279
280 u = readl(GPIO_EDGE_MASK(pin));
281 u &= ~(1 << (pin & 31));
282 writel(u, GPIO_EDGE_MASK(pin));
283}
284 268
285static void gpio_irq_edge_unmask(u32 irq) 269static void gpio_irq_ack(u32 irq)
286{ 270{
287 int pin = irq_to_gpio(irq); 271 int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
288 u32 u; 272 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
289 273 int pin = irq_to_gpio(irq);
290 u = readl(GPIO_EDGE_MASK(pin)); 274 writel(~(1 << (pin & 31)), GPIO_EDGE_CAUSE(pin));
291 u |= 1 << (pin & 31); 275 }
292 writel(u, GPIO_EDGE_MASK(pin));
293} 276}
294 277
295static void gpio_irq_level_mask(u32 irq) 278static void gpio_irq_mask(u32 irq)
296{ 279{
297 int pin = irq_to_gpio(irq); 280 int pin = irq_to_gpio(irq);
298 u32 u; 281 int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
299 282 u32 reg = (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) ?
300 u = readl(GPIO_LEVEL_MASK(pin)); 283 GPIO_EDGE_MASK(pin) : GPIO_LEVEL_MASK(pin);
284 u32 u = readl(reg);
301 u &= ~(1 << (pin & 31)); 285 u &= ~(1 << (pin & 31));
302 writel(u, GPIO_LEVEL_MASK(pin)); 286 writel(u, reg);
303} 287}
304 288
305static void gpio_irq_level_unmask(u32 irq) 289static void gpio_irq_unmask(u32 irq)
306{ 290{
307 int pin = irq_to_gpio(irq); 291 int pin = irq_to_gpio(irq);
308 u32 u; 292 int type = irq_desc[irq].status & IRQ_TYPE_SENSE_MASK;
309 293 u32 reg = (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) ?
310 u = readl(GPIO_LEVEL_MASK(pin)); 294 GPIO_EDGE_MASK(pin) : GPIO_LEVEL_MASK(pin);
295 u32 u = readl(reg);
311 u |= 1 << (pin & 31); 296 u |= 1 << (pin & 31);
312 writel(u, GPIO_LEVEL_MASK(pin)); 297 writel(u, reg);
313} 298}
314 299
315static int gpio_irq_set_type(u32 irq, u32 type) 300static int gpio_irq_set_type(u32 irq, u32 type)
@@ -331,9 +316,9 @@ static int gpio_irq_set_type(u32 irq, u32 type)
331 * Set edge/level type. 316 * Set edge/level type.
332 */ 317 */
333 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { 318 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
334 desc->chip = &orion_gpio_irq_edge_chip; 319 desc->handle_irq = handle_edge_irq;
335 } else if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { 320 } else if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
336 desc->chip = &orion_gpio_irq_level_chip; 321 desc->handle_irq = handle_level_irq;
337 } else { 322 } else {
338 printk(KERN_ERR "failed to set irq=%d (type=%d)\n", irq, type); 323 printk(KERN_ERR "failed to set irq=%d (type=%d)\n", irq, type);
339 return -EINVAL; 324 return -EINVAL;
@@ -371,19 +356,11 @@ static int gpio_irq_set_type(u32 irq, u32 type)
371 return 0; 356 return 0;
372} 357}
373 358
374struct irq_chip orion_gpio_irq_edge_chip = { 359struct irq_chip orion_gpio_irq_chip = {
375 .name = "orion_gpio_irq_edge", 360 .name = "orion_gpio",
376 .ack = gpio_irq_edge_ack, 361 .ack = gpio_irq_ack,
377 .mask = gpio_irq_edge_mask, 362 .mask = gpio_irq_mask,
378 .unmask = gpio_irq_edge_unmask, 363 .unmask = gpio_irq_unmask,
379 .set_type = gpio_irq_set_type,
380};
381
382struct irq_chip orion_gpio_irq_level_chip = {
383 .name = "orion_gpio_irq_level",
384 .mask = gpio_irq_level_mask,
385 .mask_ack = gpio_irq_level_mask,
386 .unmask = gpio_irq_level_unmask,
387 .set_type = gpio_irq_set_type, 364 .set_type = gpio_irq_set_type,
388}; 365};
389 366
diff --git a/arch/arm/plat-orion/include/plat/gpio.h b/arch/arm/plat-orion/include/plat/gpio.h
index 54deaf274b52..ec743e82c876 100644
--- a/arch/arm/plat-orion/include/plat/gpio.h
+++ b/arch/arm/plat-orion/include/plat/gpio.h
@@ -31,8 +31,7 @@ void orion_gpio_set_blink(unsigned pin, int blink);
31/* 31/*
32 * GPIO interrupt handling. 32 * GPIO interrupt handling.
33 */ 33 */
34extern struct irq_chip orion_gpio_irq_edge_chip; 34extern struct irq_chip orion_gpio_irq_chip;
35extern struct irq_chip orion_gpio_irq_level_chip;
36void orion_gpio_irq_handler(int irqoff); 35void orion_gpio_irq_handler(int irqoff);
37 36
38 37
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index aafaf7a78886..cff8e84f78f2 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -116,6 +116,7 @@ struct atmel_nand_data {
116 int enable_pin; /* chip enable */ 116 int enable_pin; /* chip enable */
117 int det_pin; /* card detect */ 117 int det_pin; /* card detect */
118 int rdy_pin; /* ready/busy */ 118 int rdy_pin; /* ready/busy */
119 u8 rdy_pin_active_low; /* rdy_pin value is inverted */
119 u8 ale; /* address line number connected to ALE */ 120 u8 ale; /* address line number connected to ALE */
120 u8 cle; /* address line number connected to CLE */ 121 u8 cle; /* address line number connected to CLE */
121 u8 bus_width_16; /* buswidth is 16 bit */ 122 u8 bus_width_16; /* buswidth is 16 bit */
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6183aeccecf1..153e727a6e8e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -221,7 +221,11 @@ config IA64_HP_SIM
221 221
222config IA64_XEN_GUEST 222config IA64_XEN_GUEST
223 bool "Xen guest" 223 bool "Xen guest"
224 select SWIOTLB
224 depends on XEN 225 depends on XEN
226 help
227 Build a kernel that runs on Xen guest domain. At this moment only
228 16KB page size in supported.
225 229
226endchoice 230endchoice
227 231
@@ -479,8 +483,7 @@ config HOLES_IN_ZONE
479 default y if VIRTUAL_MEM_MAP 483 default y if VIRTUAL_MEM_MAP
480 484
481config HAVE_ARCH_EARLY_PFN_TO_NID 485config HAVE_ARCH_EARLY_PFN_TO_NID
482 def_bool y 486 def_bool NUMA && SPARSEMEM
483 depends on NEED_MULTIPLE_NODES
484 487
485config HAVE_ARCH_NODEDATA_EXTENSION 488config HAVE_ARCH_NODEDATA_EXTENSION
486 def_bool y 489 def_bool y
@@ -635,6 +638,17 @@ config DMAR
635 and include PCI device scope covered by these DMA 638 and include PCI device scope covered by these DMA
636 remapping devices. 639 remapping devices.
637 640
641config DMAR_DEFAULT_ON
642 def_bool y
643 prompt "Enable DMA Remapping Devices by default"
644 depends on DMAR
645 help
646 Selecting this option will enable a DMAR device at boot time if
647 one is found. If this option is not selected, DMAR support can
648 be enabled by passing intel_iommu=on to the kernel. It is
649 recommended you say N here while the DMAR code remains
650 experimental.
651
638endmenu 652endmenu
639 653
640endif 654endif
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
new file mode 100644
index 000000000000..0bb0714dc19d
--- /dev/null
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -0,0 +1,1601 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc1
4# Fri Jan 16 11:49:59 2009
5#
6CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
7
8#
9# General setup
10#
11CONFIG_EXPERIMENTAL=y
12CONFIG_LOCK_KERNEL=y
13CONFIG_INIT_ENV_ARG_LIMIT=32
14CONFIG_LOCALVERSION=""
15CONFIG_LOCALVERSION_AUTO=y
16CONFIG_SWAP=y
17CONFIG_SYSVIPC=y
18CONFIG_SYSVIPC_SYSCTL=y
19CONFIG_POSIX_MQUEUE=y
20# CONFIG_BSD_PROCESS_ACCT is not set
21# CONFIG_TASKSTATS is not set
22# CONFIG_AUDIT is not set
23CONFIG_IKCONFIG=y
24CONFIG_IKCONFIG_PROC=y
25CONFIG_LOG_BUF_SHIFT=20
26CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
27# CONFIG_GROUP_SCHED is not set
28
29#
30# Control Group support
31#
32# CONFIG_CGROUPS is not set
33CONFIG_SYSFS_DEPRECATED=y
34CONFIG_SYSFS_DEPRECATED_V2=y
35# CONFIG_RELAY is not set
36CONFIG_NAMESPACES=y
37# CONFIG_UTS_NS is not set
38# CONFIG_IPC_NS is not set
39# CONFIG_USER_NS is not set
40# CONFIG_PID_NS is not set
41CONFIG_BLK_DEV_INITRD=y
42CONFIG_INITRAMFS_SOURCE=""
43CONFIG_CC_OPTIMIZE_FOR_SIZE=y
44CONFIG_SYSCTL=y
45# CONFIG_EMBEDDED is not set
46CONFIG_SYSCTL_SYSCALL=y
47CONFIG_KALLSYMS=y
48CONFIG_KALLSYMS_ALL=y
49CONFIG_KALLSYMS_STRIP_GENERATED=y
50# CONFIG_KALLSYMS_EXTRA_PASS is not set
51CONFIG_HOTPLUG=y
52CONFIG_PRINTK=y
53CONFIG_BUG=y
54CONFIG_ELF_CORE=y
55CONFIG_COMPAT_BRK=y
56CONFIG_BASE_FULL=y
57CONFIG_FUTEX=y
58CONFIG_ANON_INODES=y
59CONFIG_EPOLL=y
60CONFIG_SIGNALFD=y
61CONFIG_TIMERFD=y
62CONFIG_EVENTFD=y
63CONFIG_SHMEM=y
64CONFIG_AIO=y
65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_PCI_QUIRKS=y
67CONFIG_SLUB_DEBUG=y
68# CONFIG_SLAB is not set
69CONFIG_SLUB=y
70# CONFIG_SLOB is not set
71# CONFIG_PROFILING is not set
72CONFIG_HAVE_OPROFILE=y
73# CONFIG_KPROBES is not set
74CONFIG_HAVE_KPROBES=y
75CONFIG_HAVE_KRETPROBES=y
76CONFIG_HAVE_ARCH_TRACEHOOK=y
77CONFIG_HAVE_DMA_ATTRS=y
78CONFIG_USE_GENERIC_SMP_HELPERS=y
79# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
80CONFIG_SLABINFO=y
81CONFIG_RT_MUTEXES=y
82CONFIG_BASE_SMALL=0
83CONFIG_MODULES=y
84# CONFIG_MODULE_FORCE_LOAD is not set
85CONFIG_MODULE_UNLOAD=y
86# CONFIG_MODULE_FORCE_UNLOAD is not set
87CONFIG_MODVERSIONS=y
88CONFIG_MODULE_SRCVERSION_ALL=y
89CONFIG_STOP_MACHINE=y
90CONFIG_BLOCK=y
91# CONFIG_BLK_DEV_IO_TRACE is not set
92# CONFIG_BLK_DEV_BSG is not set
93# CONFIG_BLK_DEV_INTEGRITY is not set
94
95#
96# IO Schedulers
97#
98CONFIG_IOSCHED_NOOP=y
99CONFIG_IOSCHED_AS=y
100CONFIG_IOSCHED_DEADLINE=y
101CONFIG_IOSCHED_CFQ=y
102CONFIG_DEFAULT_AS=y
103# CONFIG_DEFAULT_DEADLINE is not set
104# CONFIG_DEFAULT_CFQ is not set
105# CONFIG_DEFAULT_NOOP is not set
106CONFIG_DEFAULT_IOSCHED="anticipatory"
107CONFIG_CLASSIC_RCU=y
108# CONFIG_TREE_RCU is not set
109# CONFIG_PREEMPT_RCU is not set
110# CONFIG_TREE_RCU_TRACE is not set
111# CONFIG_PREEMPT_RCU_TRACE is not set
112CONFIG_FREEZER=y
113
114#
115# Processor type and features
116#
117CONFIG_IA64=y
118CONFIG_64BIT=y
119CONFIG_ZONE_DMA=y
120CONFIG_QUICKLIST=y
121CONFIG_MMU=y
122CONFIG_SWIOTLB=y
123CONFIG_IOMMU_HELPER=y
124CONFIG_RWSEM_XCHGADD_ALGORITHM=y
125CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y
126CONFIG_GENERIC_FIND_NEXT_BIT=y
127CONFIG_GENERIC_CALIBRATE_DELAY=y
128CONFIG_GENERIC_TIME=y
129CONFIG_GENERIC_TIME_VSYSCALL=y
130CONFIG_HAVE_SETUP_PER_CPU_AREA=y
131CONFIG_DMI=y
132CONFIG_EFI=y
133CONFIG_GENERIC_IOMAP=y
134CONFIG_SCHED_OMIT_FRAME_POINTER=y
135CONFIG_AUDIT_ARCH=y
136CONFIG_PARAVIRT_GUEST=y
137CONFIG_PARAVIRT=y
138CONFIG_XEN=y
139CONFIG_XEN_XENCOMM=y
140CONFIG_NO_IDLE_HZ=y
141# CONFIG_IA64_GENERIC is not set
142# CONFIG_IA64_DIG is not set
143# CONFIG_IA64_DIG_VTD is not set
144# CONFIG_IA64_HP_ZX1 is not set
145# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
146# CONFIG_IA64_SGI_SN2 is not set
147# CONFIG_IA64_SGI_UV is not set
148# CONFIG_IA64_HP_SIM is not set
149CONFIG_IA64_XEN_GUEST=y
150# CONFIG_ITANIUM is not set
151CONFIG_MCKINLEY=y
152# CONFIG_IA64_PAGE_SIZE_4KB is not set
153# CONFIG_IA64_PAGE_SIZE_8KB is not set
154CONFIG_IA64_PAGE_SIZE_16KB=y
155# CONFIG_IA64_PAGE_SIZE_64KB is not set
156CONFIG_PGTABLE_3=y
157# CONFIG_PGTABLE_4 is not set
158CONFIG_HZ=250
159# CONFIG_HZ_100 is not set
160CONFIG_HZ_250=y
161# CONFIG_HZ_300 is not set
162# CONFIG_HZ_1000 is not set
163# CONFIG_SCHED_HRTICK is not set
164CONFIG_IA64_L1_CACHE_SHIFT=7
165CONFIG_IA64_CYCLONE=y
166CONFIG_IOSAPIC=y
167CONFIG_FORCE_MAX_ZONEORDER=17
168# CONFIG_VIRT_CPU_ACCOUNTING is not set
169CONFIG_SMP=y
170CONFIG_NR_CPUS=16
171CONFIG_HOTPLUG_CPU=y
172CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
173CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
174# CONFIG_SCHED_SMT is not set
175CONFIG_PERMIT_BSP_REMOVE=y
176CONFIG_FORCE_CPEI_RETARGET=y
177CONFIG_PREEMPT_NONE=y
178# CONFIG_PREEMPT_VOLUNTARY is not set
179# CONFIG_PREEMPT is not set
180CONFIG_SELECT_MEMORY_MODEL=y
181CONFIG_FLATMEM_MANUAL=y
182# CONFIG_DISCONTIGMEM_MANUAL is not set
183# CONFIG_SPARSEMEM_MANUAL is not set
184CONFIG_FLATMEM=y
185CONFIG_FLAT_NODE_MEM_MAP=y
186CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
187CONFIG_PAGEFLAGS_EXTENDED=y
188CONFIG_SPLIT_PTLOCK_CPUS=4
189CONFIG_MIGRATION=y
190CONFIG_PHYS_ADDR_T_64BIT=y
191CONFIG_ZONE_DMA_FLAG=1
192CONFIG_BOUNCE=y
193CONFIG_NR_QUICK=1
194CONFIG_VIRT_TO_BUS=y
195CONFIG_UNEVICTABLE_LRU=y
196CONFIG_ARCH_SELECT_MEMORY_MODEL=y
197CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
198CONFIG_ARCH_FLATMEM_ENABLE=y
199CONFIG_ARCH_SPARSEMEM_ENABLE=y
200CONFIG_ARCH_POPULATES_NODE_MAP=y
201CONFIG_VIRTUAL_MEM_MAP=y
202CONFIG_HOLES_IN_ZONE=y
203# CONFIG_IA32_SUPPORT is not set
204# CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set
205CONFIG_IA64_MCA_RECOVERY=y
206CONFIG_PERFMON=y
207CONFIG_IA64_PALINFO=y
208# CONFIG_IA64_MC_ERR_INJECT is not set
209# CONFIG_IA64_ESI is not set
210# CONFIG_IA64_HP_AML_NFW is not set
211CONFIG_KEXEC=y
212# CONFIG_CRASH_DUMP is not set
213
214#
215# Firmware Drivers
216#
217# CONFIG_FIRMWARE_MEMMAP is not set
218CONFIG_EFI_VARS=y
219CONFIG_EFI_PCDP=y
220CONFIG_DMIID=y
221CONFIG_BINFMT_ELF=y
222# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
223# CONFIG_HAVE_AOUT is not set
224CONFIG_BINFMT_MISC=m
225
226#
227# Power management and ACPI options
228#
229CONFIG_PM=y
230# CONFIG_PM_DEBUG is not set
231CONFIG_PM_SLEEP=y
232CONFIG_SUSPEND=y
233CONFIG_SUSPEND_FREEZER=y
234CONFIG_ACPI=y
235CONFIG_ACPI_SLEEP=y
236CONFIG_ACPI_PROCFS=y
237CONFIG_ACPI_PROCFS_POWER=y
238CONFIG_ACPI_SYSFS_POWER=y
239CONFIG_ACPI_PROC_EVENT=y
240CONFIG_ACPI_BUTTON=m
241CONFIG_ACPI_FAN=m
242# CONFIG_ACPI_DOCK is not set
243CONFIG_ACPI_PROCESSOR=m
244CONFIG_ACPI_HOTPLUG_CPU=y
245CONFIG_ACPI_THERMAL=m
246# CONFIG_ACPI_CUSTOM_DSDT is not set
247CONFIG_ACPI_BLACKLIST_YEAR=0
248# CONFIG_ACPI_DEBUG is not set
249# CONFIG_ACPI_PCI_SLOT is not set
250CONFIG_ACPI_SYSTEM=y
251CONFIG_ACPI_CONTAINER=m
252
253#
254# CPU Frequency scaling
255#
256# CONFIG_CPU_FREQ is not set
257
258#
259# Bus options (PCI, PCMCIA)
260#
261CONFIG_PCI=y
262CONFIG_PCI_DOMAINS=y
263CONFIG_PCI_SYSCALL=y
264# CONFIG_PCIEPORTBUS is not set
265CONFIG_ARCH_SUPPORTS_MSI=y
266# CONFIG_PCI_MSI is not set
267CONFIG_PCI_LEGACY=y
268# CONFIG_PCI_DEBUG is not set
269# CONFIG_PCI_STUB is not set
270CONFIG_HOTPLUG_PCI=m
271# CONFIG_HOTPLUG_PCI_FAKE is not set
272CONFIG_HOTPLUG_PCI_ACPI=m
273# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
274# CONFIG_HOTPLUG_PCI_CPCI is not set
275# CONFIG_HOTPLUG_PCI_SHPC is not set
276# CONFIG_PCCARD is not set
277CONFIG_NET=y
278
279#
280# Networking options
281#
282# CONFIG_NET_NS is not set
283CONFIG_COMPAT_NET_DEV_OPS=y
284CONFIG_PACKET=y
285# CONFIG_PACKET_MMAP is not set
286CONFIG_UNIX=y
287CONFIG_XFRM=y
288# CONFIG_XFRM_USER is not set
289# CONFIG_XFRM_SUB_POLICY is not set
290# CONFIG_XFRM_MIGRATE is not set
291# CONFIG_XFRM_STATISTICS is not set
292# CONFIG_NET_KEY is not set
293CONFIG_INET=y
294CONFIG_IP_MULTICAST=y
295# CONFIG_IP_ADVANCED_ROUTER is not set
296CONFIG_IP_FIB_HASH=y
297# CONFIG_IP_PNP is not set
298# CONFIG_NET_IPIP is not set
299# CONFIG_NET_IPGRE is not set
300# CONFIG_IP_MROUTE is not set
301CONFIG_ARPD=y
302CONFIG_SYN_COOKIES=y
303# CONFIG_INET_AH is not set
304# CONFIG_INET_ESP is not set
305# CONFIG_INET_IPCOMP is not set
306# CONFIG_INET_XFRM_TUNNEL is not set
307# CONFIG_INET_TUNNEL is not set
308CONFIG_INET_XFRM_MODE_TRANSPORT=y
309CONFIG_INET_XFRM_MODE_TUNNEL=y
310CONFIG_INET_XFRM_MODE_BEET=y
311# CONFIG_INET_LRO is not set
312CONFIG_INET_DIAG=y
313CONFIG_INET_TCP_DIAG=y
314# CONFIG_TCP_CONG_ADVANCED is not set
315CONFIG_TCP_CONG_CUBIC=y
316CONFIG_DEFAULT_TCP_CONG="cubic"
317# CONFIG_TCP_MD5SIG is not set
318# CONFIG_IPV6 is not set
319# CONFIG_NETWORK_SECMARK is not set
320# CONFIG_NETFILTER is not set
321# CONFIG_IP_DCCP is not set
322# CONFIG_IP_SCTP is not set
323# CONFIG_TIPC is not set
324# CONFIG_ATM is not set
325# CONFIG_BRIDGE is not set
326# CONFIG_NET_DSA is not set
327# CONFIG_VLAN_8021Q is not set
328# CONFIG_DECNET is not set
329# CONFIG_LLC2 is not set
330# CONFIG_IPX is not set
331# CONFIG_ATALK is not set
332# CONFIG_X25 is not set
333# CONFIG_LAPB is not set
334# CONFIG_ECONET is not set
335# CONFIG_WAN_ROUTER is not set
336# CONFIG_NET_SCHED is not set
337# CONFIG_DCB is not set
338
339#
340# Network testing
341#
342# CONFIG_NET_PKTGEN is not set
343# CONFIG_HAMRADIO is not set
344# CONFIG_CAN is not set
345# CONFIG_IRDA is not set
346# CONFIG_BT is not set
347# CONFIG_AF_RXRPC is not set
348# CONFIG_PHONET is not set
349# CONFIG_WIRELESS is not set
350# CONFIG_WIMAX is not set
351# CONFIG_RFKILL is not set
352# CONFIG_NET_9P is not set
353
354#
355# Device Drivers
356#
357
358#
359# Generic Driver Options
360#
361CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
362CONFIG_STANDALONE=y
363CONFIG_PREVENT_FIRMWARE_BUILD=y
364CONFIG_FW_LOADER=y
365CONFIG_FIRMWARE_IN_KERNEL=y
366CONFIG_EXTRA_FIRMWARE=""
367# CONFIG_DEBUG_DRIVER is not set
368# CONFIG_DEBUG_DEVRES is not set
369# CONFIG_SYS_HYPERVISOR is not set
370# CONFIG_CONNECTOR is not set
371# CONFIG_MTD is not set
372# CONFIG_PARPORT is not set
373CONFIG_PNP=y
374CONFIG_PNP_DEBUG_MESSAGES=y
375
376#
377# Protocols
378#
379CONFIG_PNPACPI=y
380CONFIG_BLK_DEV=y
381# CONFIG_BLK_CPQ_DA is not set
382# CONFIG_BLK_CPQ_CISS_DA is not set
383# CONFIG_BLK_DEV_DAC960 is not set
384# CONFIG_BLK_DEV_UMEM is not set
385# CONFIG_BLK_DEV_COW_COMMON is not set
386CONFIG_BLK_DEV_LOOP=m
387CONFIG_BLK_DEV_CRYPTOLOOP=m
388CONFIG_BLK_DEV_NBD=m
389# CONFIG_BLK_DEV_SX8 is not set
390# CONFIG_BLK_DEV_UB is not set
391CONFIG_BLK_DEV_RAM=y
392CONFIG_BLK_DEV_RAM_COUNT=16
393CONFIG_BLK_DEV_RAM_SIZE=4096
394# CONFIG_BLK_DEV_XIP is not set
395# CONFIG_CDROM_PKTCDVD is not set
396# CONFIG_ATA_OVER_ETH is not set
397CONFIG_XEN_BLKDEV_FRONTEND=y
398# CONFIG_BLK_DEV_HD is not set
399CONFIG_MISC_DEVICES=y
400# CONFIG_PHANTOM is not set
401# CONFIG_EEPROM_93CX6 is not set
402# CONFIG_SGI_IOC4 is not set
403# CONFIG_TIFM_CORE is not set
404# CONFIG_ICS932S401 is not set
405# CONFIG_ENCLOSURE_SERVICES is not set
406# CONFIG_HP_ILO is not set
407# CONFIG_C2PORT is not set
408CONFIG_HAVE_IDE=y
409CONFIG_IDE=y
410
411#
412# Please see Documentation/ide/ide.txt for help/info on IDE drives
413#
414CONFIG_IDE_TIMINGS=y
415CONFIG_IDE_ATAPI=y
416# CONFIG_BLK_DEV_IDE_SATA is not set
417CONFIG_IDE_GD=y
418CONFIG_IDE_GD_ATA=y
419# CONFIG_IDE_GD_ATAPI is not set
420CONFIG_BLK_DEV_IDECD=y
421CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
422# CONFIG_BLK_DEV_IDETAPE is not set
423# CONFIG_BLK_DEV_IDEACPI is not set
424# CONFIG_IDE_TASK_IOCTL is not set
425CONFIG_IDE_PROC_FS=y
426
427#
428# IDE chipset support/bugfixes
429#
430# CONFIG_IDE_GENERIC is not set
431# CONFIG_BLK_DEV_PLATFORM is not set
432# CONFIG_BLK_DEV_IDEPNP is not set
433CONFIG_BLK_DEV_IDEDMA_SFF=y
434
435#
436# PCI IDE chipsets support
437#
438CONFIG_BLK_DEV_IDEPCI=y
439CONFIG_IDEPCI_PCIBUS_ORDER=y
440# CONFIG_BLK_DEV_OFFBOARD is not set
441CONFIG_BLK_DEV_GENERIC=y
442# CONFIG_BLK_DEV_OPTI621 is not set
443CONFIG_BLK_DEV_IDEDMA_PCI=y
444# CONFIG_BLK_DEV_AEC62XX is not set
445# CONFIG_BLK_DEV_ALI15X3 is not set
446# CONFIG_BLK_DEV_AMD74XX is not set
447CONFIG_BLK_DEV_CMD64X=y
448# CONFIG_BLK_DEV_TRIFLEX is not set
449# CONFIG_BLK_DEV_CS5520 is not set
450# CONFIG_BLK_DEV_CS5530 is not set
451# CONFIG_BLK_DEV_HPT366 is not set
452# CONFIG_BLK_DEV_JMICRON is not set
453# CONFIG_BLK_DEV_SC1200 is not set
454CONFIG_BLK_DEV_PIIX=y
455# CONFIG_BLK_DEV_IT8172 is not set
456# CONFIG_BLK_DEV_IT8213 is not set
457# CONFIG_BLK_DEV_IT821X is not set
458# CONFIG_BLK_DEV_NS87415 is not set
459# CONFIG_BLK_DEV_PDC202XX_OLD is not set
460# CONFIG_BLK_DEV_PDC202XX_NEW is not set
461# CONFIG_BLK_DEV_SVWKS is not set
462# CONFIG_BLK_DEV_SIIMAGE is not set
463# CONFIG_BLK_DEV_SLC90E66 is not set
464# CONFIG_BLK_DEV_TRM290 is not set
465# CONFIG_BLK_DEV_VIA82CXXX is not set
466# CONFIG_BLK_DEV_TC86C001 is not set
467CONFIG_BLK_DEV_IDEDMA=y
468
469#
470# SCSI device support
471#
472# CONFIG_RAID_ATTRS is not set
473CONFIG_SCSI=y
474CONFIG_SCSI_DMA=y
475# CONFIG_SCSI_TGT is not set
476CONFIG_SCSI_NETLINK=y
477CONFIG_SCSI_PROC_FS=y
478
479#
480# SCSI support type (disk, tape, CD-ROM)
481#
482CONFIG_BLK_DEV_SD=y
483CONFIG_CHR_DEV_ST=m
484# CONFIG_CHR_DEV_OSST is not set
485CONFIG_BLK_DEV_SR=m
486# CONFIG_BLK_DEV_SR_VENDOR is not set
487CONFIG_CHR_DEV_SG=m
488# CONFIG_CHR_DEV_SCH is not set
489
490#
491# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
492#
493# CONFIG_SCSI_MULTI_LUN is not set
494# CONFIG_SCSI_CONSTANTS is not set
495# CONFIG_SCSI_LOGGING is not set
496# CONFIG_SCSI_SCAN_ASYNC is not set
497CONFIG_SCSI_WAIT_SCAN=m
498
499#
500# SCSI Transports
501#
502CONFIG_SCSI_SPI_ATTRS=y
503CONFIG_SCSI_FC_ATTRS=y
504# CONFIG_SCSI_ISCSI_ATTRS is not set
505# CONFIG_SCSI_SAS_LIBSAS is not set
506# CONFIG_SCSI_SRP_ATTRS is not set
507CONFIG_SCSI_LOWLEVEL=y
508# CONFIG_ISCSI_TCP is not set
509# CONFIG_SCSI_CXGB3_ISCSI is not set
510# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
511# CONFIG_SCSI_3W_9XXX is not set
512# CONFIG_SCSI_ACARD is not set
513# CONFIG_SCSI_AACRAID is not set
514# CONFIG_SCSI_AIC7XXX is not set
515# CONFIG_SCSI_AIC7XXX_OLD is not set
516# CONFIG_SCSI_AIC79XX is not set
517# CONFIG_SCSI_AIC94XX is not set
518# CONFIG_SCSI_DPT_I2O is not set
519# CONFIG_SCSI_ADVANSYS is not set
520# CONFIG_SCSI_ARCMSR is not set
521# CONFIG_MEGARAID_NEWGEN is not set
522# CONFIG_MEGARAID_LEGACY is not set
523# CONFIG_MEGARAID_SAS is not set
524# CONFIG_SCSI_HPTIOP is not set
525# CONFIG_LIBFC is not set
526# CONFIG_FCOE is not set
527# CONFIG_SCSI_DMX3191D is not set
528# CONFIG_SCSI_FUTURE_DOMAIN is not set
529# CONFIG_SCSI_IPS is not set
530# CONFIG_SCSI_INITIO is not set
531# CONFIG_SCSI_INIA100 is not set
532# CONFIG_SCSI_MVSAS is not set
533# CONFIG_SCSI_STEX is not set
534CONFIG_SCSI_SYM53C8XX_2=y
535CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
536CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
537CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
538CONFIG_SCSI_SYM53C8XX_MMIO=y
539CONFIG_SCSI_QLOGIC_1280=y
540# CONFIG_SCSI_QLA_FC is not set
541# CONFIG_SCSI_QLA_ISCSI is not set
542# CONFIG_SCSI_LPFC is not set
543# CONFIG_SCSI_DC395x is not set
544# CONFIG_SCSI_DC390T is not set
545# CONFIG_SCSI_DEBUG is not set
546# CONFIG_SCSI_SRP is not set
547# CONFIG_SCSI_DH is not set
548# CONFIG_ATA is not set
549CONFIG_MD=y
550CONFIG_BLK_DEV_MD=m
551CONFIG_MD_LINEAR=m
552CONFIG_MD_RAID0=m
553CONFIG_MD_RAID1=m
554# CONFIG_MD_RAID10 is not set
555# CONFIG_MD_RAID456 is not set
556CONFIG_MD_MULTIPATH=m
557# CONFIG_MD_FAULTY is not set
558CONFIG_BLK_DEV_DM=m
559# CONFIG_DM_DEBUG is not set
560CONFIG_DM_CRYPT=m
561CONFIG_DM_SNAPSHOT=m
562CONFIG_DM_MIRROR=m
563CONFIG_DM_ZERO=m
564# CONFIG_DM_MULTIPATH is not set
565# CONFIG_DM_DELAY is not set
566# CONFIG_DM_UEVENT is not set
567CONFIG_FUSION=y
568CONFIG_FUSION_SPI=y
569CONFIG_FUSION_FC=y
570# CONFIG_FUSION_SAS is not set
571CONFIG_FUSION_MAX_SGE=128
572CONFIG_FUSION_CTL=y
573# CONFIG_FUSION_LOGGING is not set
574
575#
576# IEEE 1394 (FireWire) support
577#
578
579#
580# Enable only one of the two stacks, unless you know what you are doing
581#
582# CONFIG_FIREWIRE is not set
583# CONFIG_IEEE1394 is not set
584# CONFIG_I2O is not set
585CONFIG_NETDEVICES=y
586CONFIG_DUMMY=m
587# CONFIG_BONDING is not set
588# CONFIG_MACVLAN is not set
589# CONFIG_EQUALIZER is not set
590# CONFIG_TUN is not set
591# CONFIG_VETH is not set
592# CONFIG_NET_SB1000 is not set
593# CONFIG_ARCNET is not set
594CONFIG_PHYLIB=y
595
596#
597# MII PHY device drivers
598#
599# CONFIG_MARVELL_PHY is not set
600# CONFIG_DAVICOM_PHY is not set
601# CONFIG_QSEMI_PHY is not set
602# CONFIG_LXT_PHY is not set
603# CONFIG_CICADA_PHY is not set
604# CONFIG_VITESSE_PHY is not set
605# CONFIG_SMSC_PHY is not set
606# CONFIG_BROADCOM_PHY is not set
607# CONFIG_ICPLUS_PHY is not set
608# CONFIG_REALTEK_PHY is not set
609# CONFIG_NATIONAL_PHY is not set
610# CONFIG_STE10XP is not set
611# CONFIG_LSI_ET1011C_PHY is not set
612# CONFIG_FIXED_PHY is not set
613# CONFIG_MDIO_BITBANG is not set
614CONFIG_NET_ETHERNET=y
615CONFIG_MII=m
616# CONFIG_HAPPYMEAL is not set
617# CONFIG_SUNGEM is not set
618# CONFIG_CASSINI is not set
619# CONFIG_NET_VENDOR_3COM is not set
620CONFIG_NET_TULIP=y
621# CONFIG_DE2104X is not set
622CONFIG_TULIP=m
623# CONFIG_TULIP_MWI is not set
624# CONFIG_TULIP_MMIO is not set
625# CONFIG_TULIP_NAPI is not set
626# CONFIG_DE4X5 is not set
627# CONFIG_WINBOND_840 is not set
628# CONFIG_DM9102 is not set
629# CONFIG_ULI526X is not set
630# CONFIG_HP100 is not set
631# CONFIG_IBM_NEW_EMAC_ZMII is not set
632# CONFIG_IBM_NEW_EMAC_RGMII is not set
633# CONFIG_IBM_NEW_EMAC_TAH is not set
634# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
635# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
636# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
637# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
638CONFIG_NET_PCI=y
639# CONFIG_PCNET32 is not set
640# CONFIG_AMD8111_ETH is not set
641# CONFIG_ADAPTEC_STARFIRE is not set
642# CONFIG_B44 is not set
643# CONFIG_FORCEDETH is not set
644CONFIG_E100=m
645# CONFIG_FEALNX is not set
646# CONFIG_NATSEMI is not set
647# CONFIG_NE2K_PCI is not set
648# CONFIG_8139CP is not set
649# CONFIG_8139TOO is not set
650# CONFIG_R6040 is not set
651# CONFIG_SIS900 is not set
652# CONFIG_EPIC100 is not set
653# CONFIG_SMSC9420 is not set
654# CONFIG_SUNDANCE is not set
655# CONFIG_TLAN is not set
656# CONFIG_VIA_RHINE is not set
657# CONFIG_SC92031 is not set
658# CONFIG_ATL2 is not set
659CONFIG_NETDEV_1000=y
660# CONFIG_ACENIC is not set
661# CONFIG_DL2K is not set
662CONFIG_E1000=y
663# CONFIG_E1000E is not set
664# CONFIG_IP1000 is not set
665# CONFIG_IGB is not set
666# CONFIG_NS83820 is not set
667# CONFIG_HAMACHI is not set
668# CONFIG_YELLOWFIN is not set
669# CONFIG_R8169 is not set
670# CONFIG_SIS190 is not set
671# CONFIG_SKGE is not set
672# CONFIG_SKY2 is not set
673# CONFIG_VIA_VELOCITY is not set
674CONFIG_TIGON3=y
675# CONFIG_BNX2 is not set
676# CONFIG_QLA3XXX is not set
677# CONFIG_ATL1 is not set
678# CONFIG_ATL1E is not set
679# CONFIG_JME is not set
680CONFIG_NETDEV_10000=y
681# CONFIG_CHELSIO_T1 is not set
682CONFIG_CHELSIO_T3_DEPENDS=y
683# CONFIG_CHELSIO_T3 is not set
684# CONFIG_ENIC is not set
685# CONFIG_IXGBE is not set
686# CONFIG_IXGB is not set
687# CONFIG_S2IO is not set
688# CONFIG_MYRI10GE is not set
689# CONFIG_NETXEN_NIC is not set
690# CONFIG_NIU is not set
691# CONFIG_MLX4_EN is not set
692# CONFIG_MLX4_CORE is not set
693# CONFIG_TEHUTI is not set
694# CONFIG_BNX2X is not set
695# CONFIG_QLGE is not set
696# CONFIG_SFC is not set
697# CONFIG_TR is not set
698
699#
700# Wireless LAN
701#
702# CONFIG_WLAN_PRE80211 is not set
703# CONFIG_WLAN_80211 is not set
704# CONFIG_IWLWIFI_LEDS is not set
705
706#
707# Enable WiMAX (Networking options) to see the WiMAX drivers
708#
709
710#
711# USB Network Adapters
712#
713# CONFIG_USB_CATC is not set
714# CONFIG_USB_KAWETH is not set
715# CONFIG_USB_PEGASUS is not set
716# CONFIG_USB_RTL8150 is not set
717# CONFIG_USB_USBNET is not set
718# CONFIG_WAN is not set
719CONFIG_XEN_NETDEV_FRONTEND=y
720# CONFIG_FDDI is not set
721# CONFIG_HIPPI is not set
722# CONFIG_PPP is not set
723# CONFIG_SLIP is not set
724# CONFIG_NET_FC is not set
725CONFIG_NETCONSOLE=y
726# CONFIG_NETCONSOLE_DYNAMIC is not set
727CONFIG_NETPOLL=y
728# CONFIG_NETPOLL_TRAP is not set
729CONFIG_NET_POLL_CONTROLLER=y
730# CONFIG_ISDN is not set
731# CONFIG_PHONE is not set
732
733#
734# Input device support
735#
736CONFIG_INPUT=y
737# CONFIG_INPUT_FF_MEMLESS is not set
738# CONFIG_INPUT_POLLDEV is not set
739
740#
741# Userland interfaces
742#
743CONFIG_INPUT_MOUSEDEV=y
744CONFIG_INPUT_MOUSEDEV_PSAUX=y
745CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
746CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
747# CONFIG_INPUT_JOYDEV is not set
748# CONFIG_INPUT_EVDEV is not set
749# CONFIG_INPUT_EVBUG is not set
750
751#
752# Input Device Drivers
753#
754CONFIG_INPUT_KEYBOARD=y
755CONFIG_KEYBOARD_ATKBD=y
756# CONFIG_KEYBOARD_SUNKBD is not set
757# CONFIG_KEYBOARD_LKKBD is not set
758# CONFIG_KEYBOARD_XTKBD is not set
759# CONFIG_KEYBOARD_NEWTON is not set
760# CONFIG_KEYBOARD_STOWAWAY is not set
761CONFIG_INPUT_MOUSE=y
762CONFIG_MOUSE_PS2=y
763CONFIG_MOUSE_PS2_ALPS=y
764CONFIG_MOUSE_PS2_LOGIPS2PP=y
765CONFIG_MOUSE_PS2_SYNAPTICS=y
766CONFIG_MOUSE_PS2_LIFEBOOK=y
767CONFIG_MOUSE_PS2_TRACKPOINT=y
768# CONFIG_MOUSE_PS2_ELANTECH is not set
769# CONFIG_MOUSE_PS2_TOUCHKIT is not set
770# CONFIG_MOUSE_SERIAL is not set
771# CONFIG_MOUSE_APPLETOUCH is not set
772# CONFIG_MOUSE_BCM5974 is not set
773# CONFIG_MOUSE_VSXXXAA is not set
774# CONFIG_INPUT_JOYSTICK is not set
775# CONFIG_INPUT_TABLET is not set
776# CONFIG_INPUT_TOUCHSCREEN is not set
777# CONFIG_INPUT_MISC is not set
778
779#
780# Hardware I/O ports
781#
782CONFIG_SERIO=y
783CONFIG_SERIO_I8042=y
784# CONFIG_SERIO_SERPORT is not set
785# CONFIG_SERIO_PCIPS2 is not set
786CONFIG_SERIO_LIBPS2=y
787# CONFIG_SERIO_RAW is not set
788CONFIG_GAMEPORT=m
789# CONFIG_GAMEPORT_NS558 is not set
790# CONFIG_GAMEPORT_L4 is not set
791# CONFIG_GAMEPORT_EMU10K1 is not set
792# CONFIG_GAMEPORT_FM801 is not set
793
794#
795# Character devices
796#
797CONFIG_VT=y
798CONFIG_CONSOLE_TRANSLATIONS=y
799CONFIG_VT_CONSOLE=y
800CONFIG_HW_CONSOLE=y
801# CONFIG_VT_HW_CONSOLE_BINDING is not set
802CONFIG_DEVKMEM=y
803CONFIG_SERIAL_NONSTANDARD=y
804# CONFIG_COMPUTONE is not set
805# CONFIG_ROCKETPORT is not set
806# CONFIG_CYCLADES is not set
807# CONFIG_DIGIEPCA is not set
808# CONFIG_MOXA_INTELLIO is not set
809# CONFIG_MOXA_SMARTIO is not set
810# CONFIG_ISI is not set
811# CONFIG_SYNCLINKMP is not set
812# CONFIG_SYNCLINK_GT is not set
813# CONFIG_N_HDLC is not set
814# CONFIG_RISCOM8 is not set
815# CONFIG_SPECIALIX is not set
816# CONFIG_SX is not set
817# CONFIG_RIO is not set
818# CONFIG_STALDRV is not set
819# CONFIG_NOZOMI is not set
820
821#
822# Serial drivers
823#
824CONFIG_SERIAL_8250=y
825CONFIG_SERIAL_8250_CONSOLE=y
826CONFIG_SERIAL_8250_PCI=y
827CONFIG_SERIAL_8250_PNP=y
828CONFIG_SERIAL_8250_NR_UARTS=6
829CONFIG_SERIAL_8250_RUNTIME_UARTS=4
830CONFIG_SERIAL_8250_EXTENDED=y
831CONFIG_SERIAL_8250_SHARE_IRQ=y
832# CONFIG_SERIAL_8250_DETECT_IRQ is not set
833# CONFIG_SERIAL_8250_RSA is not set
834
835#
836# Non-8250 serial port support
837#
838CONFIG_SERIAL_CORE=y
839CONFIG_SERIAL_CORE_CONSOLE=y
840# CONFIG_SERIAL_JSM is not set
841CONFIG_UNIX98_PTYS=y
842# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
843CONFIG_LEGACY_PTYS=y
844CONFIG_LEGACY_PTY_COUNT=256
845CONFIG_HVC_DRIVER=y
846CONFIG_HVC_IRQ=y
847CONFIG_HVC_XEN=y
848# CONFIG_IPMI_HANDLER is not set
849# CONFIG_HW_RANDOM is not set
850CONFIG_EFI_RTC=y
851# CONFIG_R3964 is not set
852# CONFIG_APPLICOM is not set
853CONFIG_RAW_DRIVER=m
854CONFIG_MAX_RAW_DEVS=256
855CONFIG_HPET=y
856CONFIG_HPET_MMAP=y
857# CONFIG_HANGCHECK_TIMER is not set
858# CONFIG_TCG_TPM is not set
859CONFIG_DEVPORT=y
860CONFIG_I2C=m
861CONFIG_I2C_BOARDINFO=y
862# CONFIG_I2C_CHARDEV is not set
863CONFIG_I2C_HELPER_AUTO=y
864CONFIG_I2C_ALGOBIT=m
865
866#
867# I2C Hardware Bus support
868#
869
870#
871# PC SMBus host controller drivers
872#
873# CONFIG_I2C_ALI1535 is not set
874# CONFIG_I2C_ALI1563 is not set
875# CONFIG_I2C_ALI15X3 is not set
876# CONFIG_I2C_AMD756 is not set
877# CONFIG_I2C_AMD8111 is not set
878# CONFIG_I2C_I801 is not set
879# CONFIG_I2C_ISCH is not set
880# CONFIG_I2C_PIIX4 is not set
881# CONFIG_I2C_NFORCE2 is not set
882# CONFIG_I2C_SIS5595 is not set
883# CONFIG_I2C_SIS630 is not set
884# CONFIG_I2C_SIS96X is not set
885# CONFIG_I2C_VIA is not set
886# CONFIG_I2C_VIAPRO is not set
887
888#
889# I2C system bus drivers (mostly embedded / system-on-chip)
890#
891# CONFIG_I2C_OCORES is not set
892# CONFIG_I2C_SIMTEC is not set
893
894#
895# External I2C/SMBus adapter drivers
896#
897# CONFIG_I2C_PARPORT_LIGHT is not set
898# CONFIG_I2C_TAOS_EVM is not set
899# CONFIG_I2C_TINY_USB is not set
900
901#
902# Graphics adapter I2C/DDC channel drivers
903#
904# CONFIG_I2C_VOODOO3 is not set
905
906#
907# Other I2C/SMBus bus drivers
908#
909# CONFIG_I2C_PCA_PLATFORM is not set
910# CONFIG_I2C_STUB is not set
911
912#
913# Miscellaneous I2C Chip support
914#
915# CONFIG_DS1682 is not set
916# CONFIG_AT24 is not set
917# CONFIG_SENSORS_EEPROM is not set
918# CONFIG_SENSORS_PCF8574 is not set
919# CONFIG_PCF8575 is not set
920# CONFIG_SENSORS_PCA9539 is not set
921# CONFIG_SENSORS_PCF8591 is not set
922# CONFIG_SENSORS_MAX6875 is not set
923# CONFIG_SENSORS_TSL2550 is not set
924# CONFIG_I2C_DEBUG_CORE is not set
925# CONFIG_I2C_DEBUG_ALGO is not set
926# CONFIG_I2C_DEBUG_BUS is not set
927# CONFIG_I2C_DEBUG_CHIP is not set
928# CONFIG_SPI is not set
929# CONFIG_W1 is not set
930CONFIG_POWER_SUPPLY=y
931# CONFIG_POWER_SUPPLY_DEBUG is not set
932# CONFIG_PDA_POWER is not set
933# CONFIG_BATTERY_DS2760 is not set
934# CONFIG_BATTERY_BQ27x00 is not set
935CONFIG_HWMON=y
936# CONFIG_HWMON_VID is not set
937# CONFIG_SENSORS_AD7414 is not set
938# CONFIG_SENSORS_AD7418 is not set
939# CONFIG_SENSORS_ADM1021 is not set
940# CONFIG_SENSORS_ADM1025 is not set
941# CONFIG_SENSORS_ADM1026 is not set
942# CONFIG_SENSORS_ADM1029 is not set
943# CONFIG_SENSORS_ADM1031 is not set
944# CONFIG_SENSORS_ADM9240 is not set
945# CONFIG_SENSORS_ADT7462 is not set
946# CONFIG_SENSORS_ADT7470 is not set
947# CONFIG_SENSORS_ADT7473 is not set
948# CONFIG_SENSORS_ATXP1 is not set
949# CONFIG_SENSORS_DS1621 is not set
950# CONFIG_SENSORS_I5K_AMB is not set
951# CONFIG_SENSORS_F71805F is not set
952# CONFIG_SENSORS_F71882FG is not set
953# CONFIG_SENSORS_F75375S is not set
954# CONFIG_SENSORS_GL518SM is not set
955# CONFIG_SENSORS_GL520SM is not set
956# CONFIG_SENSORS_IT87 is not set
957# CONFIG_SENSORS_LM63 is not set
958# CONFIG_SENSORS_LM75 is not set
959# CONFIG_SENSORS_LM77 is not set
960# CONFIG_SENSORS_LM78 is not set
961# CONFIG_SENSORS_LM80 is not set
962# CONFIG_SENSORS_LM83 is not set
963# CONFIG_SENSORS_LM85 is not set
964# CONFIG_SENSORS_LM87 is not set
965# CONFIG_SENSORS_LM90 is not set
966# CONFIG_SENSORS_LM92 is not set
967# CONFIG_SENSORS_LM93 is not set
968# CONFIG_SENSORS_LTC4245 is not set
969# CONFIG_SENSORS_MAX1619 is not set
970# CONFIG_SENSORS_MAX6650 is not set
971# CONFIG_SENSORS_PC87360 is not set
972# CONFIG_SENSORS_PC87427 is not set
973# CONFIG_SENSORS_SIS5595 is not set
974# CONFIG_SENSORS_DME1737 is not set
975# CONFIG_SENSORS_SMSC47M1 is not set
976# CONFIG_SENSORS_SMSC47M192 is not set
977# CONFIG_SENSORS_SMSC47B397 is not set
978# CONFIG_SENSORS_ADS7828 is not set
979# CONFIG_SENSORS_THMC50 is not set
980# CONFIG_SENSORS_VIA686A is not set
981# CONFIG_SENSORS_VT1211 is not set
982# CONFIG_SENSORS_VT8231 is not set
983# CONFIG_SENSORS_W83781D is not set
984# CONFIG_SENSORS_W83791D is not set
985# CONFIG_SENSORS_W83792D is not set
986# CONFIG_SENSORS_W83793 is not set
987# CONFIG_SENSORS_W83L785TS is not set
988# CONFIG_SENSORS_W83L786NG is not set
989# CONFIG_SENSORS_W83627HF is not set
990# CONFIG_SENSORS_W83627EHF is not set
991# CONFIG_SENSORS_LIS3LV02D is not set
992# CONFIG_HWMON_DEBUG_CHIP is not set
993CONFIG_THERMAL=m
994# CONFIG_THERMAL_HWMON is not set
995# CONFIG_WATCHDOG is not set
996CONFIG_SSB_POSSIBLE=y
997
998#
999# Sonics Silicon Backplane
1000#
1001# CONFIG_SSB is not set
1002
1003#
1004# Multifunction device drivers
1005#
1006# CONFIG_MFD_CORE is not set
1007# CONFIG_MFD_SM501 is not set
1008# CONFIG_HTC_PASIC3 is not set
1009# CONFIG_MFD_TMIO is not set
1010# CONFIG_MFD_WM8400 is not set
1011# CONFIG_MFD_WM8350_I2C is not set
1012# CONFIG_MFD_PCF50633 is not set
1013# CONFIG_REGULATOR is not set
1014
1015#
1016# Multimedia devices
1017#
1018
1019#
1020# Multimedia core support
1021#
1022# CONFIG_VIDEO_DEV is not set
1023# CONFIG_DVB_CORE is not set
1024# CONFIG_VIDEO_MEDIA is not set
1025
1026#
1027# Multimedia drivers
1028#
1029CONFIG_DAB=y
1030# CONFIG_USB_DABUSB is not set
1031
1032#
1033# Graphics support
1034#
1035CONFIG_AGP=m
1036CONFIG_DRM=m
1037CONFIG_DRM_TDFX=m
1038CONFIG_DRM_R128=m
1039CONFIG_DRM_RADEON=m
1040CONFIG_DRM_MGA=m
1041CONFIG_DRM_SIS=m
1042# CONFIG_DRM_VIA is not set
1043# CONFIG_DRM_SAVAGE is not set
1044# CONFIG_VGASTATE is not set
1045# CONFIG_VIDEO_OUTPUT_CONTROL is not set
1046# CONFIG_FB is not set
1047# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
1048
1049#
1050# Display device support
1051#
1052# CONFIG_DISPLAY_SUPPORT is not set
1053
1054#
1055# Console display driver support
1056#
1057CONFIG_VGA_CONSOLE=y
1058# CONFIG_VGACON_SOFT_SCROLLBACK is not set
1059CONFIG_DUMMY_CONSOLE=y
1060# CONFIG_SOUND is not set
1061CONFIG_HID_SUPPORT=y
1062CONFIG_HID=y
1063# CONFIG_HID_DEBUG is not set
1064# CONFIG_HIDRAW is not set
1065
1066#
1067# USB Input Devices
1068#
1069CONFIG_USB_HID=y
1070# CONFIG_HID_PID is not set
1071# CONFIG_USB_HIDDEV is not set
1072
1073#
1074# Special HID drivers
1075#
1076CONFIG_HID_COMPAT=y
1077CONFIG_HID_A4TECH=y
1078CONFIG_HID_APPLE=y
1079CONFIG_HID_BELKIN=y
1080CONFIG_HID_CHERRY=y
1081CONFIG_HID_CHICONY=y
1082CONFIG_HID_CYPRESS=y
1083CONFIG_HID_EZKEY=y
1084CONFIG_HID_GYRATION=y
1085CONFIG_HID_LOGITECH=y
1086# CONFIG_LOGITECH_FF is not set
1087# CONFIG_LOGIRUMBLEPAD2_FF is not set
1088CONFIG_HID_MICROSOFT=y
1089CONFIG_HID_MONTEREY=y
1090CONFIG_HID_NTRIG=y
1091CONFIG_HID_PANTHERLORD=y
1092# CONFIG_PANTHERLORD_FF is not set
1093CONFIG_HID_PETALYNX=y
1094CONFIG_HID_SAMSUNG=y
1095CONFIG_HID_SONY=y
1096CONFIG_HID_SUNPLUS=y
1097# CONFIG_GREENASIA_FF is not set
1098CONFIG_HID_TOPSEED=y
1099# CONFIG_THRUSTMASTER_FF is not set
1100# CONFIG_ZEROPLUS_FF is not set
1101CONFIG_USB_SUPPORT=y
1102CONFIG_USB_ARCH_HAS_HCD=y
1103CONFIG_USB_ARCH_HAS_OHCI=y
1104CONFIG_USB_ARCH_HAS_EHCI=y
1105CONFIG_USB=y
1106# CONFIG_USB_DEBUG is not set
1107# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
1108
1109#
1110# Miscellaneous USB options
1111#
1112CONFIG_USB_DEVICEFS=y
1113CONFIG_USB_DEVICE_CLASS=y
1114# CONFIG_USB_DYNAMIC_MINORS is not set
1115# CONFIG_USB_SUSPEND is not set
1116# CONFIG_USB_OTG is not set
1117# CONFIG_USB_MON is not set
1118# CONFIG_USB_WUSB is not set
1119# CONFIG_USB_WUSB_CBAF is not set
1120
1121#
1122# USB Host Controller Drivers
1123#
1124# CONFIG_USB_C67X00_HCD is not set
1125CONFIG_USB_EHCI_HCD=m
1126# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1127# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1128# CONFIG_USB_OXU210HP_HCD is not set
1129# CONFIG_USB_ISP116X_HCD is not set
1130# CONFIG_USB_ISP1760_HCD is not set
1131CONFIG_USB_OHCI_HCD=m
1132# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
1133# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
1134CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1135CONFIG_USB_UHCI_HCD=y
1136# CONFIG_USB_SL811_HCD is not set
1137# CONFIG_USB_R8A66597_HCD is not set
1138# CONFIG_USB_WHCI_HCD is not set
1139# CONFIG_USB_HWA_HCD is not set
1140
1141#
1142# USB Device Class drivers
1143#
1144# CONFIG_USB_ACM is not set
1145# CONFIG_USB_PRINTER is not set
1146# CONFIG_USB_WDM is not set
1147# CONFIG_USB_TMC is not set
1148
1149#
1150# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
1151#
1152
1153#
1154# see USB_STORAGE Help for more information
1155#
1156CONFIG_USB_STORAGE=m
1157# CONFIG_USB_STORAGE_DEBUG is not set
1158# CONFIG_USB_STORAGE_DATAFAB is not set
1159# CONFIG_USB_STORAGE_FREECOM is not set
1160# CONFIG_USB_STORAGE_ISD200 is not set
1161# CONFIG_USB_STORAGE_USBAT is not set
1162# CONFIG_USB_STORAGE_SDDR09 is not set
1163# CONFIG_USB_STORAGE_SDDR55 is not set
1164# CONFIG_USB_STORAGE_JUMPSHOT is not set
1165# CONFIG_USB_STORAGE_ALAUDA is not set
1166# CONFIG_USB_STORAGE_ONETOUCH is not set
1167# CONFIG_USB_STORAGE_KARMA is not set
1168# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
1169# CONFIG_USB_LIBUSUAL is not set
1170
1171#
1172# USB Imaging devices
1173#
1174# CONFIG_USB_MDC800 is not set
1175# CONFIG_USB_MICROTEK is not set
1176
1177#
1178# USB port drivers
1179#
1180# CONFIG_USB_SERIAL is not set
1181
1182#
1183# USB Miscellaneous drivers
1184#
1185# CONFIG_USB_EMI62 is not set
1186# CONFIG_USB_EMI26 is not set
1187# CONFIG_USB_ADUTUX is not set
1188# CONFIG_USB_SEVSEG is not set
1189# CONFIG_USB_RIO500 is not set
1190# CONFIG_USB_LEGOTOWER is not set
1191# CONFIG_USB_LCD is not set
1192# CONFIG_USB_BERRY_CHARGE is not set
1193# CONFIG_USB_LED is not set
1194# CONFIG_USB_CYPRESS_CY7C63 is not set
1195# CONFIG_USB_CYTHERM is not set
1196# CONFIG_USB_PHIDGET is not set
1197# CONFIG_USB_IDMOUSE is not set
1198# CONFIG_USB_FTDI_ELAN is not set
1199# CONFIG_USB_APPLEDISPLAY is not set
1200# CONFIG_USB_SISUSBVGA is not set
1201# CONFIG_USB_LD is not set
1202# CONFIG_USB_TRANCEVIBRATOR is not set
1203# CONFIG_USB_IOWARRIOR is not set
1204# CONFIG_USB_TEST is not set
1205# CONFIG_USB_ISIGHTFW is not set
1206# CONFIG_USB_VST is not set
1207# CONFIG_USB_GADGET is not set
1208
1209#
1210# OTG and related infrastructure
1211#
1212# CONFIG_UWB is not set
1213# CONFIG_MMC is not set
1214# CONFIG_MEMSTICK is not set
1215# CONFIG_NEW_LEDS is not set
1216# CONFIG_ACCESSIBILITY is not set
1217# CONFIG_INFINIBAND is not set
1218# CONFIG_RTC_CLASS is not set
1219# CONFIG_DMADEVICES is not set
1220# CONFIG_UIO is not set
1221CONFIG_XEN_BALLOON=y
1222CONFIG_XEN_SCRUB_PAGES=y
1223CONFIG_XENFS=y
1224CONFIG_XEN_COMPAT_XENFS=y
1225# CONFIG_STAGING is not set
1226# CONFIG_MSPEC is not set
1227
1228#
1229# File systems
1230#
1231CONFIG_EXT2_FS=y
1232CONFIG_EXT2_FS_XATTR=y
1233CONFIG_EXT2_FS_POSIX_ACL=y
1234CONFIG_EXT2_FS_SECURITY=y
1235# CONFIG_EXT2_FS_XIP is not set
1236CONFIG_EXT3_FS=y
1237CONFIG_EXT3_FS_XATTR=y
1238CONFIG_EXT3_FS_POSIX_ACL=y
1239CONFIG_EXT3_FS_SECURITY=y
1240# CONFIG_EXT4_FS is not set
1241CONFIG_JBD=y
1242CONFIG_FS_MBCACHE=y
1243CONFIG_REISERFS_FS=y
1244# CONFIG_REISERFS_CHECK is not set
1245# CONFIG_REISERFS_PROC_INFO is not set
1246CONFIG_REISERFS_FS_XATTR=y
1247CONFIG_REISERFS_FS_POSIX_ACL=y
1248CONFIG_REISERFS_FS_SECURITY=y
1249# CONFIG_JFS_FS is not set
1250CONFIG_FS_POSIX_ACL=y
1251CONFIG_FILE_LOCKING=y
1252CONFIG_XFS_FS=y
1253# CONFIG_XFS_QUOTA is not set
1254# CONFIG_XFS_POSIX_ACL is not set
1255# CONFIG_XFS_RT is not set
1256# CONFIG_XFS_DEBUG is not set
1257# CONFIG_GFS2_FS is not set
1258# CONFIG_OCFS2_FS is not set
1259# CONFIG_BTRFS_FS is not set
1260CONFIG_DNOTIFY=y
1261CONFIG_INOTIFY=y
1262CONFIG_INOTIFY_USER=y
1263# CONFIG_QUOTA is not set
1264CONFIG_AUTOFS_FS=y
1265CONFIG_AUTOFS4_FS=y
1266# CONFIG_FUSE_FS is not set
1267
1268#
1269# CD-ROM/DVD Filesystems
1270#
1271CONFIG_ISO9660_FS=m
1272CONFIG_JOLIET=y
1273# CONFIG_ZISOFS is not set
1274CONFIG_UDF_FS=m
1275CONFIG_UDF_NLS=y
1276
1277#
1278# DOS/FAT/NT Filesystems
1279#
1280CONFIG_FAT_FS=y
1281# CONFIG_MSDOS_FS is not set
1282CONFIG_VFAT_FS=y
1283CONFIG_FAT_DEFAULT_CODEPAGE=437
1284CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1285CONFIG_NTFS_FS=m
1286# CONFIG_NTFS_DEBUG is not set
1287# CONFIG_NTFS_RW is not set
1288
1289#
1290# Pseudo filesystems
1291#
1292CONFIG_PROC_FS=y
1293CONFIG_PROC_KCORE=y
1294CONFIG_PROC_SYSCTL=y
1295CONFIG_PROC_PAGE_MONITOR=y
1296CONFIG_SYSFS=y
1297CONFIG_TMPFS=y
1298# CONFIG_TMPFS_POSIX_ACL is not set
1299CONFIG_HUGETLBFS=y
1300CONFIG_HUGETLB_PAGE=y
1301# CONFIG_CONFIGFS_FS is not set
1302CONFIG_MISC_FILESYSTEMS=y
1303# CONFIG_ADFS_FS is not set
1304# CONFIG_AFFS_FS is not set
1305# CONFIG_HFS_FS is not set
1306# CONFIG_HFSPLUS_FS is not set
1307# CONFIG_BEFS_FS is not set
1308# CONFIG_BFS_FS is not set
1309# CONFIG_EFS_FS is not set
1310# CONFIG_CRAMFS is not set
1311# CONFIG_SQUASHFS is not set
1312# CONFIG_VXFS_FS is not set
1313# CONFIG_MINIX_FS is not set
1314# CONFIG_OMFS_FS is not set
1315# CONFIG_HPFS_FS is not set
1316# CONFIG_QNX4FS_FS is not set
1317# CONFIG_ROMFS_FS is not set
1318# CONFIG_SYSV_FS is not set
1319# CONFIG_UFS_FS is not set
1320CONFIG_NETWORK_FILESYSTEMS=y
1321CONFIG_NFS_FS=m
1322CONFIG_NFS_V3=y
1323# CONFIG_NFS_V3_ACL is not set
1324CONFIG_NFS_V4=y
1325CONFIG_NFSD=m
1326CONFIG_NFSD_V3=y
1327# CONFIG_NFSD_V3_ACL is not set
1328CONFIG_NFSD_V4=y
1329CONFIG_LOCKD=m
1330CONFIG_LOCKD_V4=y
1331CONFIG_EXPORTFS=m
1332CONFIG_NFS_COMMON=y
1333CONFIG_SUNRPC=m
1334CONFIG_SUNRPC_GSS=m
1335# CONFIG_SUNRPC_REGISTER_V4 is not set
1336CONFIG_RPCSEC_GSS_KRB5=m
1337# CONFIG_RPCSEC_GSS_SPKM3 is not set
1338CONFIG_SMB_FS=m
1339CONFIG_SMB_NLS_DEFAULT=y
1340CONFIG_SMB_NLS_REMOTE="cp437"
1341CONFIG_CIFS=m
1342# CONFIG_CIFS_STATS is not set
1343# CONFIG_CIFS_WEAK_PW_HASH is not set
1344# CONFIG_CIFS_XATTR is not set
1345# CONFIG_CIFS_DEBUG2 is not set
1346# CONFIG_CIFS_EXPERIMENTAL is not set
1347# CONFIG_NCP_FS is not set
1348# CONFIG_CODA_FS is not set
1349# CONFIG_AFS_FS is not set
1350
1351#
1352# Partition Types
1353#
1354CONFIG_PARTITION_ADVANCED=y
1355# CONFIG_ACORN_PARTITION is not set
1356# CONFIG_OSF_PARTITION is not set
1357# CONFIG_AMIGA_PARTITION is not set
1358# CONFIG_ATARI_PARTITION is not set
1359# CONFIG_MAC_PARTITION is not set
1360CONFIG_MSDOS_PARTITION=y
1361# CONFIG_BSD_DISKLABEL is not set
1362# CONFIG_MINIX_SUBPARTITION is not set
1363# CONFIG_SOLARIS_X86_PARTITION is not set
1364# CONFIG_UNIXWARE_DISKLABEL is not set
1365# CONFIG_LDM_PARTITION is not set
1366CONFIG_SGI_PARTITION=y
1367# CONFIG_ULTRIX_PARTITION is not set
1368# CONFIG_SUN_PARTITION is not set
1369# CONFIG_KARMA_PARTITION is not set
1370CONFIG_EFI_PARTITION=y
1371# CONFIG_SYSV68_PARTITION is not set
1372CONFIG_NLS=y
1373CONFIG_NLS_DEFAULT="iso8859-1"
1374CONFIG_NLS_CODEPAGE_437=y
1375CONFIG_NLS_CODEPAGE_737=m
1376CONFIG_NLS_CODEPAGE_775=m
1377CONFIG_NLS_CODEPAGE_850=m
1378CONFIG_NLS_CODEPAGE_852=m
1379CONFIG_NLS_CODEPAGE_855=m
1380CONFIG_NLS_CODEPAGE_857=m
1381CONFIG_NLS_CODEPAGE_860=m
1382CONFIG_NLS_CODEPAGE_861=m
1383CONFIG_NLS_CODEPAGE_862=m
1384CONFIG_NLS_CODEPAGE_863=m
1385CONFIG_NLS_CODEPAGE_864=m
1386CONFIG_NLS_CODEPAGE_865=m
1387CONFIG_NLS_CODEPAGE_866=m
1388CONFIG_NLS_CODEPAGE_869=m
1389CONFIG_NLS_CODEPAGE_936=m
1390CONFIG_NLS_CODEPAGE_950=m
1391CONFIG_NLS_CODEPAGE_932=m
1392CONFIG_NLS_CODEPAGE_949=m
1393CONFIG_NLS_CODEPAGE_874=m
1394CONFIG_NLS_ISO8859_8=m
1395CONFIG_NLS_CODEPAGE_1250=m
1396CONFIG_NLS_CODEPAGE_1251=m
1397# CONFIG_NLS_ASCII is not set
1398CONFIG_NLS_ISO8859_1=y
1399CONFIG_NLS_ISO8859_2=m
1400CONFIG_NLS_ISO8859_3=m
1401CONFIG_NLS_ISO8859_4=m
1402CONFIG_NLS_ISO8859_5=m
1403CONFIG_NLS_ISO8859_6=m
1404CONFIG_NLS_ISO8859_7=m
1405CONFIG_NLS_ISO8859_9=m
1406CONFIG_NLS_ISO8859_13=m
1407CONFIG_NLS_ISO8859_14=m
1408CONFIG_NLS_ISO8859_15=m
1409CONFIG_NLS_KOI8_R=m
1410CONFIG_NLS_KOI8_U=m
1411CONFIG_NLS_UTF8=m
1412# CONFIG_DLM is not set
1413
1414#
1415# Kernel hacking
1416#
1417# CONFIG_PRINTK_TIME is not set
1418CONFIG_ENABLE_WARN_DEPRECATED=y
1419CONFIG_ENABLE_MUST_CHECK=y
1420CONFIG_FRAME_WARN=2048
1421CONFIG_MAGIC_SYSRQ=y
1422# CONFIG_UNUSED_SYMBOLS is not set
1423# CONFIG_DEBUG_FS is not set
1424# CONFIG_HEADERS_CHECK is not set
1425CONFIG_DEBUG_KERNEL=y
1426# CONFIG_DEBUG_SHIRQ is not set
1427CONFIG_DETECT_SOFTLOCKUP=y
1428# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1429CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1430CONFIG_SCHED_DEBUG=y
1431# CONFIG_SCHEDSTATS is not set
1432# CONFIG_TIMER_STATS is not set
1433# CONFIG_DEBUG_OBJECTS is not set
1434# CONFIG_SLUB_DEBUG_ON is not set
1435# CONFIG_SLUB_STATS is not set
1436# CONFIG_DEBUG_RT_MUTEXES is not set
1437# CONFIG_RT_MUTEX_TESTER is not set
1438# CONFIG_DEBUG_SPINLOCK is not set
1439CONFIG_DEBUG_MUTEXES=y
1440# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1441# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1442# CONFIG_DEBUG_KOBJECT is not set
1443# CONFIG_DEBUG_INFO is not set
1444# CONFIG_DEBUG_VM is not set
1445# CONFIG_DEBUG_WRITECOUNT is not set
1446CONFIG_DEBUG_MEMORY_INIT=y
1447# CONFIG_DEBUG_LIST is not set
1448# CONFIG_DEBUG_SG is not set
1449# CONFIG_DEBUG_NOTIFIERS is not set
1450# CONFIG_BOOT_PRINTK_DELAY is not set
1451# CONFIG_RCU_TORTURE_TEST is not set
1452# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1453# CONFIG_BACKTRACE_SELF_TEST is not set
1454# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1455# CONFIG_FAULT_INJECTION is not set
1456# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1457
1458#
1459# Tracers
1460#
1461# CONFIG_SCHED_TRACER is not set
1462# CONFIG_CONTEXT_SWITCH_TRACER is not set
1463# CONFIG_BOOT_TRACER is not set
1464# CONFIG_TRACE_BRANCH_PROFILING is not set
1465# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
1466# CONFIG_SAMPLES is not set
1467CONFIG_IA64_GRANULE_16MB=y
1468# CONFIG_IA64_GRANULE_64MB is not set
1469# CONFIG_IA64_PRINT_HAZARDS is not set
1470# CONFIG_DISABLE_VHPT is not set
1471# CONFIG_IA64_DEBUG_CMPXCHG is not set
1472# CONFIG_IA64_DEBUG_IRQ is not set
1473
1474#
1475# Security options
1476#
1477# CONFIG_KEYS is not set
1478# CONFIG_SECURITY is not set
1479# CONFIG_SECURITYFS is not set
1480# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1481CONFIG_CRYPTO=y
1482
1483#
1484# Crypto core or helper
1485#
1486# CONFIG_CRYPTO_FIPS is not set
1487CONFIG_CRYPTO_ALGAPI=y
1488CONFIG_CRYPTO_ALGAPI2=y
1489CONFIG_CRYPTO_AEAD2=y
1490CONFIG_CRYPTO_BLKCIPHER=m
1491CONFIG_CRYPTO_BLKCIPHER2=y
1492CONFIG_CRYPTO_HASH=y
1493CONFIG_CRYPTO_HASH2=y
1494CONFIG_CRYPTO_RNG2=y
1495CONFIG_CRYPTO_MANAGER=m
1496CONFIG_CRYPTO_MANAGER2=y
1497# CONFIG_CRYPTO_GF128MUL is not set
1498# CONFIG_CRYPTO_NULL is not set
1499# CONFIG_CRYPTO_CRYPTD is not set
1500# CONFIG_CRYPTO_AUTHENC is not set
1501# CONFIG_CRYPTO_TEST is not set
1502
1503#
1504# Authenticated Encryption with Associated Data
1505#
1506# CONFIG_CRYPTO_CCM is not set
1507# CONFIG_CRYPTO_GCM is not set
1508# CONFIG_CRYPTO_SEQIV is not set
1509
1510#
1511# Block modes
1512#
1513CONFIG_CRYPTO_CBC=m
1514# CONFIG_CRYPTO_CTR is not set
1515# CONFIG_CRYPTO_CTS is not set
1516CONFIG_CRYPTO_ECB=m
1517# CONFIG_CRYPTO_LRW is not set
1518CONFIG_CRYPTO_PCBC=m
1519# CONFIG_CRYPTO_XTS is not set
1520
1521#
1522# Hash modes
1523#
1524# CONFIG_CRYPTO_HMAC is not set
1525# CONFIG_CRYPTO_XCBC is not set
1526
1527#
1528# Digest
1529#
1530# CONFIG_CRYPTO_CRC32C is not set
1531# CONFIG_CRYPTO_MD4 is not set
1532CONFIG_CRYPTO_MD5=y
1533# CONFIG_CRYPTO_MICHAEL_MIC is not set
1534# CONFIG_CRYPTO_RMD128 is not set
1535# CONFIG_CRYPTO_RMD160 is not set
1536# CONFIG_CRYPTO_RMD256 is not set
1537# CONFIG_CRYPTO_RMD320 is not set
1538# CONFIG_CRYPTO_SHA1 is not set
1539# CONFIG_CRYPTO_SHA256 is not set
1540# CONFIG_CRYPTO_SHA512 is not set
1541# CONFIG_CRYPTO_TGR192 is not set
1542# CONFIG_CRYPTO_WP512 is not set
1543
1544#
1545# Ciphers
1546#
1547# CONFIG_CRYPTO_AES is not set
1548# CONFIG_CRYPTO_ANUBIS is not set
1549# CONFIG_CRYPTO_ARC4 is not set
1550# CONFIG_CRYPTO_BLOWFISH is not set
1551# CONFIG_CRYPTO_CAMELLIA is not set
1552# CONFIG_CRYPTO_CAST5 is not set
1553# CONFIG_CRYPTO_CAST6 is not set
1554CONFIG_CRYPTO_DES=m
1555# CONFIG_CRYPTO_FCRYPT is not set
1556# CONFIG_CRYPTO_KHAZAD is not set
1557# CONFIG_CRYPTO_SALSA20 is not set
1558# CONFIG_CRYPTO_SEED is not set
1559# CONFIG_CRYPTO_SERPENT is not set
1560# CONFIG_CRYPTO_TEA is not set
1561# CONFIG_CRYPTO_TWOFISH is not set
1562
1563#
1564# Compression
1565#
1566# CONFIG_CRYPTO_DEFLATE is not set
1567# CONFIG_CRYPTO_LZO is not set
1568
1569#
1570# Random Number Generation
1571#
1572# CONFIG_CRYPTO_ANSI_CPRNG is not set
1573CONFIG_CRYPTO_HW=y
1574# CONFIG_CRYPTO_DEV_HIFN_795X is not set
1575CONFIG_HAVE_KVM=y
1576CONFIG_VIRTUALIZATION=y
1577# CONFIG_KVM is not set
1578# CONFIG_VIRTIO_PCI is not set
1579# CONFIG_VIRTIO_BALLOON is not set
1580
1581#
1582# Library routines
1583#
1584CONFIG_BITREVERSE=y
1585CONFIG_GENERIC_FIND_LAST_BIT=y
1586# CONFIG_CRC_CCITT is not set
1587# CONFIG_CRC16 is not set
1588# CONFIG_CRC_T10DIF is not set
1589CONFIG_CRC_ITU_T=m
1590CONFIG_CRC32=y
1591# CONFIG_CRC7 is not set
1592# CONFIG_LIBCRC32C is not set
1593CONFIG_PLIST=y
1594CONFIG_HAS_IOMEM=y
1595CONFIG_HAS_IOPORT=y
1596CONFIG_HAS_DMA=y
1597CONFIG_GENERIC_HARDIRQS=y
1598CONFIG_GENERIC_IRQ_PROBE=y
1599CONFIG_GENERIC_PENDING_IRQ=y
1600CONFIG_IRQ_PER_CPU=y
1601# CONFIG_IOMMU_API is not set
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index 116761ca462d..2b0a38e84705 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -24,6 +24,10 @@
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/ioctl.h> 25#include <linux/ioctl.h>
26 26
27/* Select x86 specific features in <linux/kvm.h> */
28#define __KVM_HAVE_IOAPIC
29#define __KVM_HAVE_DEVICE_ASSIGNMENT
30
27/* Architectural interrupt line count. */ 31/* Architectural interrupt line count. */
28#define KVM_NR_INTERRUPTS 256 32#define KVM_NR_INTERRUPTS 256
29 33
diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h
index 34efe88eb849..f2ca32069b3f 100644
--- a/arch/ia64/include/asm/mmzone.h
+++ b/arch/ia64/include/asm/mmzone.h
@@ -31,10 +31,6 @@ static inline int pfn_to_nid(unsigned long pfn)
31#endif 31#endif
32} 32}
33 33
34#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
35extern int early_pfn_to_nid(unsigned long pfn);
36#endif
37
38#ifdef CONFIG_IA64_DIG /* DIG systems are small */ 34#ifdef CONFIG_IA64_DIG /* DIG systems are small */
39# define MAX_PHYSNODE_ID 8 35# define MAX_PHYSNODE_ID 8
40# define NR_NODE_MEMBLKS (MAX_NUMNODES * 8) 36# define NR_NODE_MEMBLKS (MAX_NUMNODES * 8)
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
index 5efecf06c9a4..96798d2da7c2 100644
--- a/arch/ia64/include/asm/sn/bte.h
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -39,7 +39,7 @@
39/* BTE status register only supports 16 bits for length field */ 39/* BTE status register only supports 16 bits for length field */
40#define BTE_LEN_BITS (16) 40#define BTE_LEN_BITS (16)
41#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1) 41#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
42#define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES) 42#define BTE_MAX_XFER (BTE_LEN_MASK << L1_CACHE_SHIFT)
43 43
44 44
45/* Define hardware */ 45/* Define hardware */
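The bte.h hunk above tightens BTE_MAX_XFER: the BTE length field is only 16 bits wide, so the largest encodable transfer is BTE_LEN_MASK (65535) cache lines, and the old expression overstated the limit by exactly one line. The bte_copy() hunk further down compares against this corrected constant. A standalone arithmetic sketch, assuming ia64's 128-byte L1 lines (L1_CACHE_SHIFT == 7); illustrative code, not part of the patch:

#include <stdio.h>

#define L1_CACHE_SHIFT 7                        /* assumption: 128-byte cache lines */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define BTE_LEN_BITS   16
#define BTE_LEN_MASK   ((1 << BTE_LEN_BITS) - 1)

int main(void)
{
        unsigned long old_max = (1UL << BTE_LEN_BITS) * L1_CACHE_BYTES;        /* 8388608 */
        unsigned long new_max = (unsigned long)BTE_LEN_MASK << L1_CACHE_SHIFT; /* 8388480 */

        /* The hardware length field can encode 65535 lines, never 65536. */
        printf("old limit %lu bytes, new limit %lu bytes, difference one line (%d bytes)\n",
               old_max, new_max, L1_CACHE_BYTES);
        return 0;
}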
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 006ad366a454..166e0d839fa0 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -507,7 +507,7 @@ static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
507 if (trigger == IOSAPIC_EDGE) 507 if (trigger == IOSAPIC_EDGE)
508 return -EINVAL; 508 return -EINVAL;
509 509
510 for (i = 0; i <= NR_IRQS; i++) { 510 for (i = 0; i < NR_IRQS; i++) {
511 info = &iosapic_intr_info[i]; 511 info = &iosapic_intr_info[i];
512 if (info->trigger == trigger && info->polarity == pol && 512 if (info->trigger == trigger && info->polarity == pol &&
513 (info->dmode == IOSAPIC_FIXED || 513 (info->dmode == IOSAPIC_FIXED ||
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 11463994a7d5..52290547c85b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -736,14 +736,15 @@ int __cpu_disable(void)
736 return -EBUSY; 736 return -EBUSY;
737 } 737 }
738 738
739 cpu_clear(cpu, cpu_online_map);
740
739 if (migrate_platform_irqs(cpu)) { 741 if (migrate_platform_irqs(cpu)) {
740 cpu_set(cpu, cpu_online_map); 742 cpu_set(cpu, cpu_online_map);
741 return (-EBUSY); 743 return -EBUSY;
742 } 744 }
743 745
744 remove_siblinginfo(cpu); 746 remove_siblinginfo(cpu);
745 fixup_irqs(); 747 fixup_irqs();
746 cpu_clear(cpu, cpu_online_map);
747 local_flush_tlb_all(); 748 local_flush_tlb_all();
748 cpu_clear(cpu, cpu_callin_map); 749 cpu_clear(cpu, cpu_callin_map);
749 return 0; 750 return 0;
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 67810b77d998..b6c0e63a0bf6 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -2149,7 +2149,7 @@ unw_remove_unwind_table (void *handle)
2149 2149
2150 /* next, remove hash table entries for this table */ 2150 /* next, remove hash table entries for this table */
2151 2151
2152 for (index = 0; index <= UNW_HASH_SIZE; ++index) { 2152 for (index = 0; index < UNW_HASH_SIZE; ++index) {
2153 tmp = unw.cache + unw.hash[index]; 2153 tmp = unw.cache + unw.hash[index];
2154 if (unw.hash[index] >= UNW_CACHE_SIZE 2154 if (unw.hash[index] >= UNW_CACHE_SIZE
2155 || tmp->ip < table->start || tmp->ip >= table->end) 2155 || tmp->ip < table->start || tmp->ip >= table->end)
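The iosapic_find_sharable_irq() and unw_remove_unwind_table() hunks above fix the same off-by-one pattern: looping with <= over an array of N entries reads one element past the end. A minimal standalone illustration with a hypothetical table, not kernel code:

#include <assert.h>

#define NR_ENTRIES 8

static int table[NR_ENTRIES];

static int count_nonzero(void)
{
        int i, n = 0;

        /* Valid indices are 0 .. NR_ENTRIES - 1.  Writing "i <= NR_ENTRIES"
         * would read table[NR_ENTRIES], which is out of bounds. */
        for (i = 0; i < NR_ENTRIES; i++)
                if (table[i])
                        n++;
        return n;
}

int main(void)
{
        assert(count_nonzero() == 0);
        return 0;
}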
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 4e586f6110aa..28f982045f29 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1337,6 +1337,10 @@ static void kvm_release_vm_pages(struct kvm *kvm)
1337 } 1337 }
1338} 1338}
1339 1339
1340void kvm_arch_sync_events(struct kvm *kvm)
1341{
1342}
1343
1340void kvm_arch_destroy_vm(struct kvm *kvm) 1344void kvm_arch_destroy_vm(struct kvm *kvm)
1341{ 1345{
1342 kvm_iommu_unmap_guest(kvm); 1346 kvm_iommu_unmap_guest(kvm);
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 552d07724207..230eae482f32 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -455,13 +455,18 @@ fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
455 if (!vmm_fpswa_interface) 455 if (!vmm_fpswa_interface)
456 return (fpswa_ret_t) {-1, 0, 0, 0}; 456 return (fpswa_ret_t) {-1, 0, 0, 0};
457 457
458 /*
459 * Just let fpswa driver to use hardware fp registers.
460 * No fp register is valid in memory.
461 */
462 memset(&fp_state, 0, sizeof(fp_state_t)); 458 memset(&fp_state, 0, sizeof(fp_state_t));
463 459
464 /* 460 /*
461 * compute fp_state. only FP registers f6 - f11 are used by the
462 * vmm, so set those bits in the mask and set the low volatile
463 * pointer to point to these registers.
464 */
465 fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
466
467 fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
468
469 /*
465 * unsigned long (*EFI_FPSWA) ( 470 * unsigned long (*EFI_FPSWA) (
466 * unsigned long trap_type, 471 * unsigned long trap_type,
467 * void *Bundle, 472 * void *Bundle,
@@ -545,10 +550,6 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
545 status = vmm_handle_fpu_swa(0, regs, isr); 550 status = vmm_handle_fpu_swa(0, regs, isr);
546 if (!status) 551 if (!status)
547 return ; 552 return ;
548 else if (-EAGAIN == status) {
549 vcpu_decrement_iip(vcpu);
550 return ;
551 }
552 break; 553 break;
553 } 554 }
554 555
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index b73bf1838e57..3efea7d0a351 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -58,7 +58,7 @@ paddr_to_nid(unsigned long paddr)
58 * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where 58 * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
59 * the section resides. 59 * the section resides.
60 */ 60 */
61int early_pfn_to_nid(unsigned long pfn) 61int __meminit __early_pfn_to_nid(unsigned long pfn)
62{ 62{
63 int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec; 63 int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
64 64
@@ -70,7 +70,7 @@ int early_pfn_to_nid(unsigned long pfn)
70 return node_memblk[i].nid; 70 return node_memblk[i].nid;
71 } 71 }
72 72
73 return 0; 73 return -1;
74} 74}
75 75
76#ifdef CONFIG_MEMORY_HOTPLUG 76#ifdef CONFIG_MEMORY_HOTPLUG
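The numa.c hunk above renames the arch helper to __early_pfn_to_nid() and makes it return -1 when no memory block covers the pfn. Because 0 is itself a valid node id, the old return value made a hole in the memory map indistinguishable from node 0; a negative sentinel lets the caller tell the two apart. A small standalone sketch of the sentinel pattern, using a made-up table rather than the kernel's node_memblk[]:

#include <stdio.h>

struct memblk { unsigned long start_pfn, end_pfn; int nid; };   /* hypothetical */

static const struct memblk memblks[] = {
        { 0x00000, 0x10000, 0 },
        { 0x20000, 0x30000, 1 },
};

/* Return the owning node, or -1 when the pfn falls in a hole.  Returning 0
 * for a hole would be ambiguous, since node 0 is a legitimate answer. */
static int demo_early_pfn_to_nid(unsigned long pfn)
{
        unsigned int i;

        for (i = 0; i < sizeof(memblks) / sizeof(memblks[0]); i++)
                if (pfn >= memblks[i].start_pfn && pfn < memblks[i].end_pfn)
                        return memblks[i].nid;
        return -1;
}

int main(void)
{
        printf("%d %d %d\n",
               demo_early_pfn_to_nid(0x00100),   /* 0        */
               demo_early_pfn_to_nid(0x20100),   /* 1        */
               demo_early_pfn_to_nid(0x18000));  /* -1: hole */
        return 0;
}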
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 9456d4034024..c6d6b62db66c 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -97,9 +97,10 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
97 return BTE_SUCCESS; 97 return BTE_SUCCESS;
98 } 98 }
99 99
100 BUG_ON((len & L1_CACHE_MASK) || 100 BUG_ON(len & L1_CACHE_MASK);
101 (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)); 101 BUG_ON(src & L1_CACHE_MASK);
102 BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT))); 102 BUG_ON(dest & L1_CACHE_MASK);
103 BUG_ON(len > BTE_MAX_XFER);
103 104
104 /* 105 /*
105 * Start with interface corresponding to cpu number 106 * Start with interface corresponding to cpu number
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig
index f1683a20275b..515e0826803a 100644
--- a/arch/ia64/xen/Kconfig
+++ b/arch/ia64/xen/Kconfig
@@ -8,8 +8,7 @@ config XEN
8 depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL 8 depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL
9 select XEN_XENCOMM 9 select XEN_XENCOMM
10 select NO_IDLE_HZ 10 select NO_IDLE_HZ
11 11 # followings are required to save/restore.
12 # those are required to save/restore.
13 select ARCH_SUSPEND_POSSIBLE 12 select ARCH_SUSPEND_POSSIBLE
14 select SUSPEND 13 select SUSPEND
15 select PM_SLEEP 14 select PM_SLEEP
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index 04cd12350455..936cff3c96e0 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -153,7 +153,7 @@ xen_post_smp_prepare_boot_cpu(void)
153 xen_setup_vcpu_info_placement(); 153 xen_setup_vcpu_info_placement();
154} 154}
155 155
156static const struct pv_init_ops xen_init_ops __initdata = { 156static const struct pv_init_ops xen_init_ops __initconst = {
157 .banner = xen_banner, 157 .banner = xen_banner,
158 158
159 .reserve_memory = xen_reserve_memory, 159 .reserve_memory = xen_reserve_memory,
@@ -337,7 +337,7 @@ xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
337 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); 337 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
338} 338}
339 339
340static const struct pv_iosapic_ops xen_iosapic_ops __initdata = { 340static const struct pv_iosapic_ops xen_iosapic_ops __initconst = {
341 .pcat_compat_init = xen_pcat_compat_init, 341 .pcat_compat_init = xen_pcat_compat_init,
342 .__get_irq_chip = xen_iosapic_get_irq_chip, 342 .__get_irq_chip = xen_iosapic_get_irq_chip,
343 343
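The two xen_pv_ops.c hunks above move const-qualified boot-time structures from __initdata to __initconst. Const init data conventionally belongs in the read-only init section; leaving it tagged __initdata can mix const and non-const objects in .init.data, which some toolchains reject with a section type conflict. A hedged, kernel-style sketch of the convention (the struct and names are invented, and the fragment only builds inside a kernel tree):

#include <linux/init.h>

/* Hypothetical structure, only to illustrate the annotations. */
struct demo_pv_ops {
        void (*banner)(void);
};

static void __init demo_banner(void)
{
}

/* Writable data needed only during boot: */
static int demo_tuning[4] __initdata = { 1, 2, 4, 8 };

/* Const data needed only during boot is tagged __initconst so it is
 * placed in the read-only init section instead of .init.data: */
static const struct demo_pv_ops demo_pv_ops __initconst = {
        .banner = demo_banner,
};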
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index dba4afabb444..39478dd08e67 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -187,8 +187,8 @@ __asm__ (__ALIGN_STR "\n" \
187" jbra ret_from_interrupt\n" \ 187" jbra ret_from_interrupt\n" \
188 : : "i" (&kstat_cpu(0).irqs[n+8]), "i" (&irq_handler[n+8]), \ 188 : : "i" (&kstat_cpu(0).irqs[n+8]), "i" (&irq_handler[n+8]), \
189 "n" (PT_OFF_SR), "n" (n), \ 189 "n" (PT_OFF_SR), "n" (n), \
190 "i" (n & 8 ? (n & 16 ? &tt_mfp.int_mk_a : &mfp.int_mk_a) \ 190 "i" (n & 8 ? (n & 16 ? &tt_mfp.int_mk_a : &st_mfp.int_mk_a) \
191 : (n & 16 ? &tt_mfp.int_mk_b : &mfp.int_mk_b)), \ 191 : (n & 16 ? &tt_mfp.int_mk_b : &st_mfp.int_mk_b)), \
192 "m" (preempt_count()), "di" (HARDIRQ_OFFSET) \ 192 "m" (preempt_count()), "di" (HARDIRQ_OFFSET) \
193); \ 193); \
194 for (;;); /* fake noreturn */ \ 194 for (;;); /* fake noreturn */ \
@@ -366,14 +366,14 @@ void __init atari_init_IRQ(void)
366 /* Initialize the MFP(s) */ 366 /* Initialize the MFP(s) */
367 367
368#ifdef ATARI_USE_SOFTWARE_EOI 368#ifdef ATARI_USE_SOFTWARE_EOI
369 mfp.vec_adr = 0x48; /* Software EOI-Mode */ 369 st_mfp.vec_adr = 0x48; /* Software EOI-Mode */
370#else 370#else
371 mfp.vec_adr = 0x40; /* Automatic EOI-Mode */ 371 st_mfp.vec_adr = 0x40; /* Automatic EOI-Mode */
372#endif 372#endif
373 mfp.int_en_a = 0x00; /* turn off MFP-Ints */ 373 st_mfp.int_en_a = 0x00; /* turn off MFP-Ints */
374 mfp.int_en_b = 0x00; 374 st_mfp.int_en_b = 0x00;
375 mfp.int_mk_a = 0xff; /* no Masking */ 375 st_mfp.int_mk_a = 0xff; /* no Masking */
376 mfp.int_mk_b = 0xff; 376 st_mfp.int_mk_b = 0xff;
377 377
378 if (ATARIHW_PRESENT(TT_MFP)) { 378 if (ATARIHW_PRESENT(TT_MFP)) {
379#ifdef ATARI_USE_SOFTWARE_EOI 379#ifdef ATARI_USE_SOFTWARE_EOI
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index a5f33c059979..4add96d13b19 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -609,10 +609,10 @@ int atari_keyb_init(void)
609 ACIA_RHTID : 0); 609 ACIA_RHTID : 0);
610 610
611 /* make sure the interrupt line is up */ 611 /* make sure the interrupt line is up */
612 } while ((mfp.par_dt_reg & 0x10) == 0); 612 } while ((st_mfp.par_dt_reg & 0x10) == 0);
613 613
614 /* enable ACIA Interrupts */ 614 /* enable ACIA Interrupts */
615 mfp.active_edge &= ~0x10; 615 st_mfp.active_edge &= ~0x10;
616 atari_turnon_irq(IRQ_MFP_ACIA); 616 atari_turnon_irq(IRQ_MFP_ACIA);
617 617
618 ikbd_self_test = 1; 618 ikbd_self_test = 1;
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 49c28cdbea5c..ae2d96e5d618 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -258,7 +258,7 @@ void __init config_atari(void)
258 printk("STND_SHIFTER "); 258 printk("STND_SHIFTER ");
259 } 259 }
260 } 260 }
261 if (hwreg_present(&mfp.par_dt_reg)) { 261 if (hwreg_present(&st_mfp.par_dt_reg)) {
262 ATARIHW_SET(ST_MFP); 262 ATARIHW_SET(ST_MFP);
263 printk("ST_MFP "); 263 printk("ST_MFP ");
264 } 264 }
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c
index 702b15ccfab7..28efdc33c1ae 100644
--- a/arch/m68k/atari/debug.c
+++ b/arch/m68k/atari/debug.c
@@ -34,9 +34,9 @@ static struct console atari_console_driver = {
34 34
35static inline void ata_mfp_out(char c) 35static inline void ata_mfp_out(char c)
36{ 36{
37 while (!(mfp.trn_stat & 0x80)) /* wait for tx buf empty */ 37 while (!(st_mfp.trn_stat & 0x80)) /* wait for tx buf empty */
38 barrier(); 38 barrier();
39 mfp.usart_dta = c; 39 st_mfp.usart_dta = c;
40} 40}
41 41
42static void atari_mfp_console_write(struct console *co, const char *str, 42static void atari_mfp_console_write(struct console *co, const char *str,
@@ -91,7 +91,7 @@ static int ata_par_out(char c)
91 /* This a some-seconds timeout in case no printer is connected */ 91 /* This a some-seconds timeout in case no printer is connected */
92 unsigned long i = loops_per_jiffy > 1 ? loops_per_jiffy : 10000000/HZ; 92 unsigned long i = loops_per_jiffy > 1 ? loops_per_jiffy : 10000000/HZ;
93 93
94 while ((mfp.par_dt_reg & 1) && --i) /* wait for BUSY == L */ 94 while ((st_mfp.par_dt_reg & 1) && --i) /* wait for BUSY == L */
95 ; 95 ;
96 if (!i) 96 if (!i)
97 return 0; 97 return 0;
@@ -131,9 +131,9 @@ static void atari_par_console_write(struct console *co, const char *str,
131#if 0 131#if 0
132int atari_mfp_console_wait_key(struct console *co) 132int atari_mfp_console_wait_key(struct console *co)
133{ 133{
134 while (!(mfp.rcv_stat & 0x80)) /* wait for rx buf filled */ 134 while (!(st_mfp.rcv_stat & 0x80)) /* wait for rx buf filled */
135 barrier(); 135 barrier();
136 return mfp.usart_dta; 136 return st_mfp.usart_dta;
137} 137}
138 138
139int atari_scc_console_wait_key(struct console *co) 139int atari_scc_console_wait_key(struct console *co)
@@ -175,12 +175,12 @@ static void __init atari_init_mfp_port(int cflag)
175 baud = B9600; /* use default 9600bps for non-implemented rates */ 175 baud = B9600; /* use default 9600bps for non-implemented rates */
176 baud -= B1200; /* baud_table[] starts at 1200bps */ 176 baud -= B1200; /* baud_table[] starts at 1200bps */
177 177
178 mfp.trn_stat &= ~0x01; /* disable TX */ 178 st_mfp.trn_stat &= ~0x01; /* disable TX */
179 mfp.usart_ctr = parity | csize | 0x88; /* 1:16 clk mode, 1 stop bit */ 179 st_mfp.usart_ctr = parity | csize | 0x88; /* 1:16 clk mode, 1 stop bit */
180 mfp.tim_ct_cd &= 0x70; /* stop timer D */ 180 st_mfp.tim_ct_cd &= 0x70; /* stop timer D */
181 mfp.tim_dt_d = baud_table[baud]; 181 st_mfp.tim_dt_d = baud_table[baud];
182 mfp.tim_ct_cd |= 0x01; /* start timer D, 1:4 */ 182 st_mfp.tim_ct_cd |= 0x01; /* start timer D, 1:4 */
183 mfp.trn_stat |= 0x01; /* enable TX */ 183 st_mfp.trn_stat |= 0x01; /* enable TX */
184} 184}
185 185
186#define SCC_WRITE(reg, val) \ 186#define SCC_WRITE(reg, val) \
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index d076ff8d1b39..a0531f34c617 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -27,9 +27,9 @@ void __init
27atari_sched_init(irq_handler_t timer_routine) 27atari_sched_init(irq_handler_t timer_routine)
28{ 28{
29 /* set Timer C data Register */ 29 /* set Timer C data Register */
30 mfp.tim_dt_c = INT_TICKS; 30 st_mfp.tim_dt_c = INT_TICKS;
31 /* start timer C, div = 1:100 */ 31 /* start timer C, div = 1:100 */
32 mfp.tim_ct_cd = (mfp.tim_ct_cd & 15) | 0x60; 32 st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
33 /* install interrupt service routine for MFP Timer C */ 33 /* install interrupt service routine for MFP Timer C */
34 if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW, 34 if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
35 "timer", timer_routine)) 35 "timer", timer_routine))
@@ -46,11 +46,11 @@ unsigned long atari_gettimeoffset (void)
46 unsigned long ticks, offset = 0; 46 unsigned long ticks, offset = 0;
47 47
48 /* read MFP timer C current value */ 48 /* read MFP timer C current value */
49 ticks = mfp.tim_dt_c; 49 ticks = st_mfp.tim_dt_c;
50 /* The probability of underflow is less than 2% */ 50 /* The probability of underflow is less than 2% */
51 if (ticks > INT_TICKS - INT_TICKS / 50) 51 if (ticks > INT_TICKS - INT_TICKS / 50)
52 /* Check for pending timer interrupt */ 52 /* Check for pending timer interrupt */
53 if (mfp.int_pn_b & (1 << 5)) 53 if (st_mfp.int_pn_b & (1 << 5))
54 offset = TICK_SIZE; 54 offset = TICK_SIZE;
55 55
56 ticks = INT_TICKS - ticks; 56 ticks = INT_TICKS - ticks;
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index 1412b4ab202f..a714e1aa072a 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -113,7 +113,7 @@ extern struct atari_hw_present atari_hw_present;
113 * of nops on various machines. Somebody claimed that the tstb takes 600 ns. 113 * of nops on various machines. Somebody claimed that the tstb takes 600 ns.
114 */ 114 */
115#define MFPDELAY() \ 115#define MFPDELAY() \
116 __asm__ __volatile__ ( "tstb %0" : : "m" (mfp.par_dt_reg) : "cc" ); 116 __asm__ __volatile__ ( "tstb %0" : : "m" (st_mfp.par_dt_reg) : "cc" );
117 117
118/* Do cache push/invalidate for DMA read/write. This function obeys the 118/* Do cache push/invalidate for DMA read/write. This function obeys the
119 * snooping on some machines (Medusa) and processors: The Medusa itself can 119 * snooping on some machines (Medusa) and processors: The Medusa itself can
@@ -565,7 +565,7 @@ struct MFP
565 u_char char_dummy23; 565 u_char char_dummy23;
566 u_char usart_dta; 566 u_char usart_dta;
567 }; 567 };
568# define mfp ((*(volatile struct MFP*)MFP_BAS)) 568# define st_mfp ((*(volatile struct MFP*)MFP_BAS))
569 569
570/* TT's second MFP */ 570/* TT's second MFP */
571 571
diff --git a/arch/m68k/include/asm/atariints.h b/arch/m68k/include/asm/atariints.h
index 5748e99f4e26..f597892e43a0 100644
--- a/arch/m68k/include/asm/atariints.h
+++ b/arch/m68k/include/asm/atariints.h
@@ -113,7 +113,7 @@ static inline int get_mfp_bit( unsigned irq, int type )
113{ unsigned char mask, *reg; 113{ unsigned char mask, *reg;
114 114
115 mask = 1 << (irq & 7); 115 mask = 1 << (irq & 7);
116 reg = (unsigned char *)&mfp.int_en_a + type*4 + 116 reg = (unsigned char *)&st_mfp.int_en_a + type*4 +
117 ((irq & 8) >> 2) + (((irq-8) & 16) << 3); 117 ((irq & 8) >> 2) + (((irq-8) & 16) << 3);
118 return( *reg & mask ); 118 return( *reg & mask );
119} 119}
@@ -123,7 +123,7 @@ static inline void set_mfp_bit( unsigned irq, int type )
123{ unsigned char mask, *reg; 123{ unsigned char mask, *reg;
124 124
125 mask = 1 << (irq & 7); 125 mask = 1 << (irq & 7);
126 reg = (unsigned char *)&mfp.int_en_a + type*4 + 126 reg = (unsigned char *)&st_mfp.int_en_a + type*4 +
127 ((irq & 8) >> 2) + (((irq-8) & 16) << 3); 127 ((irq & 8) >> 2) + (((irq-8) & 16) << 3);
128 __asm__ __volatile__ ( "orb %0,%1" 128 __asm__ __volatile__ ( "orb %0,%1"
129 : : "di" (mask), "m" (*reg) : "memory" ); 129 : : "di" (mask), "m" (*reg) : "memory" );
@@ -134,7 +134,7 @@ static inline void clear_mfp_bit( unsigned irq, int type )
134{ unsigned char mask, *reg; 134{ unsigned char mask, *reg;
135 135
136 mask = ~(1 << (irq & 7)); 136 mask = ~(1 << (irq & 7));
137 reg = (unsigned char *)&mfp.int_en_a + type*4 + 137 reg = (unsigned char *)&st_mfp.int_en_a + type*4 +
138 ((irq & 8) >> 2) + (((irq-8) & 16) << 3); 138 ((irq & 8) >> 2) + (((irq-8) & 16) << 3);
139 if (type == MFP_PENDING || type == MFP_SERVICE) 139 if (type == MFP_PENDING || type == MFP_SERVICE)
140 __asm__ __volatile__ ( "moveb %0,%1" 140 __asm__ __volatile__ ( "moveb %0,%1"
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 600eef3f3ac7..e61465a18c7e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -603,7 +603,7 @@ config CAVIUM_OCTEON_SIMULATOR
603 select SYS_SUPPORTS_64BIT_KERNEL 603 select SYS_SUPPORTS_64BIT_KERNEL
604 select SYS_SUPPORTS_BIG_ENDIAN 604 select SYS_SUPPORTS_BIG_ENDIAN
605 select SYS_SUPPORTS_HIGHMEM 605 select SYS_SUPPORTS_HIGHMEM
606 select CPU_CAVIUM_OCTEON 606 select SYS_HAS_CPU_CAVIUM_OCTEON
607 help 607 help
608 The Octeon simulator is software performance model of the Cavium 608 The Octeon simulator is software performance model of the Cavium
609 Octeon Processor. It supports simulating Octeon processors on x86 609 Octeon Processor. It supports simulating Octeon processors on x86
@@ -618,7 +618,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
618 select SYS_SUPPORTS_BIG_ENDIAN 618 select SYS_SUPPORTS_BIG_ENDIAN
619 select SYS_SUPPORTS_HIGHMEM 619 select SYS_SUPPORTS_HIGHMEM
620 select SYS_HAS_EARLY_PRINTK 620 select SYS_HAS_EARLY_PRINTK
621 select CPU_CAVIUM_OCTEON 621 select SYS_HAS_CPU_CAVIUM_OCTEON
622 select SWAP_IO_SPACE 622 select SWAP_IO_SPACE
623 help 623 help
624 This option supports all of the Octeon reference boards from Cavium 624 This option supports all of the Octeon reference boards from Cavium
@@ -1234,6 +1234,7 @@ config CPU_SB1
1234 1234
1235config CPU_CAVIUM_OCTEON 1235config CPU_CAVIUM_OCTEON
1236 bool "Cavium Octeon processor" 1236 bool "Cavium Octeon processor"
1237 depends on SYS_HAS_CPU_CAVIUM_OCTEON
1237 select IRQ_CPU 1238 select IRQ_CPU
1238 select IRQ_CPU_OCTEON 1239 select IRQ_CPU_OCTEON
1239 select CPU_HAS_PREFETCH 1240 select CPU_HAS_PREFETCH
@@ -1314,6 +1315,9 @@ config SYS_HAS_CPU_RM9000
1314config SYS_HAS_CPU_SB1 1315config SYS_HAS_CPU_SB1
1315 bool 1316 bool
1316 1317
1318config SYS_HAS_CPU_CAVIUM_OCTEON
1319 bool
1320
1317# 1321#
1318# CPU may reorder R->R, R->W, W->R, W->W 1322# CPU may reorder R->R, R->W, W->R, W->W
1319# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC 1323# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -1387,6 +1391,7 @@ config 32BIT
1387config 64BIT 1391config 64BIT
1388 bool "64-bit kernel" 1392 bool "64-bit kernel"
1389 depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL 1393 depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
1394 select HAVE_SYSCALL_WRAPPERS
1390 help 1395 help
1391 Select this option if you want to build a 64-bit kernel. 1396 Select this option if you want to build a 64-bit kernel.
1392 1397
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 6fd441d16af5..f58d4ffb8945 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -118,7 +118,7 @@ void __init plat_time_init(void)
118 * setup counter 1 (RTC) to tick at full speed 118 * setup counter 1 (RTC) to tick at full speed
119 */ 119 */
120 t = 0xffffff; 120 t = 0xffffff;
121 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && t--) 121 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
122 asm volatile ("nop"); 122 asm volatile ("nop");
123 if (!t) 123 if (!t)
124 goto cntr_err; 124 goto cntr_err;
@@ -127,7 +127,7 @@ void __init plat_time_init(void)
127 au_sync(); 127 au_sync();
128 128
129 t = 0xffffff; 129 t = 0xffffff;
130 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--) 130 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
131 asm volatile ("nop"); 131 asm volatile ("nop");
132 if (!t) 132 if (!t)
133 goto cntr_err; 133 goto cntr_err;
@@ -135,7 +135,7 @@ void __init plat_time_init(void)
135 au_sync(); 135 au_sync();
136 136
137 t = 0xffffff; 137 t = 0xffffff;
138 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--) 138 while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
139 asm volatile ("nop"); 139 asm volatile ("nop");
140 if (!t) 140 if (!t)
141 goto cntr_err; 141 goto cntr_err;
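The three alchemy/common/time.c hunks above fix a countdown-timeout idiom: with post-decrement, the counter ends at (unsigned)-1 rather than 0 when the wait times out, so the if (!t) check after the loop never fires; pre-decrement leaves t at exactly zero in that case. A standalone illustration:

#include <stdio.h>

/* A condition that never becomes false, to force the timeout path. */
static int still_busy(void) { return 1; }

int main(void)
{
        unsigned long t;

        t = 100;
        while (still_busy() && t--)     /* buggy: exits with t == ULONG_MAX */
                ;
        printf("post-decrement: t = %lu, timeout detected: %s\n",
               t, !t ? "yes" : "no");   /* prints "no" */

        t = 100;
        while (still_busy() && --t)     /* fixed: exits with t == 0 */
                ;
        printf("pre-decrement:  t = %lu, timeout detected: %s\n",
               t, !t ? "yes" : "no");   /* prints "yes" */
        return 0;
}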
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index 36ed44070256..a6772e9507f5 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -1,6 +1,5 @@
1#ifndef __ASM_SECCOMP_H 1#ifndef __ASM_SECCOMP_H
2 2
3#include <linux/thread_info.h>
4#include <linux/unistd.h> 3#include <linux/unistd.h>
5 4
6#define __NR_seccomp_read __NR_read 5#define __NR_seccomp_read __NR_read
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index a0ff2b66e22b..4b4007b3083a 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -111,7 +111,6 @@ int show_interrupts(struct seq_file *p, void *v)
111 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 111 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
112#endif 112#endif
113 seq_printf(p, " %14s", irq_desc[i].chip->name); 113 seq_printf(p, " %14s", irq_desc[i].chip->name);
114 seq_printf(p, "-%-8s", irq_desc[i].name);
115 seq_printf(p, " %s", action->name); 114 seq_printf(p, " %s", action->name);
116 115
117 for (action=action->next; action; action = action->next) 116 for (action=action->next; action; action = action->next)
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index aa2c55e3b55f..2f8452b404c7 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -32,6 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/binfmts.h> 33#include <linux/binfmts.h>
34#include <linux/security.h> 34#include <linux/security.h>
35#include <linux/syscalls.h>
35#include <linux/compat.h> 36#include <linux/compat.h>
36#include <linux/vfs.h> 37#include <linux/vfs.h>
37#include <linux/ipc.h> 38#include <linux/ipc.h>
@@ -63,9 +64,9 @@
63#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL)) 64#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
64#endif 65#endif
65 66
66asmlinkage unsigned long 67SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
67sys32_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 68 unsigned long, prot, unsigned long, flags, unsigned long, fd,
68 unsigned long flags, unsigned long fd, unsigned long pgoff) 69 unsigned long, pgoff)
69{ 70{
70 struct file * file = NULL; 71 struct file * file = NULL;
71 unsigned long error; 72 unsigned long error;
@@ -121,21 +122,21 @@ struct rlimit32 {
121 int rlim_max; 122 int rlim_max;
122}; 123};
123 124
124asmlinkage long sys32_truncate64(const char __user * path, 125SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
125 unsigned long __dummy, int a2, int a3) 126 unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
126{ 127{
127 return sys_truncate(path, merge_64(a2, a3)); 128 return sys_truncate(path, merge_64(a2, a3));
128} 129}
129 130
130asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy, 131SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
131 int a2, int a3) 132 unsigned long, a2, unsigned long, a3)
132{ 133{
133 return sys_ftruncate(fd, merge_64(a2, a3)); 134 return sys_ftruncate(fd, merge_64(a2, a3));
134} 135}
135 136
136asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high, 137SYSCALL_DEFINE5(32_llseek, unsigned long, fd, unsigned long, offset_high,
137 unsigned int offset_low, loff_t __user * result, 138 unsigned long, offset_low, loff_t __user *, result,
138 unsigned int origin) 139 unsigned long, origin)
139{ 140{
140 return sys_llseek(fd, offset_high, offset_low, result, origin); 141 return sys_llseek(fd, offset_high, offset_low, result, origin);
141} 142}
@@ -144,20 +145,20 @@ asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high,
144 lseek back to original location. They fail just like lseek does on 145 lseek back to original location. They fail just like lseek does on
145 non-seekable files. */ 146 non-seekable files. */
146 147
147asmlinkage ssize_t sys32_pread(unsigned int fd, char __user * buf, 148SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
148 size_t count, u32 unused, u64 a4, u64 a5) 149 unsigned long, unused, unsigned long, a4, unsigned long, a5)
149{ 150{
150 return sys_pread64(fd, buf, count, merge_64(a4, a5)); 151 return sys_pread64(fd, buf, count, merge_64(a4, a5));
151} 152}
152 153
153asmlinkage ssize_t sys32_pwrite(unsigned int fd, const char __user * buf, 154SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
154 size_t count, u32 unused, u64 a4, u64 a5) 155 size_t, count, u32, unused, u64, a4, u64, a5)
155{ 156{
156 return sys_pwrite64(fd, buf, count, merge_64(a4, a5)); 157 return sys_pwrite64(fd, buf, count, merge_64(a4, a5));
157} 158}
158 159
159asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid, 160SYSCALL_DEFINE2(32_sched_rr_get_interval, compat_pid_t, pid,
160 struct compat_timespec __user *interval) 161 struct compat_timespec __user *, interval)
161{ 162{
162 struct timespec t; 163 struct timespec t;
163 int ret; 164 int ret;
@@ -174,8 +175,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
174 175
175#ifdef CONFIG_SYSVIPC 176#ifdef CONFIG_SYSVIPC
176 177
177asmlinkage long 178SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
178sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) 179 unsigned long, ptr, unsigned long, fifth)
179{ 180{
180 int version, err; 181 int version, err;
181 182
@@ -233,8 +234,8 @@ sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
233 234
234#else 235#else
235 236
236asmlinkage long 237SYSCALL_DEFINE6(32_ipc, u32, call, int, first, int, second, int, third,
237 sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) 238 u32, ptr, u32, fifth)
238{ 239{
239 return -ENOSYS; 240 return -ENOSYS;
240} 241}
@@ -242,7 +243,7 @@ sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
242#endif /* CONFIG_SYSVIPC */ 243#endif /* CONFIG_SYSVIPC */
243 244
244#ifdef CONFIG_MIPS32_N32 245#ifdef CONFIG_MIPS32_N32
245asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg) 246SYSCALL_DEFINE4(n32_semctl, int, semid, int, semnum, int, cmd, u32, arg)
246{ 247{
247 /* compat_sys_semctl expects a pointer to union semun */ 248 /* compat_sys_semctl expects a pointer to union semun */
248 u32 __user *uptr = compat_alloc_user_space(sizeof(u32)); 249 u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
@@ -251,13 +252,14 @@ asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
251 return compat_sys_semctl(semid, semnum, cmd, uptr); 252 return compat_sys_semctl(semid, semnum, cmd, uptr);
252} 253}
253 254
254asmlinkage long sysn32_msgsnd(int msqid, u32 msgp, unsigned msgsz, int msgflg) 255SYSCALL_DEFINE4(n32_msgsnd, int, msqid, u32, msgp, unsigned int, msgsz,
256 int, msgflg)
255{ 257{
256 return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp)); 258 return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp));
257} 259}
258 260
259asmlinkage long sysn32_msgrcv(int msqid, u32 msgp, size_t msgsz, int msgtyp, 261SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz,
260 int msgflg) 262 int, msgtyp, int, msgflg)
261{ 263{
262 return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64, 264 return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64,
263 compat_ptr(msgp)); 265 compat_ptr(msgp));
@@ -277,7 +279,7 @@ struct sysctl_args32
277 279
278#ifdef CONFIG_SYSCTL_SYSCALL 280#ifdef CONFIG_SYSCTL_SYSCALL
279 281
280asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args) 282SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args)
281{ 283{
282 struct sysctl_args32 tmp; 284 struct sysctl_args32 tmp;
283 int error; 285 int error;
@@ -316,9 +318,16 @@ asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
316 return error; 318 return error;
317} 319}
318 320
321#else
322
323SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args)
324{
325 return -ENOSYS;
326}
327
319#endif /* CONFIG_SYSCTL_SYSCALL */ 328#endif /* CONFIG_SYSCTL_SYSCALL */
320 329
321asmlinkage long sys32_newuname(struct new_utsname __user * name) 330SYSCALL_DEFINE1(32_newuname, struct new_utsname __user *, name)
322{ 331{
323 int ret = 0; 332 int ret = 0;
324 333
@@ -334,7 +343,7 @@ asmlinkage long sys32_newuname(struct new_utsname __user * name)
334 return ret; 343 return ret;
335} 344}
336 345
337asmlinkage int sys32_personality(unsigned long personality) 346SYSCALL_DEFINE1(32_personality, unsigned long, personality)
338{ 347{
339 int ret; 348 int ret;
340 personality &= 0xffffffff; 349 personality &= 0xffffffff;
@@ -357,7 +366,7 @@ struct ustat32 {
357 366
358extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf); 367extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
359 368
360asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) 369SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
361{ 370{
362 int err; 371 int err;
363 struct ustat tmp; 372 struct ustat tmp;
@@ -381,8 +390,8 @@ out:
381 return err; 390 return err;
382} 391}
383 392
384asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, 393SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
385 s32 count) 394 compat_off_t __user *, offset, s32, count)
386{ 395{
387 mm_segment_t old_fs = get_fs(); 396 mm_segment_t old_fs = get_fs();
388 int ret; 397 int ret;
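The conversions above replace open-coded asmlinkage prototypes with SYSCALL_DEFINEn(). A much simplified userspace sketch of the idea (the real macros live in <linux/syscalls.h> and are considerably more involved; the names below are made up for illustration): every argument travels through a long-sized slot and is cast back to its declared type inside a generated wrapper, which is why the conversion is largely mechanical.

#include <stdio.h>

/* Made-up, simplified stand-in for the kernel's SYSCALL_DEFINEn machinery. */
#define MY_SYSCALL_DEFINE2(name, t1, a1, t2, a2)                        \
        static long do_##name(t1 a1, t2 a2);                            \
        long my_sys_##name(long __a1, long __a2)                        \
        {                                                               \
                return do_##name((t1)__a1, (t2)__a2);                   \
        }                                                               \
        static long do_##name(t1 a1, t2 a2)

MY_SYSCALL_DEFINE2(example, unsigned int, value, int, flags)
{
        printf("value=%u flags=%d\n", value, flags);
        return 0;
}

int main(void)
{
        /* Callers only ever see the long-argument entry point. */
        return (int)my_sys_example(8, -1);
}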
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 51d1ba415b90..9ab70c3b5be6 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -399,7 +399,7 @@ einval: li v0, -ENOSYS
399 sys sys_swapon 2 399 sys sys_swapon 2
400 sys sys_reboot 3 400 sys sys_reboot 3
401 sys sys_old_readdir 3 401 sys sys_old_readdir 3
402 sys old_mmap 6 /* 4090 */ 402 sys sys_mips_mmap 6 /* 4090 */
403 sys sys_munmap 2 403 sys sys_munmap 2
404 sys sys_truncate 2 404 sys sys_truncate 2
405 sys sys_ftruncate 2 405 sys sys_ftruncate 2
@@ -519,7 +519,7 @@ einval: li v0, -ENOSYS
519 sys sys_sendfile 4 519 sys sys_sendfile 4
520 sys sys_ni_syscall 0 520 sys sys_ni_syscall 0
521 sys sys_ni_syscall 0 521 sys sys_ni_syscall 0
522 sys sys_mmap2 6 /* 4210 */ 522 sys sys_mips_mmap2 6 /* 4210 */
523 sys sys_truncate64 4 523 sys sys_truncate64 4
524 sys sys_ftruncate64 4 524 sys sys_ftruncate64 4
525 sys sys_stat64 2 525 sys sys_stat64 2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a9e171618994..9b4698667154 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -207,7 +207,7 @@ sys_call_table:
207 PTR sys_newlstat 207 PTR sys_newlstat
208 PTR sys_poll 208 PTR sys_poll
209 PTR sys_lseek 209 PTR sys_lseek
210 PTR old_mmap 210 PTR sys_mips_mmap
211 PTR sys_mprotect /* 5010 */ 211 PTR sys_mprotect /* 5010 */
212 PTR sys_munmap 212 PTR sys_munmap
213 PTR sys_brk 213 PTR sys_brk
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 30f3b6317a83..7438e92f8a01 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -129,12 +129,12 @@ EXPORT(sysn32_call_table)
129 PTR sys_newlstat 129 PTR sys_newlstat
130 PTR sys_poll 130 PTR sys_poll
131 PTR sys_lseek 131 PTR sys_lseek
132 PTR old_mmap 132 PTR sys_mips_mmap
133 PTR sys_mprotect /* 6010 */ 133 PTR sys_mprotect /* 6010 */
134 PTR sys_munmap 134 PTR sys_munmap
135 PTR sys_brk 135 PTR sys_brk
136 PTR sys32_rt_sigaction 136 PTR sys_32_rt_sigaction
137 PTR sys32_rt_sigprocmask 137 PTR sys_32_rt_sigprocmask
138 PTR compat_sys_ioctl /* 6015 */ 138 PTR compat_sys_ioctl /* 6015 */
139 PTR sys_pread64 139 PTR sys_pread64
140 PTR sys_pwrite64 140 PTR sys_pwrite64
@@ -159,7 +159,7 @@ EXPORT(sysn32_call_table)
159 PTR compat_sys_setitimer 159 PTR compat_sys_setitimer
160 PTR sys_alarm 160 PTR sys_alarm
161 PTR sys_getpid 161 PTR sys_getpid
162 PTR sys32_sendfile 162 PTR sys_32_sendfile
163 PTR sys_socket /* 6040 */ 163 PTR sys_socket /* 6040 */
164 PTR sys_connect 164 PTR sys_connect
165 PTR sys_accept 165 PTR sys_accept
@@ -181,14 +181,14 @@ EXPORT(sysn32_call_table)
181 PTR sys_exit 181 PTR sys_exit
182 PTR compat_sys_wait4 182 PTR compat_sys_wait4
183 PTR sys_kill /* 6060 */ 183 PTR sys_kill /* 6060 */
184 PTR sys32_newuname 184 PTR sys_32_newuname
185 PTR sys_semget 185 PTR sys_semget
186 PTR sys_semop 186 PTR sys_semop
187 PTR sysn32_semctl 187 PTR sys_n32_semctl
188 PTR sys_shmdt /* 6065 */ 188 PTR sys_shmdt /* 6065 */
189 PTR sys_msgget 189 PTR sys_msgget
190 PTR sysn32_msgsnd 190 PTR sys_n32_msgsnd
191 PTR sysn32_msgrcv 191 PTR sys_n32_msgrcv
192 PTR compat_sys_msgctl 192 PTR compat_sys_msgctl
193 PTR compat_sys_fcntl /* 6070 */ 193 PTR compat_sys_fcntl /* 6070 */
194 PTR sys_flock 194 PTR sys_flock
@@ -245,15 +245,15 @@ EXPORT(sysn32_call_table)
245 PTR sys_getsid 245 PTR sys_getsid
246 PTR sys_capget 246 PTR sys_capget
247 PTR sys_capset 247 PTR sys_capset
248 PTR sys32_rt_sigpending /* 6125 */ 248 PTR sys_32_rt_sigpending /* 6125 */
249 PTR compat_sys_rt_sigtimedwait 249 PTR compat_sys_rt_sigtimedwait
250 PTR sys32_rt_sigqueueinfo 250 PTR sys_32_rt_sigqueueinfo
251 PTR sysn32_rt_sigsuspend 251 PTR sysn32_rt_sigsuspend
252 PTR sys32_sigaltstack 252 PTR sys32_sigaltstack
253 PTR compat_sys_utime /* 6130 */ 253 PTR compat_sys_utime /* 6130 */
254 PTR sys_mknod 254 PTR sys_mknod
255 PTR sys32_personality 255 PTR sys_32_personality
256 PTR sys32_ustat 256 PTR sys_32_ustat
257 PTR compat_sys_statfs 257 PTR compat_sys_statfs
258 PTR compat_sys_fstatfs /* 6135 */ 258 PTR compat_sys_fstatfs /* 6135 */
259 PTR sys_sysfs 259 PTR sys_sysfs
@@ -265,14 +265,14 @@ EXPORT(sysn32_call_table)
265 PTR sys_sched_getscheduler 265 PTR sys_sched_getscheduler
266 PTR sys_sched_get_priority_max 266 PTR sys_sched_get_priority_max
267 PTR sys_sched_get_priority_min 267 PTR sys_sched_get_priority_min
268 PTR sys32_sched_rr_get_interval /* 6145 */ 268 PTR sys_32_sched_rr_get_interval /* 6145 */
269 PTR sys_mlock 269 PTR sys_mlock
270 PTR sys_munlock 270 PTR sys_munlock
271 PTR sys_mlockall 271 PTR sys_mlockall
272 PTR sys_munlockall 272 PTR sys_munlockall
273 PTR sys_vhangup /* 6150 */ 273 PTR sys_vhangup /* 6150 */
274 PTR sys_pivot_root 274 PTR sys_pivot_root
275 PTR sys32_sysctl 275 PTR sys_32_sysctl
276 PTR sys_prctl 276 PTR sys_prctl
277 PTR compat_sys_adjtimex 277 PTR compat_sys_adjtimex
278 PTR compat_sys_setrlimit /* 6155 */ 278 PTR compat_sys_setrlimit /* 6155 */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index fefef4af8595..b0fef4ff9827 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -265,12 +265,12 @@ sys_call_table:
265 PTR sys_olduname 265 PTR sys_olduname
266 PTR sys_umask /* 4060 */ 266 PTR sys_umask /* 4060 */
267 PTR sys_chroot 267 PTR sys_chroot
268 PTR sys32_ustat 268 PTR sys_32_ustat
269 PTR sys_dup2 269 PTR sys_dup2
270 PTR sys_getppid 270 PTR sys_getppid
271 PTR sys_getpgrp /* 4065 */ 271 PTR sys_getpgrp /* 4065 */
272 PTR sys_setsid 272 PTR sys_setsid
273 PTR sys32_sigaction 273 PTR sys_32_sigaction
274 PTR sys_sgetmask 274 PTR sys_sgetmask
275 PTR sys_ssetmask 275 PTR sys_ssetmask
276 PTR sys_setreuid /* 4070 */ 276 PTR sys_setreuid /* 4070 */
@@ -293,7 +293,7 @@ sys_call_table:
293 PTR sys_swapon 293 PTR sys_swapon
294 PTR sys_reboot 294 PTR sys_reboot
295 PTR compat_sys_old_readdir 295 PTR compat_sys_old_readdir
296 PTR old_mmap /* 4090 */ 296 PTR sys_mips_mmap /* 4090 */
297 PTR sys_munmap 297 PTR sys_munmap
298 PTR sys_truncate 298 PTR sys_truncate
299 PTR sys_ftruncate 299 PTR sys_ftruncate
@@ -320,12 +320,12 @@ sys_call_table:
320 PTR compat_sys_wait4 320 PTR compat_sys_wait4
321 PTR sys_swapoff /* 4115 */ 321 PTR sys_swapoff /* 4115 */
322 PTR compat_sys_sysinfo 322 PTR compat_sys_sysinfo
323 PTR sys32_ipc 323 PTR sys_32_ipc
324 PTR sys_fsync 324 PTR sys_fsync
325 PTR sys32_sigreturn 325 PTR sys32_sigreturn
326 PTR sys32_clone /* 4120 */ 326 PTR sys32_clone /* 4120 */
327 PTR sys_setdomainname 327 PTR sys_setdomainname
328 PTR sys32_newuname 328 PTR sys_32_newuname
329 PTR sys_ni_syscall /* sys_modify_ldt */ 329 PTR sys_ni_syscall /* sys_modify_ldt */
330 PTR compat_sys_adjtimex 330 PTR compat_sys_adjtimex
331 PTR sys_mprotect /* 4125 */ 331 PTR sys_mprotect /* 4125 */
@@ -339,11 +339,11 @@ sys_call_table:
339 PTR sys_fchdir 339 PTR sys_fchdir
340 PTR sys_bdflush 340 PTR sys_bdflush
341 PTR sys_sysfs /* 4135 */ 341 PTR sys_sysfs /* 4135 */
342 PTR sys32_personality 342 PTR sys_32_personality
343 PTR sys_ni_syscall /* for afs_syscall */ 343 PTR sys_ni_syscall /* for afs_syscall */
344 PTR sys_setfsuid 344 PTR sys_setfsuid
345 PTR sys_setfsgid 345 PTR sys_setfsgid
346 PTR sys32_llseek /* 4140 */ 346 PTR sys_32_llseek /* 4140 */
347 PTR compat_sys_getdents 347 PTR compat_sys_getdents
348 PTR compat_sys_select 348 PTR compat_sys_select
349 PTR sys_flock 349 PTR sys_flock
@@ -356,7 +356,7 @@ sys_call_table:
356 PTR sys_ni_syscall /* 4150 */ 356 PTR sys_ni_syscall /* 4150 */
357 PTR sys_getsid 357 PTR sys_getsid
358 PTR sys_fdatasync 358 PTR sys_fdatasync
359 PTR sys32_sysctl 359 PTR sys_32_sysctl
360 PTR sys_mlock 360 PTR sys_mlock
361 PTR sys_munlock /* 4155 */ 361 PTR sys_munlock /* 4155 */
362 PTR sys_mlockall 362 PTR sys_mlockall
@@ -368,7 +368,7 @@ sys_call_table:
368 PTR sys_sched_yield 368 PTR sys_sched_yield
369 PTR sys_sched_get_priority_max 369 PTR sys_sched_get_priority_max
370 PTR sys_sched_get_priority_min 370 PTR sys_sched_get_priority_min
371 PTR sys32_sched_rr_get_interval /* 4165 */ 371 PTR sys_32_sched_rr_get_interval /* 4165 */
372 PTR compat_sys_nanosleep 372 PTR compat_sys_nanosleep
373 PTR sys_mremap 373 PTR sys_mremap
374 PTR sys_accept 374 PTR sys_accept
@@ -397,25 +397,25 @@ sys_call_table:
397 PTR sys_getresgid 397 PTR sys_getresgid
398 PTR sys_prctl 398 PTR sys_prctl
399 PTR sys32_rt_sigreturn 399 PTR sys32_rt_sigreturn
400 PTR sys32_rt_sigaction 400 PTR sys_32_rt_sigaction
401 PTR sys32_rt_sigprocmask /* 4195 */ 401 PTR sys_32_rt_sigprocmask /* 4195 */
402 PTR sys32_rt_sigpending 402 PTR sys_32_rt_sigpending
403 PTR compat_sys_rt_sigtimedwait 403 PTR compat_sys_rt_sigtimedwait
404 PTR sys32_rt_sigqueueinfo 404 PTR sys_32_rt_sigqueueinfo
405 PTR sys32_rt_sigsuspend 405 PTR sys32_rt_sigsuspend
406 PTR sys32_pread /* 4200 */ 406 PTR sys_32_pread /* 4200 */
407 PTR sys32_pwrite 407 PTR sys_32_pwrite
408 PTR sys_chown 408 PTR sys_chown
409 PTR sys_getcwd 409 PTR sys_getcwd
410 PTR sys_capget 410 PTR sys_capget
411 PTR sys_capset /* 4205 */ 411 PTR sys_capset /* 4205 */
412 PTR sys32_sigaltstack 412 PTR sys32_sigaltstack
413 PTR sys32_sendfile 413 PTR sys_32_sendfile
414 PTR sys_ni_syscall 414 PTR sys_ni_syscall
415 PTR sys_ni_syscall 415 PTR sys_ni_syscall
416 PTR sys32_mmap2 /* 4210 */ 416 PTR sys_mips_mmap2 /* 4210 */
417 PTR sys32_truncate64 417 PTR sys_32_truncate64
418 PTR sys32_ftruncate64 418 PTR sys_32_ftruncate64
419 PTR sys_newstat 419 PTR sys_newstat
420 PTR sys_newlstat 420 PTR sys_newlstat
421 PTR sys_newfstat /* 4215 */ 421 PTR sys_newfstat /* 4215 */
@@ -481,7 +481,7 @@ sys_call_table:
481 PTR compat_sys_mq_notify /* 4275 */ 481 PTR compat_sys_mq_notify /* 4275 */
482 PTR compat_sys_mq_getsetattr 482 PTR compat_sys_mq_getsetattr
483 PTR sys_ni_syscall /* sys_vserver */ 483 PTR sys_ni_syscall /* sys_vserver */
484 PTR sys32_waitid 484 PTR sys_32_waitid
485 PTR sys_ni_syscall /* available, was setaltroot */ 485 PTR sys_ni_syscall /* available, was setaltroot */
486 PTR sys_add_key /* 4280 */ 486 PTR sys_add_key /* 4280 */
487 PTR sys_request_key 487 PTR sys_request_key
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index a4e106c56ab5..830c5ef9932b 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -19,6 +19,7 @@
19#include <linux/ptrace.h> 19#include <linux/ptrace.h>
20#include <linux/unistd.h> 20#include <linux/unistd.h>
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/syscalls.h>
22#include <linux/uaccess.h> 23#include <linux/uaccess.h>
23 24
24#include <asm/abi.h> 25#include <asm/abi.h>
@@ -338,8 +339,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
338} 339}
339 340
340#ifdef CONFIG_TRAD_SIGNALS 341#ifdef CONFIG_TRAD_SIGNALS
341asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act, 342SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
342 struct sigaction __user *oact) 343 struct sigaction __user *, oact)
343{ 344{
344 struct k_sigaction new_ka, old_ka; 345 struct k_sigaction new_ka, old_ka;
345 int ret; 346 int ret;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 652709b353ad..2e74075ac0ca 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -349,8 +349,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
349 return -ERESTARTNOHAND; 349 return -ERESTARTNOHAND;
350} 350}
351 351
352asmlinkage int sys32_sigaction(int sig, const struct sigaction32 __user *act, 352SYSCALL_DEFINE3(32_sigaction, long, sig, const struct sigaction32 __user *, act,
353 struct sigaction32 __user *oact) 353 struct sigaction32 __user *, oact)
354{ 354{
355 struct k_sigaction new_ka, old_ka; 355 struct k_sigaction new_ka, old_ka;
356 int ret; 356 int ret;
@@ -704,9 +704,9 @@ struct mips_abi mips_abi_32 = {
704 .restart = __NR_O32_restart_syscall 704 .restart = __NR_O32_restart_syscall
705}; 705};
706 706
707asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, 707SYSCALL_DEFINE4(32_rt_sigaction, int, sig,
708 struct sigaction32 __user *oact, 708 const struct sigaction32 __user *, act,
709 unsigned int sigsetsize) 709 struct sigaction32 __user *, oact, unsigned int, sigsetsize)
710{ 710{
711 struct k_sigaction new_sa, old_sa; 711 struct k_sigaction new_sa, old_sa;
712 int ret = -EINVAL; 712 int ret = -EINVAL;
@@ -748,8 +748,8 @@ out:
748 return ret; 748 return ret;
749} 749}
750 750
751asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, 751SYSCALL_DEFINE4(32_rt_sigprocmask, int, how, compat_sigset_t __user *, set,
752 compat_sigset_t __user *oset, unsigned int sigsetsize) 752 compat_sigset_t __user *, oset, unsigned int, sigsetsize)
753{ 753{
754 sigset_t old_set, new_set; 754 sigset_t old_set, new_set;
755 int ret; 755 int ret;
@@ -770,8 +770,8 @@ asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
770 return ret; 770 return ret;
771} 771}
772 772
773asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset, 773SYSCALL_DEFINE2(32_rt_sigpending, compat_sigset_t __user *, uset,
774 unsigned int sigsetsize) 774 unsigned int, sigsetsize)
775{ 775{
776 int ret; 776 int ret;
777 sigset_t set; 777 sigset_t set;
@@ -787,7 +787,8 @@ asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset,
787 return ret; 787 return ret;
788} 788}
789 789
790asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) 790SYSCALL_DEFINE3(32_rt_sigqueueinfo, int, pid, int, sig,
791 compat_siginfo_t __user *, uinfo)
791{ 792{
792 siginfo_t info; 793 siginfo_t info;
793 int ret; 794 int ret;
@@ -802,10 +803,9 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *
802 return ret; 803 return ret;
803} 804}
804 805
805asmlinkage long 806SYSCALL_DEFINE5(32_waitid, int, which, compat_pid_t, pid,
806sys32_waitid(int which, compat_pid_t pid, 807 compat_siginfo_t __user *, uinfo, int, options,
807 compat_siginfo_t __user *uinfo, int options, 808 struct compat_rusage __user *, uru)
808 struct compat_rusage __user *uru)
809{ 809{
810 siginfo_t info; 810 siginfo_t info;
811 struct rusage ru; 811 struct rusage ru;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 37970d9b2186..8cf384644040 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -152,9 +152,9 @@ out:
152 return error; 152 return error;
153} 153}
154 154
155asmlinkage unsigned long 155SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
156old_mmap(unsigned long addr, unsigned long len, int prot, 156 unsigned long, prot, unsigned long, flags, unsigned long,
157 int flags, int fd, off_t offset) 157 fd, off_t, offset)
158{ 158{
159 unsigned long result; 159 unsigned long result;
160 160
@@ -168,9 +168,9 @@ out:
168 return result; 168 return result;
169} 169}
170 170
171asmlinkage unsigned long 171SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
172sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 172 unsigned long, prot, unsigned long, flags, unsigned long, fd,
173 unsigned long flags, unsigned long fd, unsigned long pgoff) 173 unsigned long, pgoff)
174{ 174{
175 if (pgoff & (~PAGE_MASK >> 12)) 175 if (pgoff & (~PAGE_MASK >> 12))
176 return -EINVAL; 176 return -EINVAL;
@@ -240,7 +240,7 @@ out:
240/* 240/*
241 * Compacrapability ... 241 * Compacrapability ...
242 */ 242 */
243asmlinkage int sys_uname(struct old_utsname __user * name) 243SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
244{ 244{
245 if (name && !copy_to_user(name, utsname(), sizeof (*name))) 245 if (name && !copy_to_user(name, utsname(), sizeof (*name)))
246 return 0; 246 return 0;
@@ -250,7 +250,7 @@ asmlinkage int sys_uname(struct old_utsname __user * name)
250/* 250/*
251 * Compacrapability ... 251 * Compacrapability ...
252 */ 252 */
253asmlinkage int sys_olduname(struct oldold_utsname __user * name) 253SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
254{ 254{
255 int error; 255 int error;
256 256
@@ -279,7 +279,7 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
279 return error; 279 return error;
280} 280}
281 281
282asmlinkage int sys_set_thread_area(unsigned long addr) 282SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
283{ 283{
284 struct thread_info *ti = task_thread_info(current); 284 struct thread_info *ti = task_thread_info(current);
285 285
@@ -290,7 +290,7 @@ asmlinkage int sys_set_thread_area(unsigned long addr)
290 return 0; 290 return 0;
291} 291}
292 292
293asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3) 293asmlinkage int _sys_sysmips(long cmd, long arg1, long arg2, long arg3)
294{ 294{
295 switch (cmd) { 295 switch (cmd) {
296 case MIPS_ATOMIC_SET: 296 case MIPS_ATOMIC_SET:
@@ -325,8 +325,8 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
325 * 325 *
326 * This is really horribly ugly. 326 * This is really horribly ugly.
327 */ 327 */
328asmlinkage int sys_ipc(unsigned int call, int first, int second, 328SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, int, second,
329 unsigned long third, void __user *ptr, long fifth) 329 unsigned long, third, void __user *, ptr, long, fifth)
330{ 330{
331 int version, ret; 331 int version, ret;
332 332
@@ -411,7 +411,7 @@ asmlinkage int sys_ipc(unsigned int call, int first, int second,
411/* 411/*
412 * No implemented yet ... 412 * No implemented yet ...
413 */ 413 */
414asmlinkage int sys_cachectl(char *addr, int nbytes, int op) 414SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
415{ 415{
416 return -ENOSYS; 416 return -ENOSYS;
417} 417}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 98ad0a82c29e..694d51f523d1 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -13,6 +13,7 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/syscalls.h>
16#include <linux/mm.h> 17#include <linux/mm.h>
17 18
18#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
@@ -58,8 +59,8 @@ EXPORT_SYMBOL(_dma_cache_wback_inv);
58 * We could optimize the case where the cache argument is not BCACHE but 59 * We could optimize the case where the cache argument is not BCACHE but
59 * that seems very atypical use ... 60 * that seems very atypical use ...
60 */ 61 */
61asmlinkage int sys_cacheflush(unsigned long addr, 62SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
62 unsigned long bytes, unsigned int cache) 63 unsigned int, cache)
63{ 64{
64 if (bytes == 0) 65 if (bytes == 0)
65 return 0; 66 return 0;
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 9a9f43358879..41d16822e616 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -7,6 +7,7 @@ mainmenu "Linux Kernel Configuration"
7 7
8config MN10300 8config MN10300
9 def_bool y 9 def_bool y
10 select HAVE_OPROFILE
10 11
11config AM33 12config AM33
12 def_bool y 13 def_bool y
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 1a86425fec42..07dbbcda3b2e 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -173,7 +173,7 @@ static int pci_ampci_write_config_byte(struct pci_bus *bus, unsigned int devfn,
173 BRIDGEREGB(where) = value; 173 BRIDGEREGB(where) = value;
174 } else { 174 } else {
175 if (bus->number == 0 && 175 if (bus->number == 0 &&
176 (devfn == PCI_DEVFN(2, 0) && devfn == PCI_DEVFN(3, 0)) 176 (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(3, 0))
177 ) 177 )
178 __pcidebug("<= %02x", bus, devfn, where, value); 178 __pcidebug("<= %02x", bus, devfn, where, value);
179 CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); 179 CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where);
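The one-character fix above turns an always-false condition into the intended "either slot" test; a tiny standalone demonstration, with PCI_DEVFN() written out as commonly defined:

#include <stdio.h>

#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
        unsigned int devfn = PCI_DEVFN(3, 0);

        if (devfn == PCI_DEVFN(2, 0) && devfn == PCI_DEVFN(3, 0))
                puts("old condition matched");          /* unreachable */
        if (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(3, 0))
                puts("fixed condition matched");        /* taken for slot 2 or 3 */
        return 0;
}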
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index d811a8cd7b58..4774c2f92232 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -210,5 +210,10 @@ struct compat_shmid64_ds {
210 compat_ulong_t __unused6; 210 compat_ulong_t __unused6;
211}; 211};
212 212
213static inline int is_compat_task(void)
214{
215 return test_thread_flag(TIF_32BIT);
216}
217
213#endif /* __KERNEL__ */ 218#endif /* __KERNEL__ */
214#endif /* _ASM_POWERPC_COMPAT_H */ 219#endif /* _ASM_POWERPC_COMPAT_H */
diff --git a/arch/powerpc/include/asm/pgtable-4k.h b/arch/powerpc/include/asm/pgtable-4k.h
index 6b18ba9d2d85..1dbca4e7de67 100644
--- a/arch/powerpc/include/asm/pgtable-4k.h
+++ b/arch/powerpc/include/asm/pgtable-4k.h
@@ -60,7 +60,7 @@
60/* It should be preserving the high 48 bits and then specifically */ 60/* It should be preserving the high 48 bits and then specifically */
61/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */ 61/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
62#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \ 62#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
63 _PAGE_HPTEFLAGS) 63 _PAGE_HPTEFLAGS | _PAGE_SPECIAL)
64 64
65/* Bits to mask out from a PMD to get to the PTE page */ 65/* Bits to mask out from a PMD to get to the PTE page */
66#define PMD_MASKED_BITS 0 66#define PMD_MASKED_BITS 0
diff --git a/arch/powerpc/include/asm/pgtable-64k.h b/arch/powerpc/include/asm/pgtable-64k.h
index 07b0d8f09cb6..7389003349a6 100644
--- a/arch/powerpc/include/asm/pgtable-64k.h
+++ b/arch/powerpc/include/asm/pgtable-64k.h
@@ -114,7 +114,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
114 * pgprot changes 114 * pgprot changes
115 */ 115 */
116#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ 116#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
117 _PAGE_ACCESSED) 117 _PAGE_ACCESSED | _PAGE_SPECIAL)
118 118
119/* Bits to mask out from a PMD to get to the PTE page */ 119/* Bits to mask out from a PMD to get to the PTE page */
120#define PMD_MASKED_BITS 0x1ff 120#define PMD_MASKED_BITS 0x1ff
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index f69a4d977729..820b5f0a35ce 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -429,7 +429,8 @@ extern int icache_44x_need_flush;
429#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() 429#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
430#endif 430#endif
431 431
432#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 432#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
433 _PAGE_SPECIAL)
433 434
434 435
435#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ 436#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
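The three pgtable changes above add _PAGE_SPECIAL to _PAGE_CHG_MASK. A userspace sketch with made-up bit values (not the real powerpc layout) of what the mask controls: a protection change keeps only the masked bits of the old PTE, so any flag missing from the mask is silently dropped.

#include <stdio.h>

/* Illustrative bit values only; the real definitions are per-platform. */
#define _PAGE_ACCESSED  0x001
#define _PAGE_DIRTY     0x002
#define _PAGE_SPECIAL   0x004
#define _PAGE_RW        0x100
#define PAGE_MASK       (~0xfffUL)      /* the page-frame bits are always kept */

/* Simplified model of pte_modify(): keep masked bits, apply new protection. */
static unsigned long change_prot(unsigned long pte, unsigned long newprot,
                                 unsigned long chg_mask)
{
        return (pte & chg_mask) | newprot;
}

int main(void)
{
        unsigned long pte = 0x1000UL | _PAGE_SPECIAL | _PAGE_ACCESSED;
        unsigned long old_mask = PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY;
        unsigned long new_mask = old_mask | _PAGE_SPECIAL;

        printf("old mask keeps special bit: %d\n",
               !!(change_prot(pte, _PAGE_RW, old_mask) & _PAGE_SPECIAL));
        printf("new mask keeps special bit: %d\n",
               !!(change_prot(pte, _PAGE_RW, new_mask) & _PAGE_SPECIAL));
        return 0;
}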
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
index 853765eb1f65..00c1d9133cfe 100644
--- a/arch/powerpc/include/asm/seccomp.h
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -1,10 +1,6 @@
1#ifndef _ASM_POWERPC_SECCOMP_H 1#ifndef _ASM_POWERPC_SECCOMP_H
2#define _ASM_POWERPC_SECCOMP_H 2#define _ASM_POWERPC_SECCOMP_H
3 3
4#ifdef __KERNEL__
5#include <linux/thread_info.h>
6#endif
7
8#include <linux/unistd.h> 4#include <linux/unistd.h>
9 5
10#define __NR_seccomp_read __NR_read 6#define __NR_seccomp_read __NR_read
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 5af4e9b2dbe2..73cb6a3229ae 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -367,27 +367,24 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
367static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg, 367static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
368 unsigned int flags) 368 unsigned int flags)
369{ 369{
370 char *ptr = (char *) &current->thread.TS_FPR(reg); 370 char *ptr0 = (char *) &current->thread.TS_FPR(reg);
371 int i, ret; 371 char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
372 int i, ret, sw = 0;
372 373
373 if (!(flags & F)) 374 if (!(flags & F))
374 return 0; 375 return 0;
375 if (reg & 1) 376 if (reg & 1)
376 return 0; /* invalid form: FRS/FRT must be even */ 377 return 0; /* invalid form: FRS/FRT must be even */
377 if (!(flags & SW)) { 378 if (flags & SW)
378 /* not byte-swapped - easy */ 379 sw = 7;
379 if (!(flags & ST)) 380 ret = 0;
380 ret = __copy_from_user(ptr, addr, 16); 381 for (i = 0; i < 8; ++i) {
381 else 382 if (!(flags & ST)) {
382 ret = __copy_to_user(addr, ptr, 16); 383 ret |= __get_user(ptr0[i^sw], addr + i);
383 } else { 384 ret |= __get_user(ptr1[i^sw], addr + i + 8);
384 /* each FPR value is byte-swapped separately */ 385 } else {
385 ret = 0; 386 ret |= __put_user(ptr0[i^sw], addr + i);
386 for (i = 0; i < 16; ++i) { 387 ret |= __put_user(ptr1[i^sw], addr + i + 8);
387 if (!(flags & ST))
388 ret |= __get_user(ptr[i^7], addr + i);
389 else
390 ret |= __put_user(ptr[i^7], addr + i);
391 } 388 }
392 } 389 }
393 if (ret) 390 if (ret)
@@ -646,11 +643,16 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
646 unsigned int areg, struct pt_regs *regs, 643 unsigned int areg, struct pt_regs *regs,
647 unsigned int flags, unsigned int length) 644 unsigned int flags, unsigned int length)
648{ 645{
649 char *ptr = (char *) &current->thread.TS_FPR(reg); 646 char *ptr;
650 int ret = 0; 647 int ret = 0;
651 648
652 flush_vsx_to_thread(current); 649 flush_vsx_to_thread(current);
653 650
651 if (reg < 32)
652 ptr = (char *) &current->thread.TS_FPR(reg);
653 else
654 ptr = (char *) &current->thread.vr[reg - 32];
655
654 if (flags & ST) 656 if (flags & ST)
655 ret = __copy_to_user(addr, ptr, length); 657 ret = __copy_to_user(addr, ptr, length);
656 else { 658 else {
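emulate_fp_pair() above now copies the two FPRs as separate 8-byte transfers, using an "i ^ sw" index to byte-reverse each register when the access is byte-swapped. A small standalone illustration of that indexing trick:

#include <stdio.h>

/* Copy one 8-byte register image; sw == 7 reverses the byte order. */
static void copy8(unsigned char *dst, const unsigned char *src, int sw)
{
        int i;

        for (i = 0; i < 8; ++i)
                dst[i ^ sw] = src[i];
}

int main(void)
{
        unsigned char src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char out[8];
        int i;

        copy8(out, src, 7);                     /* byte-swapped form */
        for (i = 0; i < 8; ++i)
                printf("%d ", out[i]);          /* prints 8 7 6 5 4 3 2 1 */
        printf("\n");
        return 0;
}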
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2822c8ccfaaf..5f81256287f5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -125,6 +125,10 @@ static void kvmppc_free_vcpus(struct kvm *kvm)
125 } 125 }
126} 126}
127 127
128void kvm_arch_sync_events(struct kvm *kvm)
129{
130}
131
128void kvm_arch_destroy_vm(struct kvm *kvm) 132void kvm_arch_destroy_vm(struct kvm *kvm)
129{ 133{
130 kvmppc_free_vcpus(kvm); 134 kvmppc_free_vcpus(kvm);
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 70693a5c12a1..693b14a778fa 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -62,18 +62,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
6272: std r8,8(r3) 6272: std r8,8(r3)
63 beq+ 3f 63 beq+ 3f
64 addi r3,r3,16 64 addi r3,r3,16
6523: ld r9,8(r4)
66.Ldo_tail: 65.Ldo_tail:
67 bf cr7*4+1,1f 66 bf cr7*4+1,1f
68 rotldi r9,r9,32 6723: lwz r9,8(r4)
68 addi r4,r4,4
6973: stw r9,0(r3) 6973: stw r9,0(r3)
70 addi r3,r3,4 70 addi r3,r3,4
711: bf cr7*4+2,2f 711: bf cr7*4+2,2f
72 rotldi r9,r9,16 7244: lhz r9,8(r4)
73 addi r4,r4,2
7374: sth r9,0(r3) 7474: sth r9,0(r3)
74 addi r3,r3,2 75 addi r3,r3,2
752: bf cr7*4+3,3f 762: bf cr7*4+3,3f
76 rotldi r9,r9,8 7745: lbz r9,8(r4)
7775: stb r9,0(r3) 7875: stb r9,0(r3)
783: li r3,0 793: li r3,0
79 blr 80 blr
@@ -141,11 +142,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
1416: cmpwi cr1,r5,8 1426: cmpwi cr1,r5,8
142 addi r3,r3,32 143 addi r3,r3,32
143 sld r9,r9,r10 144 sld r9,r9,r10
144 ble cr1,.Ldo_tail 145 ble cr1,7f
14534: ld r0,8(r4) 14634: ld r0,8(r4)
146 srd r7,r0,r11 147 srd r7,r0,r11
147 or r9,r7,r9 148 or r9,r7,r9
148 b .Ldo_tail 1497:
150 bf cr7*4+1,1f
151 rotldi r9,r9,32
15294: stw r9,0(r3)
153 addi r3,r3,4
1541: bf cr7*4+2,2f
155 rotldi r9,r9,16
15695: sth r9,0(r3)
157 addi r3,r3,2
1582: bf cr7*4+3,3f
159 rotldi r9,r9,8
16096: stb r9,0(r3)
1613: li r3,0
162 blr
149 163
150.Ldst_unaligned: 164.Ldst_unaligned:
151 PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */ 165 PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */
@@ -218,7 +232,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
218121: 232121:
219132: 233132:
220 addi r3,r3,8 234 addi r3,r3,8
221123:
222134: 235134:
223135: 236135:
224138: 237138:
@@ -226,6 +239,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
226140: 239140:
227141: 240141:
228142: 241142:
242123:
243144:
244145:
229 245
230/* 246/*
231 * here we have had a fault on a load and r3 points to the first 247 * here we have had a fault on a load and r3 points to the first
@@ -309,6 +325,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
309187: 325187:
310188: 326188:
311189: 327189:
328194:
329195:
330196:
3121: 3311:
313 ld r6,-24(r1) 332 ld r6,-24(r1)
314 ld r5,-8(r1) 333 ld r5,-8(r1)
@@ -329,7 +348,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
329 .llong 72b,172b 348 .llong 72b,172b
330 .llong 23b,123b 349 .llong 23b,123b
331 .llong 73b,173b 350 .llong 73b,173b
351 .llong 44b,144b
332 .llong 74b,174b 352 .llong 74b,174b
353 .llong 45b,145b
333 .llong 75b,175b 354 .llong 75b,175b
334 .llong 24b,124b 355 .llong 24b,124b
335 .llong 25b,125b 356 .llong 25b,125b
@@ -347,6 +368,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
347 .llong 79b,179b 368 .llong 79b,179b
348 .llong 80b,180b 369 .llong 80b,180b
349 .llong 34b,134b 370 .llong 34b,134b
371 .llong 94b,194b
372 .llong 95b,195b
373 .llong 96b,196b
350 .llong 35b,135b 374 .llong 35b,135b
351 .llong 81b,181b 375 .llong 81b,181b
352 .llong 36b,136b 376 .llong 36b,136b
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index fe2d34e5332d..e178922b2c21 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -53,18 +53,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
533: std r8,8(r3) 533: std r8,8(r3)
54 beq 3f 54 beq 3f
55 addi r3,r3,16 55 addi r3,r3,16
56 ld r9,8(r4)
57.Ldo_tail: 56.Ldo_tail:
58 bf cr7*4+1,1f 57 bf cr7*4+1,1f
59 rotldi r9,r9,32 58 lwz r9,8(r4)
59 addi r4,r4,4
60 stw r9,0(r3) 60 stw r9,0(r3)
61 addi r3,r3,4 61 addi r3,r3,4
621: bf cr7*4+2,2f 621: bf cr7*4+2,2f
63 rotldi r9,r9,16 63 lhz r9,8(r4)
64 addi r4,r4,2
64 sth r9,0(r3) 65 sth r9,0(r3)
65 addi r3,r3,2 66 addi r3,r3,2
662: bf cr7*4+3,3f 672: bf cr7*4+3,3f
67 rotldi r9,r9,8 68 lbz r9,8(r4)
68 stb r9,0(r3) 69 stb r9,0(r3)
693: ld r3,48(r1) /* return dest pointer */ 703: ld r3,48(r1) /* return dest pointer */
70 blr 71 blr
@@ -133,11 +134,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
133 cmpwi cr1,r5,8 134 cmpwi cr1,r5,8
134 addi r3,r3,32 135 addi r3,r3,32
135 sld r9,r9,r10 136 sld r9,r9,r10
136 ble cr1,.Ldo_tail 137 ble cr1,6f
137 ld r0,8(r4) 138 ld r0,8(r4)
138 srd r7,r0,r11 139 srd r7,r0,r11
139 or r9,r7,r9 140 or r9,r7,r9
140 b .Ldo_tail 1416:
142 bf cr7*4+1,1f
143 rotldi r9,r9,32
144 stw r9,0(r3)
145 addi r3,r3,4
1461: bf cr7*4+2,2f
147 rotldi r9,r9,16
148 sth r9,0(r3)
149 addi r3,r3,2
1502: bf cr7*4+3,3f
151 rotldi r9,r9,8
152 stb r9,0(r3)
1533: ld r3,48(r1) /* return dest pointer */
154 blr
141 155
142.Ldst_unaligned: 156.Ldst_unaligned:
143 PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7 157 PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7
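Both assembler changes above (copyuser_64.S and memcpy_64.S) rework the tail so the source is never read past its end: instead of always fetching a full 8-byte word and rotating it into place, the remaining 0-7 bytes are moved with at most one 4-, one 2- and one 1-byte access. A rough C equivalent of that tail, for illustration only:

#include <stdio.h>
#include <string.h>

/* Copy the final n < 8 bytes without ever touching src[n] or beyond. */
static void copy_tail(char *dst, const char *src, size_t n)
{
        if (n & 4) {
                memcpy(dst, src, 4);
                dst += 4;
                src += 4;
        }
        if (n & 2) {
                memcpy(dst, src, 2);
                dst += 2;
                src += 2;
        }
        if (n & 1)
                *dst = *src;
}

int main(void)
{
        const char src[] = "abcdef";
        char dst[8] = { 0 };

        copy_tail(dst, src, 6);         /* one 4-byte + one 2-byte access, no 8-byte read */
        printf("%s\n", dst);            /* prints "abcdef" */
        return 0;
}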
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 7393bd76d698..5ac08b8ab654 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -19,6 +19,7 @@
19#include <linux/notifier.h> 19#include <linux/notifier.h>
20#include <linux/lmb.h> 20#include <linux/lmb.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/pfn.h>
22#include <asm/sparsemem.h> 23#include <asm/sparsemem.h>
23#include <asm/prom.h> 24#include <asm/prom.h>
24#include <asm/system.h> 25#include <asm/system.h>
@@ -882,7 +883,7 @@ static void mark_reserved_regions_for_nid(int nid)
882 unsigned long physbase = lmb.reserved.region[i].base; 883 unsigned long physbase = lmb.reserved.region[i].base;
883 unsigned long size = lmb.reserved.region[i].size; 884 unsigned long size = lmb.reserved.region[i].size;
884 unsigned long start_pfn = physbase >> PAGE_SHIFT; 885 unsigned long start_pfn = physbase >> PAGE_SHIFT;
885 unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT); 886 unsigned long end_pfn = PFN_UP(physbase + size);
886 struct node_active_region node_ar; 887 struct node_active_region node_ar;
887 unsigned long node_end_pfn = node->node_start_pfn + 888 unsigned long node_end_pfn = node->node_start_pfn +
888 node->node_spanned_pages; 889 node->node_spanned_pages;
@@ -908,7 +909,7 @@ static void mark_reserved_regions_for_nid(int nid)
908 */ 909 */
909 if (end_pfn > node_ar.end_pfn) 910 if (end_pfn > node_ar.end_pfn)
910 reserve_size = (node_ar.end_pfn << PAGE_SHIFT) 911 reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
911 - (start_pfn << PAGE_SHIFT); 912 - physbase;
912 /* 913 /*
913 * Only worry about *this* node, others may not 914 * Only worry about *this* node, others may not
914 * yet have valid NODE_DATA(). 915 * yet have valid NODE_DATA().
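The numa.c hunks above switch the reserved-region end calculation to PFN_UP(). A worked example of the difference (a PAGE_SHIFT of 12 assumed for simplicity): truncating shifts away a partial final page, while rounding up keeps it inside the reservation.

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
        unsigned long physbase = 0x2000;        /* region starts at pfn 2 */
        unsigned long size = 0x1800;            /* 6 KiB: ends inside pfn 3 */

        printf("truncated end pfn:  %lu\n", (physbase + size) >> PAGE_SHIFT); /* 3 */
        printf("rounded-up end pfn: %lu\n", PFN_UP(physbase + size));         /* 4 */
        return 0;
}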
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 67de6bf3db3d..d281cc0bca71 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -328,7 +328,7 @@ static int __init ps3_mm_add_memory(void)
328 return result; 328 return result;
329} 329}
330 330
331core_initcall(ps3_mm_add_memory); 331device_initcall(ps3_mm_add_memory);
332 332
333/*============================================================================*/ 333/*============================================================================*/
334/* dma routines */ 334/* dma routines */
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 77fae5f64f2e..5558d932b4d5 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -204,6 +204,23 @@ static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
204{ 204{
205 u32 ma, pcila, pciha; 205 u32 ma, pcila, pciha;
206 206
207 /* Hack warning ! The "old" PCI 2.x cell only let us configure the low
208 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
209 * address are actually hard wired to a value that appears to depend
210 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
211 *
212 * The trick here is we just crop those top bits and ignore them when
213 * programming the chip. That means the device-tree has to be right
214 * for the specific part used (we don't print a warning if it's wrong
215 * but on the other hand, you'll crash quickly enough), but at least
216 * this code should work whatever the hard coded value is
217 */
218 plb_addr &= 0xffffffffull;
219
220 /* Note: Due to the above hack, the test below doesn't actually test
221 * if you address is above 4G, but it tests that address and
222 * (address + size) are both contained in the same 4G
223 */
207 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) || 224 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
208 size < 0x1000 || (plb_addr & (size - 1)) != 0) { 225 size < 0x1000 || (plb_addr & (size - 1)) != 0) {
209 printk(KERN_WARNING "%s: Resource out of range\n", 226 printk(KERN_WARNING "%s: Resource out of range\n",
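A small numeric sketch of the cropping described in the comment above (the hard-wired upper bit is only an example value): once the top PLB address bits are dropped, the existing range check merely guarantees that base and base + size stay within the same 4 GiB window.

#include <stdio.h>

int main(void)
{
        /* 36-bit PLB address with bit 32 hard-wired to 1, as on 440EPx. */
        unsigned long long plb_addr = 0x180000000ULL;
        unsigned long long size = 0x10000000ULL;         /* 256 MiB window */

        plb_addr &= 0xffffffffULL;                        /* crop to the low 32 bits */
        printf("programmed base: 0x%llx\n", plb_addr);
        printf("stays below the 4G boundary: %d\n",
               (plb_addr + size) <= 0xffffffffULL);
        return 0;
}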
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 521726430afa..95b0f7db3c69 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -145,7 +145,7 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
145 value->tv_usec = rp.subreg.even / 4096; 145 value->tv_usec = rp.subreg.even / 4096;
146 value->tv_sec = rp.subreg.odd; 146 value->tv_sec = rp.subreg.odd;
147#else 147#else
148 value->tv_usec = cputime % 4096000000ULL; 148 value->tv_usec = (cputime % 4096000000ULL) / 4096;
149 value->tv_sec = cputime / 4096000000ULL; 149 value->tv_sec = cputime / 4096000000ULL;
150#endif 150#endif
151} 151}
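The s390 cputime fix above divides the sub-second remainder by 4096, since cputime is counted in 1/4096 microsecond units; without that division tv_usec can be far outside the 0..999999 range. A worked example in plain C:

#include <stdio.h>

int main(void)
{
        /* 6 seconds plus 2500 microseconds, expressed in 1/4096 us units. */
        unsigned long long cputime = 6ULL * 4096000000ULL + 2500ULL * 4096ULL;

        unsigned long long old_usec = cputime % 4096000000ULL;          /* still 1/4096 us */
        unsigned long long new_usec = (cputime % 4096000000ULL) / 4096; /* real microseconds */

        printf("tv_sec=%llu old tv_usec=%llu fixed tv_usec=%llu\n",
               cputime / 4096000000ULL, old_usec, new_usec);
        return 0;
}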
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 2bd9faeb3919..e8bd6ac22c99 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -43,6 +43,8 @@ struct mem_chunk {
43 43
44extern struct mem_chunk memory_chunk[]; 44extern struct mem_chunk memory_chunk[];
45extern unsigned long real_memory_size; 45extern unsigned long real_memory_size;
46extern int memory_end_set;
47extern unsigned long memory_end;
46 48
47void detect_memory_layout(struct mem_chunk chunk[]); 49void detect_memory_layout(struct mem_chunk chunk[]);
48 50
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d825f4950e4e..c5cfb6185eac 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -82,7 +82,9 @@ char elf_platform[ELF_PLATFORM_SIZE];
82 82
83struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; 83struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
84volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ 84volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
85static unsigned long __initdata memory_end; 85
86int __initdata memory_end_set;
87unsigned long __initdata memory_end;
86 88
87/* 89/*
88 * This is set up by the setup-routine at boot-time 90 * This is set up by the setup-routine at boot-time
@@ -281,6 +283,7 @@ void (*pm_power_off)(void) = machine_power_off;
281static int __init early_parse_mem(char *p) 283static int __init early_parse_mem(char *p)
282{ 284{
283 memory_end = memparse(p, &p); 285 memory_end = memparse(p, &p);
286 memory_end_set = 1;
284 return 0; 287 return 0;
285} 288}
286early_param("mem", early_parse_mem); 289early_param("mem", early_parse_mem);
@@ -508,8 +511,10 @@ static void __init setup_memory_end(void)
508 int i; 511 int i;
509 512
510#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 513#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
511 if (ipl_info.type == IPL_TYPE_FCP_DUMP) 514 if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
512 memory_end = ZFCPDUMP_HSA_SIZE; 515 memory_end = ZFCPDUMP_HSA_SIZE;
516 memory_end_set = 1;
517 }
513#endif 518#endif
514 memory_size = 0; 519 memory_size = 0;
515 memory_end &= PAGE_MASK; 520 memory_end &= PAGE_MASK;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index be8497186b96..0d33893e1e89 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -212,6 +212,10 @@ static void kvm_free_vcpus(struct kvm *kvm)
212 } 212 }
213} 213}
214 214
215void kvm_arch_sync_events(struct kvm *kvm)
216{
217}
218
215void kvm_arch_destroy_vm(struct kvm *kvm) 219void kvm_arch_destroy_vm(struct kvm *kvm)
216{ 220{
217 kvm_free_vcpus(kvm); 221 kvm_free_vcpus(kvm);
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 7c35787d29b4..72da416f6162 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -22,7 +22,6 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
24#include <linux/spi/spi_gpio.h> 24#include <linux/spi/spi_gpio.h>
25#include <media/ov772x.h>
26#include <media/soc_camera_platform.h> 25#include <media/soc_camera_platform.h>
27#include <media/sh_mobile_ceu.h> 26#include <media/sh_mobile_ceu.h>
28#include <video/sh_mobile_lcdc.h> 27#include <video/sh_mobile_lcdc.h>
@@ -224,7 +223,6 @@ static void camera_power(int val)
224} 223}
225 224
226#ifdef CONFIG_I2C 225#ifdef CONFIG_I2C
227/* support for the old ncm03j camera */
228static unsigned char camera_ncm03j_magic[] = 226static unsigned char camera_ncm03j_magic[] =
229{ 227{
230 0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8, 228 0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8,
@@ -245,23 +243,6 @@ static unsigned char camera_ncm03j_magic[] =
245 0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F, 243 0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F,
246}; 244};
247 245
248static int camera_probe(void)
249{
250 struct i2c_adapter *a = i2c_get_adapter(0);
251 struct i2c_msg msg;
252 int ret;
253
254 camera_power(1);
255 msg.addr = 0x6e;
256 msg.buf = camera_ncm03j_magic;
257 msg.len = 2;
258 msg.flags = 0;
259 ret = i2c_transfer(a, &msg, 1);
260 camera_power(0);
261
262 return ret;
263}
264
265static int camera_set_capture(struct soc_camera_platform_info *info, 246static int camera_set_capture(struct soc_camera_platform_info *info,
266 int enable) 247 int enable)
267{ 248{
@@ -313,35 +294,8 @@ static struct platform_device camera_device = {
313 .platform_data = &camera_info, 294 .platform_data = &camera_info,
314 }, 295 },
315}; 296};
316
317static int __init camera_setup(void)
318{
319 if (camera_probe() > 0)
320 platform_device_register(&camera_device);
321
322 return 0;
323}
324late_initcall(camera_setup);
325
326#endif /* CONFIG_I2C */ 297#endif /* CONFIG_I2C */
327 298
328static int ov7725_power(struct device *dev, int mode)
329{
330 camera_power(0);
331 if (mode)
332 camera_power(1);
333
334 return 0;
335}
336
337static struct ov772x_camera_info ov7725_info = {
338 .buswidth = SOCAM_DATAWIDTH_8,
339 .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
340 .link = {
341 .power = ov7725_power,
342 },
343};
344
345static struct sh_mobile_ceu_info sh_mobile_ceu_info = { 299static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
346 .flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | 300 .flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH |
347 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8, 301 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8,
@@ -392,6 +346,9 @@ static struct platform_device *ap325rxa_devices[] __initdata = {
392 &ap325rxa_nor_flash_device, 346 &ap325rxa_nor_flash_device,
393 &lcdc_device, 347 &lcdc_device,
394 &ceu_device, 348 &ceu_device,
349#ifdef CONFIG_I2C
350 &camera_device,
351#endif
395 &nand_flash_device, 352 &nand_flash_device,
396 &sdcard_cn3_device, 353 &sdcard_cn3_device,
397}; 354};
@@ -400,10 +357,6 @@ static struct i2c_board_info __initdata ap325rxa_i2c_devices[] = {
400 { 357 {
401 I2C_BOARD_INFO("pcf8563", 0x51), 358 I2C_BOARD_INFO("pcf8563", 0x51),
402 }, 359 },
403 {
404 I2C_BOARD_INFO("ov772x", 0x21),
405 .platform_data = &ov7725_info,
406 },
407}; 360};
408 361
409static struct spi_board_info ap325rxa_spi_devices[] = { 362static struct spi_board_info ap325rxa_spi_devices[] = {
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index 020a96fe961a..4a5e59732334 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -18,8 +18,8 @@
18#include <asm/freq.h> 18#include <asm/freq.h>
19#include <asm/io.h> 19#include <asm/io.h>
20 20
21const static int pll1rate[]={1,2,3,4,6,8}; 21static const int pll1rate[]={1,2,3,4,6,8};
22const static int pfc_divisors[]={1,2,3,4,6,8,12}; 22static const int pfc_divisors[]={1,2,3,4,6,8,12};
23#define ifc_divisors pfc_divisors 23#define ifc_divisors pfc_divisors
24 24
25#if (CONFIG_SH_CLK_MD == 0) 25#if (CONFIG_SH_CLK_MD == 0)
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index f260b58f5ce9..0e706257918f 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -240,4 +240,9 @@ struct compat_shmid64_ds {
240 unsigned int __unused2; 240 unsigned int __unused2;
241}; 241};
242 242
243static inline int is_compat_task(void)
244{
245 return test_thread_flag(TIF_32BIT);
246}
247
243#endif /* _ASM_SPARC64_COMPAT_H */ 248#endif /* _ASM_SPARC64_COMPAT_H */
diff --git a/arch/sparc/include/asm/seccomp.h b/arch/sparc/include/asm/seccomp.h
index 7fcd9968192b..adca1bce41d4 100644
--- a/arch/sparc/include/asm/seccomp.h
+++ b/arch/sparc/include/asm/seccomp.h
@@ -1,11 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef _ASM_SECCOMP_H
2 2
3#include <linux/thread_info.h> /* already defines TIF_32BIT */
4
5#ifndef TIF_32BIT
6#error "unexpected TIF_32BIT on sparc64"
7#endif
8
9#include <linux/unistd.h> 3#include <linux/unistd.h>
10 4
11#define __NR_seccomp_read __NR_read 5#define __NR_seccomp_read __NR_read
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 3b9f4d6e14a9..e1a9598e2a4d 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -306,6 +306,7 @@ static int jbusmc_print_dimm(int syndrome_code,
306 buf[1] = '?'; 306 buf[1] = '?';
307 buf[2] = '?'; 307 buf[2] = '?';
308 buf[3] = '\0'; 308 buf[3] = '\0';
309 return 0;
309 } 310 }
310 p = dp->controller; 311 p = dp->controller;
311 prop = &p->layout; 312 prop = &p->layout;
diff --git a/arch/um/drivers/vde_user.c b/arch/um/drivers/vde_user.c
index 56533db25343..c5c43253e6ce 100644
--- a/arch/um/drivers/vde_user.c
+++ b/arch/um/drivers/vde_user.c
@@ -78,7 +78,7 @@ void vde_init_libstuff(struct vde_data *vpri, struct vde_init *init)
78{ 78{
79 struct vde_open_args *args; 79 struct vde_open_args *args;
80 80
81 vpri->args = kmalloc(sizeof(struct vde_open_args), UM_GFP_KERNEL); 81 vpri->args = uml_kmalloc(sizeof(struct vde_open_args), UM_GFP_KERNEL);
82 if (vpri->args == NULL) { 82 if (vpri->args == NULL) {
83 printk(UM_KERN_ERR "vde_init_libstuff - vde_open_args " 83 printk(UM_KERN_ERR "vde_init_libstuff - vde_open_args "
84 "allocation failed"); 84 "allocation failed");
@@ -91,8 +91,8 @@ void vde_init_libstuff(struct vde_data *vpri, struct vde_init *init)
91 args->group = init->group; 91 args->group = init->group;
92 args->mode = init->mode ? init->mode : 0700; 92 args->mode = init->mode ? init->mode : 0700;
93 93
94 args->port ? printk(UM_KERN_INFO "port %d", args->port) : 94 args->port ? printk("port %d", args->port) :
95 printk(UM_KERN_INFO "undefined port"); 95 printk("undefined port");
96} 96}
97 97
98int vde_user_read(void *conn, void *buf, int len) 98int vde_user_read(void *conn, void *buf, int len)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1042d69b267d..469f3450bf81 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -40,6 +40,9 @@ config X86
40 select HAVE_GENERIC_DMA_COHERENT if X86_32 40 select HAVE_GENERIC_DMA_COHERENT if X86_32
41 select HAVE_EFFICIENT_UNALIGNED_ACCESS 41 select HAVE_EFFICIENT_UNALIGNED_ACCESS
42 select USER_STACKTRACE_SUPPORT 42 select USER_STACKTRACE_SUPPORT
43 select HAVE_KERNEL_GZIP
44 select HAVE_KERNEL_BZIP2
45 select HAVE_KERNEL_LZMA
43 46
44config ARCH_DEFCONFIG 47config ARCH_DEFCONFIG
45 string 48 string
@@ -235,6 +238,20 @@ config SMP
235 238
236 If you don't know what to do here, say N. 239 If you don't know what to do here, say N.
237 240
241config X86_X2APIC
242 bool "Support x2apic"
243 depends on X86_LOCAL_APIC && X86_64
244 ---help---
245 This enables x2apic support on CPUs that have this feature.
246
247 This allows 32-bit apic IDs (so it can support very large systems),
248 and accesses the local apic via MSRs not via mmio.
249
250 ( On certain CPU models you may need to enable INTR_REMAP too,
251 to get functional x2apic mode. )
252
253 If you don't know what to do here, say N.
254
238config SPARSE_IRQ 255config SPARSE_IRQ
239 bool "Support sparse irq numbering" 256 bool "Support sparse irq numbering"
240 depends on PCI_MSI || HT_IRQ 257 depends on PCI_MSI || HT_IRQ
@@ -271,6 +288,7 @@ config X86_BIGSMP
271 ---help--- 288 ---help---
272 This option is needed for the systems that have more than 8 CPUs 289 This option is needed for the systems that have more than 8 CPUs
273 290
291if X86_32
274config X86_EXTENDED_PLATFORM 292config X86_EXTENDED_PLATFORM
275 bool "Support for extended (non-PC) x86 platforms" 293 bool "Support for extended (non-PC) x86 platforms"
276 default y 294 default y
@@ -279,12 +297,36 @@ config X86_EXTENDED_PLATFORM
279 standard PC platforms. (which covers the vast majority of 297 standard PC platforms. (which covers the vast majority of
280 systems out there.) 298 systems out there.)
281 299
282 If you enable this option then you'll be able to select a number 300 If you enable this option then you'll be able to select support
283 of non-PC x86 platforms. 301 for the following (non-PC) 32 bit x86 platforms:
302 AMD Elan
303 NUMAQ (IBM/Sequent)
304 RDC R-321x SoC
305 SGI 320/540 (Visual Workstation)
306 Summit/EXA (IBM x440)
307 Unisys ES7000 IA32 series
284 308
285 If you have one of these systems, or if you want to build a 309 If you have one of these systems, or if you want to build a
286 generic distribution kernel, say Y here - otherwise say N. 310 generic distribution kernel, say Y here - otherwise say N.
311endif
312
313if X86_64
314config X86_EXTENDED_PLATFORM
315 bool "Support for extended (non-PC) x86 platforms"
316 default y
317 ---help---
318 If you disable this option then the kernel will only support
319 standard PC platforms. (which covers the vast majority of
320 systems out there.)
321
322 If you enable this option then you'll be able to select support
323 for the following (non-PC) 64 bit x86 platforms:
324 ScaleMP vSMP
325 SGI Ultraviolet
287 326
327 If you have one of these systems, or if you want to build a
328 generic distribution kernel, say Y here - otherwise say N.
329endif
288# This is an alphabetically sorted list of 64 bit extended platforms 330# This is an alphabetically sorted list of 64 bit extended platforms
289# Please maintain the alphabetic order if and when there are additions 331# Please maintain the alphabetic order if and when there are additions
290 332
@@ -302,6 +344,7 @@ config X86_UV
302 bool "SGI Ultraviolet" 344 bool "SGI Ultraviolet"
303 depends on X86_64 345 depends on X86_64
304 depends on X86_EXTENDED_PLATFORM 346 depends on X86_EXTENDED_PLATFORM
347 select X86_X2APIC
305 ---help--- 348 ---help---
306 This option is needed in order to support SGI Ultraviolet systems. 349 This option is needed in order to support SGI Ultraviolet systems.
307 If you don't have one of these, you should say N here. 350 If you don't have one of these, you should say N here.
@@ -382,19 +425,6 @@ config X86_ES7000
382 Support for Unisys ES7000 systems. Say 'Y' here if this kernel is 425 Support for Unisys ES7000 systems. Say 'Y' here if this kernel is
383 supposed to run on an IA32-based Unisys ES7000 system. 426 supposed to run on an IA32-based Unisys ES7000 system.
384 427
385config X86_VOYAGER
386 bool "Voyager (NCR)"
387 depends on SMP && !PCI && BROKEN
388 depends on X86_32_NON_STANDARD
389 ---help---
390 Voyager is an MCA-based 32-way capable SMP architecture proprietary
391 to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
392
393 *** WARNING ***
394
395 If you do not specifically know you have a Voyager based machine,
396 say N here, otherwise the kernel you build will not be bootable.
397
398config SCHED_OMIT_FRAME_POINTER 428config SCHED_OMIT_FRAME_POINTER
399 def_bool y 429 def_bool y
400 prompt "Single-depth WCHAN output" 430 prompt "Single-depth WCHAN output"
@@ -1795,7 +1825,7 @@ config DMAR
1795 remapping devices. 1825 remapping devices.
1796 1826
1797config DMAR_DEFAULT_ON 1827config DMAR_DEFAULT_ON
1798 def_bool n 1828 def_bool y
1799 prompt "Enable DMA Remapping Devices by default" 1829 prompt "Enable DMA Remapping Devices by default"
1800 depends on DMAR 1830 depends on DMAR
1801 help 1831 help
@@ -1828,6 +1858,7 @@ config DMAR_FLOPPY_WA
1828config INTR_REMAP 1858config INTR_REMAP
1829 bool "Support for Interrupt Remapping (EXPERIMENTAL)" 1859 bool "Support for Interrupt Remapping (EXPERIMENTAL)"
1830 depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL 1860 depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
1861 select X86_X2APIC
1831 ---help--- 1862 ---help---
1832 Supports Interrupt remapping for IO-APIC and MSI devices. 1863 Supports Interrupt remapping for IO-APIC and MSI devices.
1833 To use x2apic mode in the CPU's which support x2APIC enhancements or 1864 To use x2apic mode in the CPU's which support x2APIC enhancements or
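Note on the DMAR_DEFAULT_ON flip above: changing the default from n to y only moves the compile-time default; DMA remapping can still be toggled at boot with intel_iommu=on/off. As a rough sketch of how such an option is consumed (reconstructed from memory of the intel-iommu code of that era, not part of this diff; the variable name dmar_disabled is an assumption here):

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;	/* remapping enabled unless "intel_iommu=off" is passed */
#else
int dmar_disabled = 1;	/* remapping disabled unless "intel_iommu=on" is passed */
#endif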
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index ba4781b93890..fdb45df608b6 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -175,28 +175,8 @@ config IOMMU_LEAK
175 Add a simple leak tracer to the IOMMU code. This is useful when you 175 Add a simple leak tracer to the IOMMU code. This is useful when you
176 are debugging a buggy device driver that leaks IOMMU mappings. 176 are debugging a buggy device driver that leaks IOMMU mappings.
177 177
178config MMIOTRACE 178config HAVE_MMIOTRACE_SUPPORT
179 bool "Memory mapped IO tracing" 179 def_bool y
180 depends on DEBUG_KERNEL && PCI
181 select TRACING
182 help
183 Mmiotrace traces Memory Mapped I/O access and is meant for
184 debugging and reverse engineering. It is called from the ioremap
185 implementation and works via page faults. Tracing is disabled by
186 default and can be enabled at run-time.
187
188 See Documentation/tracers/mmiotrace.txt.
189 If you are not helping to develop drivers, say N.
190
191config MMIOTRACE_TEST
192 tristate "Test module for mmiotrace"
193 depends on MMIOTRACE && m
194 help
195 This is a dumb module for testing mmiotrace. It is very dangerous
196 as it will write garbage to IO memory starting at a given address.
197 However, it should be safe to use on e.g. unused portion of VRAM.
198
199 Say N, unless you absolutely know what you are doing.
200 180
201# 181#
202# IO delay types: 182# IO delay types:
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index cd48c7210016..c70eff69a1fb 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -32,7 +32,6 @@ setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
32setup-y += header.o main.o mca.o memory.o pm.o pmjump.o 32setup-y += header.o main.o mca.o memory.o pm.o pmjump.o
33setup-y += printf.o string.o tty.o video.o video-mode.o version.o 33setup-y += printf.o string.o tty.o video.o video-mode.o version.o
34setup-$(CONFIG_X86_APM_BOOT) += apm.o 34setup-$(CONFIG_X86_APM_BOOT) += apm.o
35setup-$(CONFIG_X86_VOYAGER) += voyager.o
36 35
37# The link order of the video-*.o modules can matter. In particular, 36# The link order of the video-*.o modules can matter. In particular,
38# video-vga.o *must* be listed first, followed by video-vesa.o. 37# video-vga.o *must* be listed first, followed by video-vesa.o.
diff --git a/arch/x86/boot/a20.c b/arch/x86/boot/a20.c
index fba8e9c6a504..7c19ce8c2442 100644
--- a/arch/x86/boot/a20.c
+++ b/arch/x86/boot/a20.c
@@ -126,11 +126,6 @@ static void enable_a20_fast(void)
126 126
127int enable_a20(void) 127int enable_a20(void)
128{ 128{
129#ifdef CONFIG_X86_VOYAGER
130 /* On Voyager, a20_test() is unsafe? */
131 enable_a20_kbc();
132 return 0;
133#else
134 int loops = A20_ENABLE_LOOPS; 129 int loops = A20_ENABLE_LOOPS;
135 int kbc_err; 130 int kbc_err;
136 131
@@ -164,5 +159,4 @@ int enable_a20(void)
164 } 159 }
165 160
166 return -1; 161 return -1;
167#endif
168} 162}
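With the Voyager special case removed, enable_a20() always runs the generic retry loop that the remaining lines (loops, kbc_err, the final return -1) belong to. A sketch of that loop's shape follows; the helper names are taken from a20.c of this era and should be read as assumptions rather than as part of this hunk:

int enable_a20(void)
{
	int loops = A20_ENABLE_LOOPS;

	while (loops--) {
		if (a20_test_short())		/* already enabled (legacy-free BIOS)? */
			return 0;
		enable_a20_bios();		/* 1) BIOS call, INT 15h AX=2401h */
		if (a20_test_short())
			return 0;
		if (!empty_8042()) {		/* 2) keyboard controller method */
			enable_a20_kbc();
			if (a20_test_long())
				return 0;
		}
		enable_a20_fast();		/* 3) "fast A20" gate via port 0x92 */
		if (a20_test_long())
			return 0;
	}
	return -1;				/* all methods failed */
}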
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index cc0ef13fba7a..7b2692e897e5 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -302,9 +302,6 @@ void probe_cards(int unsafe);
302/* video-vesa.c */ 302/* video-vesa.c */
303void vesa_store_edid(void); 303void vesa_store_edid(void);
304 304
305/* voyager.c */
306int query_voyager(void);
307
308#endif /* __ASSEMBLY__ */ 305#endif /* __ASSEMBLY__ */
309 306
310#endif /* BOOT_BOOT_H */ 307#endif /* BOOT_BOOT_H */
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 1771c804e02f..3ca4c194b8e5 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
4# create a compressed vmlinux image from the original vmlinux 4# create a compressed vmlinux image from the original vmlinux
5# 5#
6 6
7targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o 7targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
8 8
9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC 10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -47,18 +47,35 @@ ifeq ($(CONFIG_X86_32),y)
47ifdef CONFIG_RELOCATABLE 47ifdef CONFIG_RELOCATABLE
48$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE 48$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
49 $(call if_changed,gzip) 49 $(call if_changed,gzip)
50$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
51 $(call if_changed,bzip2)
52$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
53 $(call if_changed,lzma)
50else 54else
51$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 55$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
52 $(call if_changed,gzip) 56 $(call if_changed,gzip)
57$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
58 $(call if_changed,bzip2)
59$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
60 $(call if_changed,lzma)
53endif 61endif
54LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T 62LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
55 63
56else 64else
65
57$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 66$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
58 $(call if_changed,gzip) 67 $(call if_changed,gzip)
68$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
69 $(call if_changed,bzip2)
70$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
71 $(call if_changed,lzma)
59 72
60LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T 73LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
61endif 74endif
62 75
63$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE 76suffix_$(CONFIG_KERNEL_GZIP) = gz
77suffix_$(CONFIG_KERNEL_BZIP2) = bz2
78suffix_$(CONFIG_KERNEL_LZMA) = lzma
79
80$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
64 $(call if_changed,ld) 81 $(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 29c5fbf08392..3a8a866fb2e2 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -25,14 +25,12 @@
25 25
26#include <linux/linkage.h> 26#include <linux/linkage.h>
27#include <asm/segment.h> 27#include <asm/segment.h>
28#include <asm/page.h> 28#include <asm/page_types.h>
29#include <asm/boot.h> 29#include <asm/boot.h>
30#include <asm/asm-offsets.h> 30#include <asm/asm-offsets.h>
31 31
32.section ".text.head","ax",@progbits 32.section ".text.head","ax",@progbits
33 .globl startup_32 33ENTRY(startup_32)
34
35startup_32:
36 cld 34 cld
37 /* test KEEP_SEGMENTS flag to see if the bootloader is asking 35 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
38 * us to not reload segments */ 36 * us to not reload segments */
@@ -113,6 +111,8 @@ startup_32:
113 */ 111 */
114 leal relocated(%ebx), %eax 112 leal relocated(%ebx), %eax
115 jmp *%eax 113 jmp *%eax
114ENDPROC(startup_32)
115
116.section ".text" 116.section ".text"
117relocated: 117relocated:
118 118
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1d5dff4123e1..ed4a82948002 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -26,8 +26,8 @@
26 26
27#include <linux/linkage.h> 27#include <linux/linkage.h>
28#include <asm/segment.h> 28#include <asm/segment.h>
29#include <asm/pgtable.h> 29#include <asm/pgtable_types.h>
30#include <asm/page.h> 30#include <asm/page_types.h>
31#include <asm/boot.h> 31#include <asm/boot.h>
32#include <asm/msr.h> 32#include <asm/msr.h>
33#include <asm/processor-flags.h> 33#include <asm/processor-flags.h>
@@ -35,9 +35,7 @@
35 35
36.section ".text.head" 36.section ".text.head"
37 .code32 37 .code32
38 .globl startup_32 38ENTRY(startup_32)
39
40startup_32:
41 cld 39 cld
42 /* test KEEP_SEGMENTS flag to see if the bootloader is asking 40 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
43 * us to not reload segments */ 41 * us to not reload segments */
@@ -176,6 +174,7 @@ startup_32:
176 174
177 /* Jump from 32bit compatibility mode into 64bit mode. */ 175 /* Jump from 32bit compatibility mode into 64bit mode. */
178 lret 176 lret
177ENDPROC(startup_32)
179 178
180no_longmode: 179no_longmode:
181 /* This isn't an x86-64 CPU so hang */ 180 /* This isn't an x86-64 CPU so hang */
@@ -295,7 +294,6 @@ relocated:
295 call decompress_kernel 294 call decompress_kernel
296 popq %rsi 295 popq %rsi
297 296
298
299/* 297/*
300 * Jump to the decompressed kernel. 298 * Jump to the decompressed kernel.
301 */ 299 */
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index da062216948a..e45be73684ff 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -116,71 +116,13 @@
116/* 116/*
117 * gzip declarations 117 * gzip declarations
118 */ 118 */
119
120#define OF(args) args
121#define STATIC static 119#define STATIC static
122 120
123#undef memset 121#undef memset
124#undef memcpy 122#undef memcpy
125#define memzero(s, n) memset((s), 0, (n)) 123#define memzero(s, n) memset((s), 0, (n))
126 124
127typedef unsigned char uch;
128typedef unsigned short ush;
129typedef unsigned long ulg;
130
131/*
132 * Window size must be at least 32k, and a power of two.
133 * We don't actually have a window just a huge output buffer,
134 * so we report a 2G window size, as that should always be
135 * larger than our output buffer:
136 */
137#define WSIZE 0x80000000
138
139/* Input buffer: */
140static unsigned char *inbuf;
141
142/* Sliding window buffer (and final output buffer): */
143static unsigned char *window;
144
145/* Valid bytes in inbuf: */
146static unsigned insize;
147
148/* Index of next byte to be processed in inbuf: */
149static unsigned inptr;
150
151/* Bytes in output buffer: */
152static unsigned outcnt;
153
154/* gzip flag byte */
155#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
156#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */
157#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
 158#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
159#define COMMENT 0x10 /* bit 4 set: file comment present */
160#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
161#define RESERVED 0xC0 /* bit 6, 7: reserved */
162
163#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
164
165/* Diagnostic functions */
166#ifdef DEBUG
167# define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
168# define Trace(x) do { fprintf x; } while (0)
169# define Tracev(x) do { if (verbose) fprintf x ; } while (0)
170# define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0)
171# define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0)
172# define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0)
173#else
174# define Assert(cond, msg)
175# define Trace(x)
176# define Tracev(x)
177# define Tracevv(x)
178# define Tracec(c, x)
179# define Tracecv(c, x)
180#endif
181 125
182static int fill_inbuf(void);
183static void flush_window(void);
184static void error(char *m); 126static void error(char *m);
185 127
186/* 128/*
@@ -189,13 +131,8 @@ static void error(char *m);
189static struct boot_params *real_mode; /* Pointer to real-mode data */ 131static struct boot_params *real_mode; /* Pointer to real-mode data */
190static int quiet; 132static int quiet;
191 133
192extern unsigned char input_data[];
193extern int input_len;
194
195static long bytes_out;
196
197static void *memset(void *s, int c, unsigned n); 134static void *memset(void *s, int c, unsigned n);
198static void *memcpy(void *dest, const void *src, unsigned n); 135void *memcpy(void *dest, const void *src, unsigned n);
199 136
200static void __putstr(int, const char *); 137static void __putstr(int, const char *);
201#define putstr(__x) __putstr(0, __x) 138#define putstr(__x) __putstr(0, __x)
@@ -213,7 +150,17 @@ static char *vidmem;
213static int vidport; 150static int vidport;
214static int lines, cols; 151static int lines, cols;
215 152
216#include "../../../../lib/inflate.c" 153#ifdef CONFIG_KERNEL_GZIP
154#include "../../../../lib/decompress_inflate.c"
155#endif
156
157#ifdef CONFIG_KERNEL_BZIP2
158#include "../../../../lib/decompress_bunzip2.c"
159#endif
160
161#ifdef CONFIG_KERNEL_LZMA
162#include "../../../../lib/decompress_unlzma.c"
163#endif
217 164
218static void scroll(void) 165static void scroll(void)
219{ 166{
@@ -282,7 +229,7 @@ static void *memset(void *s, int c, unsigned n)
282 return s; 229 return s;
283} 230}
284 231
285static void *memcpy(void *dest, const void *src, unsigned n) 232void *memcpy(void *dest, const void *src, unsigned n)
286{ 233{
287 int i; 234 int i;
288 const char *s = src; 235 const char *s = src;
@@ -293,38 +240,6 @@ static void *memcpy(void *dest, const void *src, unsigned n)
293 return dest; 240 return dest;
294} 241}
295 242
296/* ===========================================================================
297 * Fill the input buffer. This is called only when the buffer is empty
298 * and at least one byte is really needed.
299 */
300static int fill_inbuf(void)
301{
302 error("ran out of input data");
303 return 0;
304}
305
306/* ===========================================================================
307 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
308 * (Used for the decompressed data only.)
309 */
310static void flush_window(void)
311{
312 /* With my window equal to my output buffer
313 * I only need to compute the crc here.
314 */
315 unsigned long c = crc; /* temporary variable */
316 unsigned n;
317 unsigned char *in, ch;
318
319 in = window;
320 for (n = 0; n < outcnt; n++) {
321 ch = *in++;
322 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
323 }
324 crc = c;
325 bytes_out += (unsigned long)outcnt;
326 outcnt = 0;
327}
328 243
329static void error(char *x) 244static void error(char *x)
330{ 245{
@@ -407,12 +322,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
407 lines = real_mode->screen_info.orig_video_lines; 322 lines = real_mode->screen_info.orig_video_lines;
408 cols = real_mode->screen_info.orig_video_cols; 323 cols = real_mode->screen_info.orig_video_cols;
409 324
410 window = output; /* Output buffer (Normally at 1M) */
411 free_mem_ptr = heap; /* Heap */ 325 free_mem_ptr = heap; /* Heap */
412 free_mem_end_ptr = heap + BOOT_HEAP_SIZE; 326 free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
413 inbuf = input_data; /* Input buffer */
414 insize = input_len;
415 inptr = 0;
416 327
417#ifdef CONFIG_X86_64 328#ifdef CONFIG_X86_64
418 if ((unsigned long)output & (__KERNEL_ALIGN - 1)) 329 if ((unsigned long)output & (__KERNEL_ALIGN - 1))
@@ -430,10 +341,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
430#endif 341#endif
431#endif 342#endif
432 343
433 makecrc();
434 if (!quiet) 344 if (!quiet)
435 putstr("\nDecompressing Linux... "); 345 putstr("\nDecompressing Linux... ");
436 gunzip(); 346 decompress(input_data, input_len, NULL, NULL, output, NULL, error);
437 parse_elf(output); 347 parse_elf(output);
438 if (!quiet) 348 if (!quiet)
439 putstr("done.\nBooting the kernel.\n"); 349 putstr("done.\nBooting the kernel.\n");
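The gzip-specific plumbing deleted above (inbuf/insize/inptr, window, fill_inbuf(), flush_window(), makecrc()) is what the shared lib/decompress_*.c helpers replace: each supported algorithm compiles down to a common entry point named decompress(), chosen by the CONFIG_KERNEL_* include block. The call decompress(input_data, input_len, NULL, NULL, output, NULL, error) can pass NULL for the streaming hooks because the whole compressed payload already sits in memory and the result is written straight into output. The shared prototype is roughly the following (parameter types recalled from memory, treat them as an assumption):

int decompress(unsigned char *inbuf, int len,
	       int (*fill)(void *buf, unsigned int size),   /* refill input; NULL here  */
	       int (*flush)(void *buf, unsigned int size),  /* drain output; NULL here  */
	       unsigned char *output,                        /* decompress into this buf */
	       int *posp,                                    /* bytes consumed; NULL     */
	       void (*error)(char *msg));                    /* fatal error callback     */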
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index ef50c84e8b4b..11f272c6f5e9 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -8,6 +8,8 @@
8 * 8 *
9 * ----------------------------------------------------------------------- */ 9 * ----------------------------------------------------------------------- */
10 10
11#include <linux/linkage.h>
12
11/* 13/*
12 * Memory copy routines 14 * Memory copy routines
13 */ 15 */
@@ -15,9 +17,7 @@
15 .code16gcc 17 .code16gcc
16 .text 18 .text
17 19
18 .globl memcpy 20GLOBAL(memcpy)
19 .type memcpy, @function
20memcpy:
21 pushw %si 21 pushw %si
22 pushw %di 22 pushw %di
23 movw %ax, %di 23 movw %ax, %di
@@ -31,11 +31,9 @@ memcpy:
31 popw %di 31 popw %di
32 popw %si 32 popw %si
33 ret 33 ret
34 .size memcpy, .-memcpy 34ENDPROC(memcpy)
35 35
36 .globl memset 36GLOBAL(memset)
37 .type memset, @function
38memset:
39 pushw %di 37 pushw %di
40 movw %ax, %di 38 movw %ax, %di
41 movzbl %dl, %eax 39 movzbl %dl, %eax
@@ -48,52 +46,42 @@ memset:
48 rep; stosb 46 rep; stosb
49 popw %di 47 popw %di
50 ret 48 ret
51 .size memset, .-memset 49ENDPROC(memset)
52 50
53 .globl copy_from_fs 51GLOBAL(copy_from_fs)
54 .type copy_from_fs, @function
55copy_from_fs:
56 pushw %ds 52 pushw %ds
57 pushw %fs 53 pushw %fs
58 popw %ds 54 popw %ds
59 call memcpy 55 call memcpy
60 popw %ds 56 popw %ds
61 ret 57 ret
62 .size copy_from_fs, .-copy_from_fs 58ENDPROC(copy_from_fs)
63 59
64 .globl copy_to_fs 60GLOBAL(copy_to_fs)
65 .type copy_to_fs, @function
66copy_to_fs:
67 pushw %es 61 pushw %es
68 pushw %fs 62 pushw %fs
69 popw %es 63 popw %es
70 call memcpy 64 call memcpy
71 popw %es 65 popw %es
72 ret 66 ret
73 .size copy_to_fs, .-copy_to_fs 67ENDPROC(copy_to_fs)
74 68
75#if 0 /* Not currently used, but can be enabled as needed */ 69#if 0 /* Not currently used, but can be enabled as needed */
76 70GLOBAL(copy_from_gs)
77 .globl copy_from_gs
78 .type copy_from_gs, @function
79copy_from_gs:
80 pushw %ds 71 pushw %ds
81 pushw %gs 72 pushw %gs
82 popw %ds 73 popw %ds
83 call memcpy 74 call memcpy
84 popw %ds 75 popw %ds
85 ret 76 ret
86 .size copy_from_gs, .-copy_from_gs 77ENDPROC(copy_from_gs)
87 .globl copy_to_gs
88 78
89 .type copy_to_gs, @function 79GLOBAL(copy_to_gs)
90copy_to_gs:
91 pushw %es 80 pushw %es
92 pushw %gs 81 pushw %gs
93 popw %es 82 popw %es
94 call memcpy 83 call memcpy
95 popw %es 84 popw %es
96 ret 85 ret
97 .size copy_to_gs, .-copy_to_gs 86ENDPROC(copy_to_gs)
98
99#endif 87#endif
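The hand-written .globl/.type/.size triples in copy.S, pmjump.S and the compressed head_32.S/head_64.S are replaced by the ENTRY/GLOBAL/ENDPROC linkage macros, so a symbol's visibility, type and size can no longer drift apart from its label. Their definitions, paraphrased from memory of linux/linkage.h plus the x86 linkage header (the exact header split and ALIGN placement are assumptions), expand roughly to:

#define GLOBAL(name)	\
	.globl name;	\
	name:

#define ENTRY(name)	\
	.globl name;	\
	ALIGN;		\
	name:

#define END(name)	\
	.size name, .-name

#define ENDPROC(name)		\
	.type name, @function;	\
	END(name)

GLOBAL() differs from ENTRY() only in not forcing alignment, which is presumably why the 16-bit boot copy routines use it: padding between back-to-back real-mode helpers would only waste setup-image bytes.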
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index b993062e9a5f..7ccff4884a23 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -19,7 +19,7 @@
19#include <linux/utsrelease.h> 19#include <linux/utsrelease.h>
20#include <asm/boot.h> 20#include <asm/boot.h>
21#include <asm/e820.h> 21#include <asm/e820.h>
22#include <asm/page.h> 22#include <asm/page_types.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include "boot.h" 24#include "boot.h"
25#include "offsets.h" 25#include "offsets.h"
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index 197421db1af1..58f0415d3ae0 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -149,11 +149,6 @@ void main(void)
149 /* Query MCA information */ 149 /* Query MCA information */
150 query_mca(); 150 query_mca();
151 151
152 /* Voyager */
153#ifdef CONFIG_X86_VOYAGER
154 query_voyager();
155#endif
156
157 /* Query Intel SpeedStep (IST) information */ 152 /* Query Intel SpeedStep (IST) information */
158 query_ist(); 153 query_ist();
159 154
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index 141b6e20ed31..019c17a75851 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -15,18 +15,15 @@
15#include <asm/boot.h> 15#include <asm/boot.h>
16#include <asm/processor-flags.h> 16#include <asm/processor-flags.h>
17#include <asm/segment.h> 17#include <asm/segment.h>
18#include <linux/linkage.h>
18 19
19 .text 20 .text
20
21 .globl protected_mode_jump
22 .type protected_mode_jump, @function
23
24 .code16 21 .code16
25 22
26/* 23/*
27 * void protected_mode_jump(u32 entrypoint, u32 bootparams); 24 * void protected_mode_jump(u32 entrypoint, u32 bootparams);
28 */ 25 */
29protected_mode_jump: 26GLOBAL(protected_mode_jump)
30 movl %edx, %esi # Pointer to boot_params table 27 movl %edx, %esi # Pointer to boot_params table
31 28
32 xorl %ebx, %ebx 29 xorl %ebx, %ebx
@@ -47,12 +44,10 @@ protected_mode_jump:
47 .byte 0x66, 0xea # ljmpl opcode 44 .byte 0x66, 0xea # ljmpl opcode
482: .long in_pm32 # offset 452: .long in_pm32 # offset
49 .word __BOOT_CS # segment 46 .word __BOOT_CS # segment
50 47ENDPROC(protected_mode_jump)
51 .size protected_mode_jump, .-protected_mode_jump
52 48
53 .code32 49 .code32
54 .type in_pm32, @function 50GLOBAL(in_pm32)
55in_pm32:
56 # Set up data segments for flat 32-bit mode 51 # Set up data segments for flat 32-bit mode
57 movl %ecx, %ds 52 movl %ecx, %ds
58 movl %ecx, %es 53 movl %ecx, %es
@@ -78,5 +73,4 @@ in_pm32:
78 lldt %cx 73 lldt %cx
79 74
80 jmpl *%eax # Jump to the 32-bit entrypoint 75 jmpl *%eax # Jump to the 32-bit entrypoint
81 76ENDPROC(in_pm32)
82 .size in_pm32, .-in_pm32
diff --git a/arch/x86/boot/voyager.c b/arch/x86/boot/voyager.c
deleted file mode 100644
index 433909d61e5c..000000000000
--- a/arch/x86/boot/voyager.c
+++ /dev/null
@@ -1,40 +0,0 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright 2007 rPath, Inc. - All Rights Reserved
5 *
6 * This file is part of the Linux kernel, and is made available under
7 * the terms of the GNU General Public License version 2.
8 *
9 * ----------------------------------------------------------------------- */
10
11/*
12 * Get the Voyager config information
13 */
14
15#include "boot.h"
16
17int query_voyager(void)
18{
19 u8 err;
20 u16 es, di;
21 /* Abuse the apm_bios_info area for this */
22 u8 *data_ptr = (u8 *)&boot_params.apm_bios_info;
23
24 data_ptr[0] = 0xff; /* Flag on config not found(?) */
25
26 asm("pushw %%es ; "
27 "int $0x15 ; "
28 "setc %0 ; "
29 "movw %%es, %1 ; "
30 "popw %%es"
31 : "=q" (err), "=r" (es), "=D" (di)
32 : "a" (0xffc0));
33
34 if (err)
35 return -1; /* Not Voyager */
36
37 set_fs(es);
38 copy_from_fs(data_ptr, di, 7); /* Table is 7 bytes apparently */
39 return 0;
40}
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 096dd5359cd9..235b81d0f6f2 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc4 3# Linux kernel version: 2.6.29-rc4
4# Thu Feb 12 12:57:57 2009 4# Tue Feb 24 15:50:58 2009
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
@@ -197,7 +197,6 @@ CONFIG_SPARSE_IRQ=y
197CONFIG_X86_FIND_SMP_CONFIG=y 197CONFIG_X86_FIND_SMP_CONFIG=y
198CONFIG_X86_MPPARSE=y 198CONFIG_X86_MPPARSE=y
199# CONFIG_X86_ELAN is not set 199# CONFIG_X86_ELAN is not set
200# CONFIG_X86_VOYAGER is not set
201# CONFIG_X86_GENERICARCH is not set 200# CONFIG_X86_GENERICARCH is not set
202# CONFIG_X86_VSMP is not set 201# CONFIG_X86_VSMP is not set
203# CONFIG_X86_RDC321X is not set 202# CONFIG_X86_RDC321X is not set
@@ -267,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
267CONFIG_X86_LOCAL_APIC=y 266CONFIG_X86_LOCAL_APIC=y
268CONFIG_X86_IO_APIC=y 267CONFIG_X86_IO_APIC=y
269CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y 268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
270# CONFIG_X86_MCE is not set 269CONFIG_X86_MCE=y
270CONFIG_X86_MCE_NONFATAL=y
271CONFIG_X86_MCE_P4THERMAL=y
271CONFIG_VM86=y 272CONFIG_VM86=y
272# CONFIG_TOSHIBA is not set 273# CONFIG_TOSHIBA is not set
273# CONFIG_I8K is not set 274# CONFIG_I8K is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 2efb5d5063ff..9fe5d212ab4c 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc4 3# Linux kernel version: 2.6.29-rc4
4# Thu Feb 12 12:57:29 2009 4# Tue Feb 24 15:44:16 2009
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7# CONFIG_X86_32 is not set 7# CONFIG_X86_32 is not set
@@ -199,7 +199,6 @@ CONFIG_SPARSE_IRQ=y
199CONFIG_X86_FIND_SMP_CONFIG=y 199CONFIG_X86_FIND_SMP_CONFIG=y
200CONFIG_X86_MPPARSE=y 200CONFIG_X86_MPPARSE=y
201# CONFIG_X86_ELAN is not set 201# CONFIG_X86_ELAN is not set
202# CONFIG_X86_VOYAGER is not set
203# CONFIG_X86_GENERICARCH is not set 202# CONFIG_X86_GENERICARCH is not set
204# CONFIG_X86_VSMP is not set 203# CONFIG_X86_VSMP is not set
205CONFIG_SCHED_OMIT_FRAME_POINTER=y 204CONFIG_SCHED_OMIT_FRAME_POINTER=y
@@ -267,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
267CONFIG_X86_LOCAL_APIC=y 266CONFIG_X86_LOCAL_APIC=y
268CONFIG_X86_IO_APIC=y 267CONFIG_X86_IO_APIC=y
269CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y 268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
270# CONFIG_X86_MCE is not set 269CONFIG_X86_MCE=y
270CONFIG_X86_MCE_INTEL=y
271CONFIG_X86_MCE_AMD=y
271# CONFIG_I8K is not set 272# CONFIG_I8K is not set
272CONFIG_MICROCODE=y 273CONFIG_MICROCODE=y
273CONFIG_MICROCODE_INTEL=y 274CONFIG_MICROCODE_INTEL=y
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index dd77ac0cac46..588a7aa937e1 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -33,8 +33,6 @@
33#include <asm/sigframe.h> 33#include <asm/sigframe.h>
34#include <asm/sys_ia32.h> 34#include <asm/sys_ia32.h>
35 35
36#define DEBUG_SIG 0
37
38#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
39 37
40#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ 38#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
@@ -190,42 +188,47 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
190/* 188/*
191 * Do a signal return; undo the signal stack. 189 * Do a signal return; undo the signal stack.
192 */ 190 */
191#define loadsegment_gs(v) load_gs_index(v)
192#define loadsegment_fs(v) loadsegment(fs, v)
193#define loadsegment_ds(v) loadsegment(ds, v)
194#define loadsegment_es(v) loadsegment(es, v)
195
196#define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; })
197#define set_user_seg(seg, v) loadsegment_##seg(v)
198
193#define COPY(x) { \ 199#define COPY(x) { \
194 get_user_ex(regs->x, &sc->x); \ 200 get_user_ex(regs->x, &sc->x); \
195} 201}
196 202
197#define COPY_SEG_CPL3(seg) { \ 203#define GET_SEG(seg) ({ \
198 unsigned short tmp; \ 204 unsigned short tmp; \
199 get_user_ex(tmp, &sc->seg); \ 205 get_user_ex(tmp, &sc->seg); \
200 regs->seg = tmp | 3; \ 206 tmp; \
201} 207})
208
209#define COPY_SEG_CPL3(seg) do { \
210 regs->seg = GET_SEG(seg) | 3; \
211} while (0)
202 212
203#define RELOAD_SEG(seg) { \ 213#define RELOAD_SEG(seg) { \
204 unsigned int cur, pre; \ 214 unsigned int pre = GET_SEG(seg); \
205 get_user_ex(pre, &sc->seg); \ 215 unsigned int cur = get_user_seg(seg); \
206 savesegment(seg, cur); \
207 pre |= 3; \ 216 pre |= 3; \
208 if (pre != cur) \ 217 if (pre != cur) \
209 loadsegment(seg, pre); \ 218 set_user_seg(seg, pre); \
210} 219}
211 220
212static int ia32_restore_sigcontext(struct pt_regs *regs, 221static int ia32_restore_sigcontext(struct pt_regs *regs,
213 struct sigcontext_ia32 __user *sc, 222 struct sigcontext_ia32 __user *sc,
214 unsigned int *pax) 223 unsigned int *pax)
215{ 224{
216 unsigned int tmpflags, gs, oldgs, err = 0; 225 unsigned int tmpflags, err = 0;
217 void __user *buf; 226 void __user *buf;
218 u32 tmp; 227 u32 tmp;
219 228
220 /* Always make any pending restarted system calls return -EINTR */ 229 /* Always make any pending restarted system calls return -EINTR */
221 current_thread_info()->restart_block.fn = do_no_restart_syscall; 230 current_thread_info()->restart_block.fn = do_no_restart_syscall;
222 231
223#if DEBUG_SIG
224 printk(KERN_DEBUG "SIG restore_sigcontext: "
225 "sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
226 sc, sc->err, sc->ip, sc->cs, sc->flags);
227#endif
228
229 get_user_try { 232 get_user_try {
230 /* 233 /*
231 * Reload fs and gs if they have changed in the signal 234 * Reload fs and gs if they have changed in the signal
@@ -233,12 +236,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
233 * the handler, but does not clobber them at least in the 236 * the handler, but does not clobber them at least in the
234 * normal case. 237 * normal case.
235 */ 238 */
236 get_user_ex(gs, &sc->gs); 239 RELOAD_SEG(gs);
237 gs |= 3;
238 savesegment(gs, oldgs);
239 if (gs != oldgs)
240 load_gs_index(gs);
241
242 RELOAD_SEG(fs); 240 RELOAD_SEG(fs);
243 RELOAD_SEG(ds); 241 RELOAD_SEG(ds);
244 RELOAD_SEG(es); 242 RELOAD_SEG(es);
@@ -337,17 +335,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
337 void __user *fpstate, 335 void __user *fpstate,
338 struct pt_regs *regs, unsigned int mask) 336 struct pt_regs *regs, unsigned int mask)
339{ 337{
340 int tmp, err = 0; 338 int err = 0;
341 339
342 put_user_try { 340 put_user_try {
343 savesegment(gs, tmp); 341 put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs);
344 put_user_ex(tmp, (unsigned int __user *)&sc->gs); 342 put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs);
345 savesegment(fs, tmp); 343 put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds);
346 put_user_ex(tmp, (unsigned int __user *)&sc->fs); 344 put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es);
347 savesegment(ds, tmp);
348 put_user_ex(tmp, (unsigned int __user *)&sc->ds);
349 savesegment(es, tmp);
350 put_user_ex(tmp, (unsigned int __user *)&sc->es);
351 345
352 put_user_ex(regs->di, &sc->di); 346 put_user_ex(regs->di, &sc->di);
353 put_user_ex(regs->si, &sc->si); 347 put_user_ex(regs->si, &sc->si);
@@ -488,11 +482,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
488 regs->cs = __USER32_CS; 482 regs->cs = __USER32_CS;
489 regs->ss = __USER32_DS; 483 regs->ss = __USER32_DS;
490 484
491#if DEBUG_SIG
492 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
493 current->comm, current->pid, frame, regs->ip, frame->pretcode);
494#endif
495
496 return 0; 485 return 0;
497} 486}
498 487
@@ -574,10 +563,5 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
574 regs->cs = __USER32_CS; 563 regs->cs = __USER32_CS;
575 regs->ss = __USER32_DS; 564 regs->ss = __USER32_DS;
576 565
577#if DEBUG_SIG
578 printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
579 current->comm, current->pid, frame, regs->ip, frame->pretcode);
580#endif
581
582 return 0; 566 return 0;
583} 567}
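The reason gs needs the loadsegment_##seg indirection is that on x86-64 a plain segment-register move would clobber the kernel's GS base, so user gs has to be reloaded through load_gs_index(), while fs/ds/es can use the ordinary loadsegment(). Once that difference is hidden behind set_user_seg(), the previously open-coded gs path collapses into the same RELOAD_SEG macro as the other registers. Expanded by hand (a sketch that keeps the get_user_ex() uaccess helper exactly as the file uses it), RELOAD_SEG(gs) now amounts to:

{
	unsigned short tmp;
	unsigned int pre, cur;

	get_user_ex(tmp, &sc->gs);	/* GET_SEG(gs): fetch saved selector  */
	pre = tmp;
	savesegment(gs, cur);		/* get_user_seg(gs): current selector */
	pre |= 3;			/* force RPL 3                        */
	if (pre != cur)
		load_gs_index(pre);	/* set_user_seg(gs, pre)              */
}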
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index fba49f66228f..4ef949c1972e 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -1,15 +1,18 @@
1#ifndef _ASM_X86_APIC_H 1#ifndef _ASM_X86_APIC_H
2#define _ASM_X86_APIC_H 2#define _ASM_X86_APIC_H
3 3
4#include <linux/pm.h> 4#include <linux/cpumask.h>
5#include <linux/delay.h> 5#include <linux/delay.h>
6#include <linux/pm.h>
6 7
7#include <asm/alternative.h> 8#include <asm/alternative.h>
8#include <asm/fixmap.h> 9#include <asm/cpufeature.h>
9#include <asm/apicdef.h>
10#include <asm/processor.h> 10#include <asm/processor.h>
11#include <asm/apicdef.h>
12#include <asm/atomic.h>
13#include <asm/fixmap.h>
14#include <asm/mpspec.h>
11#include <asm/system.h> 15#include <asm/system.h>
12#include <asm/cpufeature.h>
13#include <asm/msr.h> 16#include <asm/msr.h>
14 17
15#define ARCH_APICTIMER_STOPS_ON_C3 1 18#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -72,7 +75,14 @@ static inline void default_inquire_remote_apic(int apicid)
72#define setup_secondary_clock setup_secondary_APIC_clock 75#define setup_secondary_clock setup_secondary_APIC_clock
73#endif 76#endif
74 77
78#ifdef CONFIG_X86_VSMP
75extern int is_vsmp_box(void); 79extern int is_vsmp_box(void);
80#else
81static inline int is_vsmp_box(void)
82{
83 return 0;
84}
85#endif
76extern void xapic_wait_icr_idle(void); 86extern void xapic_wait_icr_idle(void);
77extern u32 safe_xapic_wait_icr_idle(void); 87extern u32 safe_xapic_wait_icr_idle(void);
78extern void xapic_icr_write(u32, u32); 88extern void xapic_icr_write(u32, u32);
@@ -92,6 +102,12 @@ static inline u32 native_apic_mem_read(u32 reg)
92 return *((volatile u32 *)(APIC_BASE + reg)); 102 return *((volatile u32 *)(APIC_BASE + reg));
93} 103}
94 104
105extern void native_apic_wait_icr_idle(void);
106extern u32 native_safe_apic_wait_icr_idle(void);
107extern void native_apic_icr_write(u32 low, u32 id);
108extern u64 native_apic_icr_read(void);
109
110#ifdef CONFIG_X86_X2APIC
95static inline void native_apic_msr_write(u32 reg, u32 v) 111static inline void native_apic_msr_write(u32 reg, u32 v)
96{ 112{
97 if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || 113 if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
@@ -112,8 +128,32 @@ static inline u32 native_apic_msr_read(u32 reg)
112 return low; 128 return low;
113} 129}
114 130
115#ifndef CONFIG_X86_32 131static inline void native_x2apic_wait_icr_idle(void)
116extern int x2apic; 132{
133 /* no need to wait for icr idle in x2apic */
134 return;
135}
136
137static inline u32 native_safe_x2apic_wait_icr_idle(void)
138{
139 /* no need to wait for icr idle in x2apic */
140 return 0;
141}
142
143static inline void native_x2apic_icr_write(u32 low, u32 id)
144{
145 wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
146}
147
148static inline u64 native_x2apic_icr_read(void)
149{
150 unsigned long val;
151
152 rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
153 return val;
154}
155
156extern int x2apic, x2apic_phys;
117extern void check_x2apic(void); 157extern void check_x2apic(void);
118extern void enable_x2apic(void); 158extern void enable_x2apic(void);
119extern void enable_IR_x2apic(void); 159extern void enable_IR_x2apic(void);
@@ -131,53 +171,24 @@ static inline int x2apic_enabled(void)
131 return 0; 171 return 0;
132} 172}
133#else 173#else
134#define x2apic_enabled() 0 174static inline void check_x2apic(void)
135#endif
136
137struct apic_ops {
138 u32 (*read)(u32 reg);
139 void (*write)(u32 reg, u32 v);
140 u64 (*icr_read)(void);
141 void (*icr_write)(u32 low, u32 high);
142 void (*wait_icr_idle)(void);
143 u32 (*safe_wait_icr_idle)(void);
144};
145
146extern struct apic_ops *apic_ops;
147
148static inline u32 apic_read(u32 reg)
149{
150 return apic_ops->read(reg);
151}
152
153static inline void apic_write(u32 reg, u32 val)
154{ 175{
155 apic_ops->write(reg, val);
156} 176}
157 177static inline void enable_x2apic(void)
158static inline u64 apic_icr_read(void)
159{
160 return apic_ops->icr_read();
161}
162
163static inline void apic_icr_write(u32 low, u32 high)
164{ 178{
165 apic_ops->icr_write(low, high);
166} 179}
167 180static inline void enable_IR_x2apic(void)
168static inline void apic_wait_icr_idle(void)
169{ 181{
170 apic_ops->wait_icr_idle();
171} 182}
172 183static inline int x2apic_enabled(void)
173static inline u32 safe_apic_wait_icr_idle(void)
174{ 184{
175 return apic_ops->safe_wait_icr_idle(); 185 return 0;
176} 186}
187#endif
177 188
178extern int get_physical_broadcast(void); 189extern int get_physical_broadcast(void);
179 190
180#ifdef CONFIG_X86_64 191#ifdef CONFIG_X86_X2APIC
181static inline void ack_x2APIC_irq(void) 192static inline void ack_x2APIC_irq(void)
182{ 193{
183 /* Docs say use 0 for future compatibility */ 194 /* Docs say use 0 for future compatibility */
@@ -185,18 +196,6 @@ static inline void ack_x2APIC_irq(void)
185} 196}
186#endif 197#endif
187 198
188
189static inline void ack_APIC_irq(void)
190{
191 /*
192 * ack_APIC_irq() actually gets compiled as a single instruction
193 * ... yummie.
194 */
195
196 /* Docs say use 0 for future compatibility */
197 apic_write(APIC_EOI, 0);
198}
199
200extern int lapic_get_maxlvt(void); 199extern int lapic_get_maxlvt(void);
201extern void clear_local_APIC(void); 200extern void clear_local_APIC(void);
202extern void connect_bsp_APIC(void); 201extern void connect_bsp_APIC(void);
@@ -244,7 +243,151 @@ static inline void disable_local_APIC(void) { }
244#define SET_APIC_ID(x) (apic->set_apic_id(x)) 243#define SET_APIC_ID(x) (apic->set_apic_id(x))
245#else 244#else
246 245
247#ifdef CONFIG_X86_LOCAL_APIC 246#endif
247
248/*
249 * Copyright 2004 James Cleverdon, IBM.
250 * Subject to the GNU Public License, v.2
251 *
252 * Generic APIC sub-arch data struct.
253 *
254 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
255 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
256 * James Cleverdon.
257 */
258struct apic {
259 char *name;
260
261 int (*probe)(void);
262 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
263 int (*apic_id_registered)(void);
264
265 u32 irq_delivery_mode;
266 u32 irq_dest_mode;
267
268 const struct cpumask *(*target_cpus)(void);
269
270 int disable_esr;
271
272 int dest_logical;
273 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
274 unsigned long (*check_apicid_present)(int apicid);
275
276 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
277 void (*init_apic_ldr)(void);
278
279 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
280
281 void (*setup_apic_routing)(void);
282 int (*multi_timer_check)(int apic, int irq);
283 int (*apicid_to_node)(int logical_apicid);
284 int (*cpu_to_logical_apicid)(int cpu);
285 int (*cpu_present_to_apicid)(int mps_cpu);
286 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
287 void (*setup_portio_remap)(void);
288 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
289 void (*enable_apic_mode)(void);
290 int (*phys_pkg_id)(int cpuid_apic, int index_msb);
291
292 /*
293 * When one of the next two hooks returns 1 the apic
294 * is switched to this. Essentially they are additional
295 * probe functions:
296 */
297 int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
298
299 unsigned int (*get_apic_id)(unsigned long x);
300 unsigned long (*set_apic_id)(unsigned int id);
301 unsigned long apic_id_mask;
302
303 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
304 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
305 const struct cpumask *andmask);
306
307 /* ipi */
308 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
309 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
310 int vector);
311 void (*send_IPI_allbutself)(int vector);
312 void (*send_IPI_all)(int vector);
313 void (*send_IPI_self)(int vector);
314
315 /* wakeup_secondary_cpu */
316 int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
317
318 int trampoline_phys_low;
319 int trampoline_phys_high;
320
321 void (*wait_for_init_deassert)(atomic_t *deassert);
322 void (*smp_callin_clear_local_apic)(void);
323 void (*inquire_remote_apic)(int apicid);
324
325 /* apic ops */
326 u32 (*read)(u32 reg);
327 void (*write)(u32 reg, u32 v);
328 u64 (*icr_read)(void);
329 void (*icr_write)(u32 low, u32 high);
330 void (*wait_icr_idle)(void);
331 u32 (*safe_wait_icr_idle)(void);
332};
333
334/*
335 * Pointer to the local APIC driver in use on this system (there's
336 * always just one such driver in use - the kernel decides via an
337 * early probing process which one it picks - and then sticks to it):
338 */
339extern struct apic *apic;
340
341/*
342 * APIC functionality to boot other CPUs - only used on SMP:
343 */
344#ifdef CONFIG_SMP
345extern atomic_t init_deasserted;
346extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
347#endif
348
349static inline u32 apic_read(u32 reg)
350{
351 return apic->read(reg);
352}
353
354static inline void apic_write(u32 reg, u32 val)
355{
356 apic->write(reg, val);
357}
358
359static inline u64 apic_icr_read(void)
360{
361 return apic->icr_read();
362}
363
364static inline void apic_icr_write(u32 low, u32 high)
365{
366 apic->icr_write(low, high);
367}
368
369static inline void apic_wait_icr_idle(void)
370{
371 apic->wait_icr_idle();
372}
373
374static inline u32 safe_apic_wait_icr_idle(void)
375{
376 return apic->safe_wait_icr_idle();
377}
378
379
380static inline void ack_APIC_irq(void)
381{
382 /*
383 * ack_APIC_irq() actually gets compiled as a single instruction
384 * ... yummie.
385 */
386
387 /* Docs say use 0 for future compatibility */
388 apic_write(APIC_EOI, 0);
389}
390
248static inline unsigned default_get_apic_id(unsigned long x) 391static inline unsigned default_get_apic_id(unsigned long x)
249{ 392{
250 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); 393 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
@@ -254,8 +397,169 @@ static inline unsigned default_get_apic_id(unsigned long x)
254 else 397 else
255 return (x >> 24) & 0x0F; 398 return (x >> 24) & 0x0F;
256} 399}
400
401/*
402 * Warm reset vector default position:
403 */
404#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
405#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
406
407#ifdef CONFIG_X86_64
408extern struct apic apic_flat;
409extern struct apic apic_physflat;
410extern struct apic apic_x2apic_cluster;
411extern struct apic apic_x2apic_phys;
412extern int default_acpi_madt_oem_check(char *, char *);
413
414extern void apic_send_IPI_self(int vector);
415
416extern struct apic apic_x2apic_uv_x;
417DECLARE_PER_CPU(int, x2apic_extra_bits);
418
419extern int default_cpu_present_to_apicid(int mps_cpu);
420extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
421#endif
422
423static inline void default_wait_for_init_deassert(atomic_t *deassert)
424{
425 while (!atomic_read(deassert))
426 cpu_relax();
427 return;
428}
429
430extern void generic_bigsmp_probe(void);
431
432
433#ifdef CONFIG_X86_LOCAL_APIC
434
435#include <asm/smp.h>
436
437#define APIC_DFR_VALUE (APIC_DFR_FLAT)
438
439static inline const struct cpumask *default_target_cpus(void)
440{
441#ifdef CONFIG_SMP
442 return cpu_online_mask;
443#else
444 return cpumask_of(0);
257#endif 445#endif
446}
447
448DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
449
450
451static inline unsigned int read_apic_id(void)
452{
453 unsigned int reg;
454
455 reg = apic_read(APIC_ID);
456
457 return apic->get_apic_id(reg);
458}
459
460extern void default_setup_apic_routing(void);
461
462#ifdef CONFIG_X86_32
463/*
464 * Set up the logical destination ID.
465 *
466 * Intel recommends to set DFR, LDR and TPR before enabling
467 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
468 * document number 292116). So here it goes...
469 */
470extern void default_init_apic_ldr(void);
471
472static inline int default_apic_id_registered(void)
473{
474 return physid_isset(read_apic_id(), phys_cpu_present_map);
475}
476
477static inline unsigned int
478default_cpu_mask_to_apicid(const struct cpumask *cpumask)
479{
480 return cpumask_bits(cpumask)[0];
481}
482
483static inline unsigned int
484default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
485 const struct cpumask *andmask)
486{
487 unsigned long mask1 = cpumask_bits(cpumask)[0];
488 unsigned long mask2 = cpumask_bits(andmask)[0];
489 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
490
491 return (unsigned int)(mask1 & mask2 & mask3);
492}
493
494static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
495{
496 return cpuid_apic >> index_msb;
497}
498
499extern int default_apicid_to_node(int logical_apicid);
500
501#endif
502
503static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
504{
505 return physid_isset(apicid, bitmap);
506}
507
508static inline unsigned long default_check_apicid_present(int bit)
509{
510 return physid_isset(bit, phys_cpu_present_map);
511}
512
513static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
514{
515 return phys_map;
516}
517
518/* Mapping from cpu number to logical apicid */
519static inline int default_cpu_to_logical_apicid(int cpu)
520{
521 return 1 << cpu;
522}
523
524static inline int __default_cpu_present_to_apicid(int mps_cpu)
525{
526 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
527 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
528 else
529 return BAD_APICID;
530}
531
532static inline int
533__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
534{
535 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
536}
537
538#ifdef CONFIG_X86_32
539static inline int default_cpu_present_to_apicid(int mps_cpu)
540{
541 return __default_cpu_present_to_apicid(mps_cpu);
542}
543
544static inline int
545default_check_phys_apicid_present(int boot_cpu_physical_apicid)
546{
547 return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
548}
549#else
550extern int default_cpu_present_to_apicid(int mps_cpu);
551extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
552#endif
553
554static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
555{
556 return physid_mask_of_physid(phys_apicid);
557}
558
559#endif /* CONFIG_X86_LOCAL_APIC */
258 560
561#ifdef CONFIG_X86_32
562extern u8 cpu_2_logical_apicid[NR_CPUS];
259#endif 563#endif
260 564
261#endif /* _ASM_X86_APIC_H */ 565#endif /* _ASM_X86_APIC_H */
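Folding the old apic_ops into struct apic means each platform's APIC driver now carries both its model-specific policy hooks and its register accessors in one object, and the inline apic_read()/apic_icr_write() wrappers above simply dispatch through the single global 'apic' pointer. A driver definition then looks roughly like this sketch (loosely modelled on the 64-bit flat driver; the flat_* function names and the field values are illustrative assumptions, not copied from this patch):

static struct apic apic_flat = {
	.name			= "flat",
	.apic_id_registered	= flat_apic_id_registered,
	.irq_delivery_mode	= dest_LowestPrio,
	.irq_dest_mode		= 1,			/* logical destinations */
	.target_cpus		= flat_target_cpus,
	.send_IPI_mask		= flat_send_IPI_mask,
	.send_IPI_all		= flat_send_IPI_all,

	/* the members that used to live in apic_ops: */
	.read			= native_apic_mem_read,
	.write			= native_apic_mem_write,
	.icr_read		= native_apic_icr_read,
	.icr_write		= native_apic_icr_write,
	.wait_icr_idle		= native_apic_wait_icr_idle,
	.safe_wait_icr_idle	= native_safe_apic_wait_icr_idle,
};

/* early probing picks one driver and sticks to it: */
struct apic *apic = &apic_flat;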
diff --git a/arch/x86/include/asm/arch_hooks.h b/arch/x86/include/asm/arch_hooks.h
deleted file mode 100644
index cbd4957838a6..000000000000
--- a/arch/x86/include/asm/arch_hooks.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef _ASM_X86_ARCH_HOOKS_H
2#define _ASM_X86_ARCH_HOOKS_H
3
4#include <linux/interrupt.h>
5
6/*
7 * linux/include/asm/arch_hooks.h
8 *
9 * define the architecture specific hooks
10 */
11
12/* these aren't arch hooks, they are generic routines
13 * that can be used by the hooks */
14extern void init_ISA_irqs(void);
15extern irqreturn_t timer_interrupt(int irq, void *dev_id);
16
17/* these are the defined hooks */
18extern void intr_init_hook(void);
19extern void pre_intr_init_hook(void);
20extern void pre_setup_arch_hook(void);
21extern void trap_init_hook(void);
22extern void pre_time_init_hook(void);
23extern void time_init_hook(void);
24extern void mca_nmi_hook(void);
25
26#endif /* _ASM_X86_ARCH_HOOKS_H */
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index dd61616cb73d..6526cf08b0e4 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -10,17 +10,31 @@
10#define EXTENDED_VGA 0xfffe /* 80x50 mode */ 10#define EXTENDED_VGA 0xfffe /* 80x50 mode */
11#define ASK_VGA 0xfffd /* ask for it at bootup */ 11#define ASK_VGA 0xfffd /* ask for it at bootup */
12 12
13#ifdef __KERNEL__
14
13/* Physical address where kernel should be loaded. */ 15/* Physical address where kernel should be loaded. */
14#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ 16#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15 + (CONFIG_PHYSICAL_ALIGN - 1)) \ 17 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16 & ~(CONFIG_PHYSICAL_ALIGN - 1)) 18 & ~(CONFIG_PHYSICAL_ALIGN - 1))
17 19
20#ifdef CONFIG_KERNEL_BZIP2
21#define BOOT_HEAP_SIZE 0x400000
22#else /* !CONFIG_KERNEL_BZIP2 */
23
18#ifdef CONFIG_X86_64 24#ifdef CONFIG_X86_64
19#define BOOT_HEAP_SIZE 0x7000 25#define BOOT_HEAP_SIZE 0x7000
20#define BOOT_STACK_SIZE 0x4000
21#else 26#else
22#define BOOT_HEAP_SIZE 0x4000 27#define BOOT_HEAP_SIZE 0x4000
28#endif
29
30#endif /* !CONFIG_KERNEL_BZIP2 */
31
32#ifdef CONFIG_X86_64
33#define BOOT_STACK_SIZE 0x4000
34#else
23#define BOOT_STACK_SIZE 0x1000 35#define BOOT_STACK_SIZE 0x1000
24#endif 36#endif
25 37
38#endif /* __KERNEL__ */
39
26#endif /* _ASM_X86_BOOT_H */ 40#endif /* _ASM_X86_BOOT_H */
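The jump from a 16 kB/28 kB heap to 0x400000 under CONFIG_KERNEL_BZIP2 matches the bzip2 decoder's appetite: it keeps roughly one 32-bit bookkeeping word per byte of the maximum 900 kB block in heap memory while decoding. A back-of-the-envelope check (the 900 kB block size is a bzip2 format constant; the one-word-per-byte figure is an assumption about the in-kernel decoder's dbuf):

#define BZIP2_MAX_BLOCK		900000UL		/* largest bzip2 block, bytes */
#define BZIP2_DBUF_BYTES	(BZIP2_MAX_BLOCK * 4)	/* ~3.6 MB of u32 entries     */
/* BOOT_HEAP_SIZE = 0x400000 = 4 MiB, i.e. the ~3.6 MB dbuf plus headroom
 * for Huffman tables and the decoder's other allocations. */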
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 23696d44a0af..dca8f03da5b2 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -1,11 +1,155 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
12 */
13
1#ifndef _ASM_X86_FIXMAP_H 14#ifndef _ASM_X86_FIXMAP_H
2#define _ASM_X86_FIXMAP_H 15#define _ASM_X86_FIXMAP_H
3 16
17#ifndef __ASSEMBLY__
18#include <linux/kernel.h>
19#include <asm/acpi.h>
20#include <asm/apicdef.h>
21#include <asm/page.h>
22#ifdef CONFIG_X86_32
23#include <linux/threads.h>
24#include <asm/kmap_types.h>
25#else
26#include <asm/vsyscall.h>
27#ifdef CONFIG_EFI
28#include <asm/efi.h>
29#endif
30#endif
31
32/*
33 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
34 * uses fixmaps that relies on FIXADDR_TOP for proper address calculation.
35 * Because of this, FIXADDR_TOP x86 integration was left as later work.
36 */
37#ifdef CONFIG_X86_32
38/* used by vmalloc.c, vsyscall.lds.S.
39 *
40 * Leave one empty page between vmalloc'ed areas and
41 * the start of the fixmap.
42 */
43extern unsigned long __FIXADDR_TOP;
44#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
45
46#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
47#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
48#else
49#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
50
51/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
52#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
53#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
54#endif
55
56
57/*
58 * Here we define all the compile-time 'special' virtual
59 * addresses. The point is to have a constant address at
60 * compile time, but to set the physical address only
61 * in the boot process.
62 * for x86_32: We allocate these special addresses
63 * from the end of virtual memory (0xfffff000) backwards.
64 * Also this lets us do fail-safe vmalloc(), we
65 * can guarantee that these special addresses and
66 * vmalloc()-ed addresses never overlap.
67 *
68 * These 'compile-time allocated' memory buffers are
69 * fixed-size 4k pages (or larger if used with an increment
70 * higher than 1). Use set_fixmap(idx,phys) to associate
71 * physical memory with fixmap indices.
72 *
73 * TLB entries of such buffers will not be flushed across
74 * task switches.
75 */
76enum fixed_addresses {
4#ifdef CONFIG_X86_32 77#ifdef CONFIG_X86_32
5# include "fixmap_32.h" 78 FIX_HOLE,
79 FIX_VDSO,
6#else 80#else
7# include "fixmap_64.h" 81 VSYSCALL_LAST_PAGE,
82 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
83 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
84 VSYSCALL_HPET,
8#endif 85#endif
86 FIX_DBGP_BASE,
87 FIX_EARLYCON_MEM_BASE,
88#ifdef CONFIG_X86_LOCAL_APIC
89 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
90#endif
91#ifdef CONFIG_X86_IO_APIC
92 FIX_IO_APIC_BASE_0,
93 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
94#endif
95#ifdef CONFIG_X86_64
96#ifdef CONFIG_EFI
97 FIX_EFI_IO_MAP_LAST_PAGE,
98 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
99 + MAX_EFI_IO_PAGES - 1,
100#endif
101#endif
102#ifdef CONFIG_X86_VISWS_APIC
103 FIX_CO_CPU, /* Cobalt timer */
104 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
105 FIX_LI_PCIA, /* Lithium PCI Bridge A */
106 FIX_LI_PCIB, /* Lithium PCI Bridge B */
107#endif
108#ifdef CONFIG_X86_F00F_BUG
109 FIX_F00F_IDT, /* Virtual mapping for IDT */
110#endif
111#ifdef CONFIG_X86_CYCLONE_TIMER
112 FIX_CYCLONE_TIMER, /*cyclone timer register*/
113#endif
114#ifdef CONFIG_X86_32
115 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
116 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
117#ifdef CONFIG_PCI_MMCONFIG
118 FIX_PCIE_MCFG,
119#endif
120#endif
121#ifdef CONFIG_PARAVIRT
122 FIX_PARAVIRT_BOOTMAP,
123#endif
124 __end_of_permanent_fixed_addresses,
125#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
126 FIX_OHCI1394_BASE,
127#endif
128 /*
129 * 256 temporary boot-time mappings, used by early_ioremap(),
130 * before ioremap() is functional.
131 *
132 * We round it up to the next 256 pages boundary so that we
133 * can have a single pgd entry and a single pte table:
134 */
135#define NR_FIX_BTMAPS 64
136#define FIX_BTMAPS_SLOTS 4
137 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
138 (__end_of_permanent_fixed_addresses & 255),
139 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
140#ifdef CONFIG_X86_32
141 FIX_WP_TEST,
142#endif
143 __end_of_fixed_addresses
144};
145
146
147extern void reserve_top_address(unsigned long reserve);
148
149#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
150#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
151#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
152#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE)
9 153
10extern int fixmaps_set; 154extern int fixmaps_set;
11 155
@@ -69,4 +213,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
69 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 213 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
70 return __virt_to_fix(vaddr); 214 return __virt_to_fix(vaddr);
71} 215}
216#endif /* !__ASSEMBLY__ */
72#endif /* _ASM_X86_FIXMAP_H */ 217#endif /* _ASM_X86_FIXMAP_H */
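The comment block inherited from the 32-bit header states the fixmap contract: each slot's virtual address is fixed at compile time, and only the physical page behind it is chosen during boot with set_fixmap(). A minimal usage sketch using the FIX_APIC_BASE slot from the enum above (mp_lapic_addr and the nocache variant are assumptions used for illustration):

/* boot code: bind the physical local-APIC page to its reserved slot */
set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);

/* afterwards the virtual address is a compile-time constant ... */
#define APIC_BASE	(fix_to_virt(FIX_APIC_BASE))

/* ... so register accesses need no pointer bookkeeping at all: */
unsigned int apic_version = *(volatile unsigned int *)(APIC_BASE + APIC_LVR);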
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
deleted file mode 100644
index 047d9bab2b31..000000000000
--- a/arch/x86/include/asm/fixmap_32.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */
12
13#ifndef _ASM_X86_FIXMAP_32_H
14#define _ASM_X86_FIXMAP_32_H
15
16
17/* used by vmalloc.c, vsyscall.lds.S.
18 *
19 * Leave one empty page between vmalloc'ed areas and
20 * the start of the fixmap.
21 */
22extern unsigned long __FIXADDR_TOP;
23#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
24#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
25
26#ifndef __ASSEMBLY__
27#include <linux/kernel.h>
28#include <asm/acpi.h>
29#include <asm/apicdef.h>
30#include <asm/page.h>
31#include <linux/threads.h>
32#include <asm/kmap_types.h>
33
34/*
35 * Here we define all the compile-time 'special' virtual
36 * addresses. The point is to have a constant address at
37 * compile time, but to set the physical address only
38 * in the boot process. We allocate these special addresses
39 * from the end of virtual memory (0xfffff000) backwards.
40 * Also this lets us do fail-safe vmalloc(), we
41 * can guarantee that these special addresses and
42 * vmalloc()-ed addresses never overlap.
43 *
44 * these 'compile-time allocated' memory buffers are
45 * fixed-size 4k pages. (or larger if used with an increment
46 * highger than 1) use fixmap_set(idx,phys) to associate
47 * physical memory with fixmap indices.
48 *
49 * TLB entries of such buffers will not be flushed across
50 * task switches.
51 */
52enum fixed_addresses {
53 FIX_HOLE,
54 FIX_VDSO,
55 FIX_DBGP_BASE,
56 FIX_EARLYCON_MEM_BASE,
57#ifdef CONFIG_X86_LOCAL_APIC
58 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
59#endif
60#ifdef CONFIG_X86_IO_APIC
61 FIX_IO_APIC_BASE_0,
62 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
63#endif
64#ifdef CONFIG_X86_VISWS_APIC
65 FIX_CO_CPU, /* Cobalt timer */
66 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
67 FIX_LI_PCIA, /* Lithium PCI Bridge A */
68 FIX_LI_PCIB, /* Lithium PCI Bridge B */
69#endif
70#ifdef CONFIG_X86_F00F_BUG
71 FIX_F00F_IDT, /* Virtual mapping for IDT */
72#endif
73#ifdef CONFIG_X86_CYCLONE_TIMER
74 FIX_CYCLONE_TIMER, /*cyclone timer register*/
75#endif
76 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
77 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
78#ifdef CONFIG_PCI_MMCONFIG
79 FIX_PCIE_MCFG,
80#endif
81#ifdef CONFIG_PARAVIRT
82 FIX_PARAVIRT_BOOTMAP,
83#endif
84 __end_of_permanent_fixed_addresses,
85 /*
86 * 256 temporary boot-time mappings, used by early_ioremap(),
87 * before ioremap() is functional.
88 *
89 * We round it up to the next 256 pages boundary so that we
90 * can have a single pgd entry and a single pte table:
91 */
92#define NR_FIX_BTMAPS 64
93#define FIX_BTMAPS_SLOTS 4
94 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
95 (__end_of_permanent_fixed_addresses & 255),
96 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
97 FIX_WP_TEST,
98#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
99 FIX_OHCI1394_BASE,
100#endif
101 __end_of_fixed_addresses
102};
103
104extern void reserve_top_address(unsigned long reserve);
105
106
107#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
108
109#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
110#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
111#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
112#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
113
114#endif /* !__ASSEMBLY__ */
115#endif /* _ASM_X86_FIXMAP_32_H */
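The deleted 32-bit header (and its 64-bit sibling below) documents the fixmap idea: each enum fixed_addresses index names a compile-time-constant virtual address counted downwards from FIXADDR_TOP. A minimal sketch of that index/address arithmetic, following the generic helper shapes that the unified fixmap.h keeps:

    /* Illustrative only: how a fixmap index becomes a constant virtual
     * address.  Index 0 (FIX_HOLE) maps the page at FIXADDR_TOP itself,
     * index 1 (FIX_VDSO) the page below it, and so on.                  */
    #define __fix_to_virt(x)  (FIXADDR_TOP - ((x) << PAGE_SHIFT))
    #define __virt_to_fix(x)  ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)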
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h
deleted file mode 100644
index 298d9ba3faeb..000000000000
--- a/arch/x86/include/asm/fixmap_64.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 */
10
11#ifndef _ASM_X86_FIXMAP_64_H
12#define _ASM_X86_FIXMAP_64_H
13
14#include <linux/kernel.h>
15#include <asm/acpi.h>
16#include <asm/apicdef.h>
17#include <asm/page.h>
18#include <asm/vsyscall.h>
19#include <asm/efi.h>
20
21/*
22 * Here we define all the compile-time 'special' virtual
23 * addresses. The point is to have a constant address at
24 * compile time, but to set the physical address only
25 * in the boot process.
26 *
27 * These 'compile-time allocated' memory buffers are
28 * fixed-size 4k pages (or larger if used with an increment
29 * higher than 1). Use set_fixmap(idx,phys) to associate
30 * physical memory with fixmap indices.
31 *
32 * TLB entries of such buffers will not be flushed across
33 * task switches.
34 */
35
36enum fixed_addresses {
37 VSYSCALL_LAST_PAGE,
38 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
39 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
40 VSYSCALL_HPET,
41 FIX_DBGP_BASE,
42 FIX_EARLYCON_MEM_BASE,
 43 FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
44 FIX_IO_APIC_BASE_0,
45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
46 FIX_EFI_IO_MAP_LAST_PAGE,
47 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
48 + MAX_EFI_IO_PAGES - 1,
49#ifdef CONFIG_PARAVIRT
50 FIX_PARAVIRT_BOOTMAP,
51#endif
52 __end_of_permanent_fixed_addresses,
53#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
54 FIX_OHCI1394_BASE,
55#endif
56 /*
57 * 256 temporary boot-time mappings, used by early_ioremap(),
58 * before ioremap() is functional.
59 *
60 * We round it up to the next 256 pages boundary so that we
61 * can have a single pgd entry and a single pte table:
62 */
63#define NR_FIX_BTMAPS 64
64#define FIX_BTMAPS_SLOTS 4
65 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
66 (__end_of_permanent_fixed_addresses & 255),
67 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
68 __end_of_fixed_addresses
69};
70
71#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
72#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
73#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
74
75/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
76#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
77#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
78
79#endif /* _ASM_X86_FIXMAP_64_H */
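Both fixmap variants round the early_ioremap() slot range up so that the 256 boot-time indices (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) never straddle a pte-table boundary and can therefore be covered by a single pgd entry and a single pte table, as the comment says. A worked example of that rounding, with an assumed (illustrative) value for the permanent-fixmap count:

    /* Assume __end_of_permanent_fixed_addresses == 67 (illustrative).
     *   FIX_BTMAP_END   = 67 + 256 - (67 & 255) = 256
     *   FIX_BTMAP_BEGIN = 256 + 64*4 - 1        = 511
     * so the 256 temporary slots cover indices 256..511 and, being
     * 256-aligned, stay inside one pte table / one top-level entry.     */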
diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h
index 273b99452ae0..4b8b98fa7f25 100644
--- a/arch/x86/include/asm/genapic.h
+++ b/arch/x86/include/asm/genapic.h
@@ -1,263 +1 @@
 1#ifndef _ASM_X86_GENAPIC_H 1#include <asm/apic.h>
2#define _ASM_X86_GENAPIC_H
3
4#include <linux/cpumask.h>
5
6#include <asm/mpspec.h>
7#include <asm/atomic.h>
8
9/*
10 * Copyright 2004 James Cleverdon, IBM.
11 * Subject to the GNU Public License, v.2
12 *
13 * Generic APIC sub-arch data struct.
14 *
15 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
16 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
17 * James Cleverdon.
18 */
19struct genapic {
20 char *name;
21
22 int (*probe)(void);
23 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
24 int (*apic_id_registered)(void);
25
26 u32 irq_delivery_mode;
27 u32 irq_dest_mode;
28
29 const struct cpumask *(*target_cpus)(void);
30
31 int disable_esr;
32
33 int dest_logical;
34 unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
35 unsigned long (*check_apicid_present)(int apicid);
36
37 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
38 void (*init_apic_ldr)(void);
39
40 physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
41
42 void (*setup_apic_routing)(void);
43 int (*multi_timer_check)(int apic, int irq);
44 int (*apicid_to_node)(int logical_apicid);
45 int (*cpu_to_logical_apicid)(int cpu);
46 int (*cpu_present_to_apicid)(int mps_cpu);
47 physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
48 void (*setup_portio_remap)(void);
49 int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
50 void (*enable_apic_mode)(void);
51 int (*phys_pkg_id)(int cpuid_apic, int index_msb);
52
53 /*
54 * When one of the next two hooks returns 1 the genapic
55 * is switched to this. Essentially they are additional
56 * probe functions:
57 */
58 int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
59
60 unsigned int (*get_apic_id)(unsigned long x);
61 unsigned long (*set_apic_id)(unsigned int id);
62 unsigned long apic_id_mask;
63
64 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
65 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
66 const struct cpumask *andmask);
67
68 /* ipi */
69 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
70 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
71 int vector);
72 void (*send_IPI_allbutself)(int vector);
73 void (*send_IPI_all)(int vector);
74 void (*send_IPI_self)(int vector);
75
76 /* wakeup_secondary_cpu */
77 int (*wakeup_cpu)(int apicid, unsigned long start_eip);
78
79 int trampoline_phys_low;
80 int trampoline_phys_high;
81
82 void (*wait_for_init_deassert)(atomic_t *deassert);
83 void (*smp_callin_clear_local_apic)(void);
84 void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
85 void (*inquire_remote_apic)(int apicid);
86};
87
88extern struct genapic *apic;
89
90/*
91 * Warm reset vector default position:
92 */
93#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
94#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
95
96#ifdef CONFIG_X86_32
97extern void es7000_update_genapic_to_cluster(void);
98#else
99extern struct genapic apic_flat;
100extern struct genapic apic_physflat;
101extern struct genapic apic_x2apic_cluster;
102extern struct genapic apic_x2apic_phys;
103extern int default_acpi_madt_oem_check(char *, char *);
104
105extern void apic_send_IPI_self(int vector);
106
107extern struct genapic apic_x2apic_uv_x;
108DECLARE_PER_CPU(int, x2apic_extra_bits);
109
110extern void default_setup_apic_routing(void);
111
112extern int default_cpu_present_to_apicid(int mps_cpu);
113extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
114#endif
115
116static inline void default_wait_for_init_deassert(atomic_t *deassert)
117{
118 while (!atomic_read(deassert))
119 cpu_relax();
120 return;
121}
122
123extern void generic_bigsmp_probe(void);
124
125
126#ifdef CONFIG_X86_LOCAL_APIC
127
128#include <asm/smp.h>
129
130#define APIC_DFR_VALUE (APIC_DFR_FLAT)
131
132static inline const struct cpumask *default_target_cpus(void)
133{
134#ifdef CONFIG_SMP
135 return cpu_online_mask;
136#else
137 return cpumask_of(0);
138#endif
139}
140
141DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
142
143
144static inline unsigned int read_apic_id(void)
145{
146 unsigned int reg;
147
148 reg = apic_read(APIC_ID);
149
150 return apic->get_apic_id(reg);
151}
152
153#ifdef CONFIG_X86_64
154extern void default_setup_apic_routing(void);
155#else
156
157/*
158 * Set up the logical destination ID.
159 *
160 * Intel recommends to set DFR, LDR and TPR before enabling
161 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
162 * document number 292116). So here it goes...
163 */
164extern void default_init_apic_ldr(void);
165
166static inline int default_apic_id_registered(void)
167{
168 return physid_isset(read_apic_id(), phys_cpu_present_map);
169}
170
171static inline unsigned int
172default_cpu_mask_to_apicid(const struct cpumask *cpumask)
173{
174 return cpumask_bits(cpumask)[0];
175}
176
177static inline unsigned int
178default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
179 const struct cpumask *andmask)
180{
181 unsigned long mask1 = cpumask_bits(cpumask)[0];
182 unsigned long mask2 = cpumask_bits(andmask)[0];
183 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
184
185 return (unsigned int)(mask1 & mask2 & mask3);
186}
187
188static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
189{
190 return cpuid_apic >> index_msb;
191}
192
193static inline void default_setup_apic_routing(void)
194{
195#ifdef CONFIG_X86_IO_APIC
196 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
197 "Flat", nr_ioapics);
198#endif
199}
200
201extern int default_apicid_to_node(int logical_apicid);
202
203#endif
204
205static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
206{
207 return physid_isset(apicid, bitmap);
208}
209
210static inline unsigned long default_check_apicid_present(int bit)
211{
212 return physid_isset(bit, phys_cpu_present_map);
213}
214
215static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
216{
217 return phys_map;
218}
219
220/* Mapping from cpu number to logical apicid */
221static inline int default_cpu_to_logical_apicid(int cpu)
222{
223 return 1 << cpu;
224}
225
226static inline int __default_cpu_present_to_apicid(int mps_cpu)
227{
228 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
229 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
230 else
231 return BAD_APICID;
232}
233
234static inline int
235__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
236{
237 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
238}
239
240#ifdef CONFIG_X86_32
241static inline int default_cpu_present_to_apicid(int mps_cpu)
242{
243 return __default_cpu_present_to_apicid(mps_cpu);
244}
245
246static inline int
247default_check_phys_apicid_present(int boot_cpu_physical_apicid)
248{
249 return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
250}
251#else
252extern int default_cpu_present_to_apicid(int mps_cpu);
253extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
254#endif
255
256static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
257{
258 return physid_mask_of_physid(phys_apicid);
259}
260
261#endif /* CONFIG_X86_LOCAL_APIC */
262
263#endif /* _ASM_X86_GENAPIC_64_H */
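struct genapic, as shown in the removed text, is a per-driver table of function pointers; the probe()/acpi_madt_oem_check() hooks decide at boot which table the global `apic` pointer ends up referencing. A hedged sketch of how a driver instance plugs into that table -- the instance name, helpers and initializer values are illustrative, not the real apic_flat definition:

    static int example_probe(void)
    {
            return 1;                       /* always claim this machine */
    }

    static const struct cpumask *example_target_cpus(void)
    {
            return cpu_online_mask;
    }

    struct genapic apic_example = {         /* hypothetical driver instance */
            .name        = "example",
            .probe       = example_probe,
            .target_cpus = example_target_cpus,
            /* ... remaining hooks filled in the same way ... */
    };

    /* Callers then go through whichever table was selected, e.g.
     *   apic->send_IPI_all(vector);                                      */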
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091eeb1f..1a99e6c092af 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -60,4 +60,8 @@ extern struct irq_chip i8259A_chip;
60extern void mask_8259A(void); 60extern void mask_8259A(void);
61extern void unmask_8259A(void); 61extern void unmask_8259A(void);
62 62
63#ifdef CONFIG_X86_32
64extern void init_ISA_irqs(void);
65#endif
66
63#endif /* _ASM_X86_I8259_H */ 67#endif /* _ASM_X86_I8259_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 4f8e820cf38f..683d0b4c00fc 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -124,10 +124,15 @@ static inline void *phys_to_virt(phys_addr_t address)
124 124
125/* 125/*
126 * ISA I/O bus memory addresses are 1:1 with the physical address. 126 * ISA I/O bus memory addresses are 1:1 with the physical address.
127 * However, we truncate the address to unsigned int to avoid undesirable
 128 * promotions in legacy drivers. 
127 */ 129 */
128#define isa_virt_to_bus (unsigned long)virt_to_phys 130static inline unsigned int isa_virt_to_bus(volatile void *address)
129#define isa_page_to_bus page_to_phys 131{
130#define isa_bus_to_virt phys_to_virt 132 return (unsigned int)virt_to_phys(address);
133}
134#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
135#define isa_bus_to_virt phys_to_virt
131 136
132/* 137/*
133 * However PCI ones are not necessarily 1:1 and therefore these interfaces 138 * However PCI ones are not necessarily 1:1 and therefore these interfaces
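The rewritten isa_virt_to_bus() narrows its result on purpose: the old object-like macro yielded an unsigned long, and in legacy ISA drivers that type leaks into arithmetic, format strings and 32-bit register writes. A small, hedged illustration of the kind of caller the comment has in mind -- the helper and the port value are invented:

    /* Hypothetical legacy-style caller.  With the inline returning
     * unsigned int, the narrowing happens once, explicitly, instead of
     * implicitly at every %x format or 32-bit register write.           */
    static void example_program_isa_dma(void *buf)
    {
            unsigned int bus = isa_virt_to_bus(buf);

            printk(KERN_DEBUG "ISA bus address: %x\n", bus);
            outl(bus, 0x300);               /* made-up ISA controller port */
    }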
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c1f06289b14b..86af26091d6c 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -23,6 +23,9 @@
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/tlbflush.h> 24#include <asm/tlbflush.h>
25 25
26int
27is_io_mapping_possible(resource_size_t base, unsigned long size);
28
26void * 29void *
27iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); 30iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
28 31
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index 5f2efc5d9927..0b7228268a63 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -123,8 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
123 int vector); 123 int vector);
124extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, 124extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
125 int vector); 125 int vector);
126#include <asm/genapic.h>
127
128extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, 126extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
129 int vector); 127 int vector);
130extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, 128extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index b07278c55e9e..8a285f356f8a 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -128,7 +128,7 @@
128#ifndef __ASSEMBLY__ 128#ifndef __ASSEMBLY__
129static inline int invalid_vm86_irq(int irq) 129static inline int invalid_vm86_irq(int irq)
130{ 130{
131 return irq < 3 || irq > 15; 131 return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
132} 132}
133#endif 133#endif
134 134
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index d2e3bf3608af..886c9402ec45 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -9,6 +9,13 @@
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/ioctl.h> 10#include <linux/ioctl.h>
11 11
12/* Select x86 specific features in <linux/kvm.h> */
13#define __KVM_HAVE_PIT
14#define __KVM_HAVE_IOAPIC
15#define __KVM_HAVE_DEVICE_ASSIGNMENT
16#define __KVM_HAVE_MSI
17#define __KVM_HAVE_USER_NMI
18
12/* Architectural interrupt line count. */ 19/* Architectural interrupt line count. */
13#define KVM_NR_INTERRUPTS 256 20#define KVM_NR_INTERRUPTS 256
14 21
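The new __KVM_HAVE_* block lets the architecture-neutral <linux/kvm.h> expose optional ioctls and structures only on architectures that implement them. A hedged sketch of that gating pattern -- the structure and ioctl below are invented for illustration, not the real PIT ABI:

    #ifdef __KVM_HAVE_PIT
    struct kvm_example_pit_state {          /* hypothetical structure */
            __u32 channel;
            __u32 count;
    };
    #define KVM_EXAMPLE_GET_PIT _IOR(KVMIO, 0x99, struct kvm_example_pit_state)
    #endif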
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 5d98d0b68ffc..9320e2a8a26a 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -52,70 +52,14 @@
52 52
53#endif 53#endif
54 54
55#define GLOBAL(name) \
56 .globl name; \
57 name:
58
55#ifdef CONFIG_X86_ALIGNMENT_16 59#ifdef CONFIG_X86_ALIGNMENT_16
56#define __ALIGN .align 16,0x90 60#define __ALIGN .align 16,0x90
57#define __ALIGN_STR ".align 16,0x90" 61#define __ALIGN_STR ".align 16,0x90"
58#endif 62#endif
59 63
60/*
61 * to check ENTRY_X86/END_X86 and
62 * KPROBE_ENTRY_X86/KPROBE_END_X86
63 * unbalanced-missed-mixed appearance
64 */
65#define __set_entry_x86 .set ENTRY_X86_IN, 0
66#define __unset_entry_x86 .set ENTRY_X86_IN, 1
67#define __set_kprobe_x86 .set KPROBE_X86_IN, 0
68#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1
69
70#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
71
72#define __check_entry_x86 \
73 .ifdef ENTRY_X86_IN; \
74 .ifeq ENTRY_X86_IN; \
75 __macro_err_x86; \
76 .abort; \
77 .endif; \
78 .endif
79
80#define __check_kprobe_x86 \
81 .ifdef KPROBE_X86_IN; \
82 .ifeq KPROBE_X86_IN; \
83 __macro_err_x86; \
84 .abort; \
85 .endif; \
86 .endif
87
88#define __check_entry_kprobe_x86 \
89 __check_entry_x86; \
90 __check_kprobe_x86
91
92#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86
93
94#define ENTRY_X86(name) \
95 __check_entry_kprobe_x86; \
96 __set_entry_x86; \
97 .globl name; \
98 __ALIGN; \
99 name:
100
101#define END_X86(name) \
102 __unset_entry_x86; \
103 __check_entry_kprobe_x86; \
104 .size name, .-name
105
106#define KPROBE_ENTRY_X86(name) \
107 __check_entry_kprobe_x86; \
108 __set_kprobe_x86; \
109 .pushsection .kprobes.text, "ax"; \
110 .globl name; \
111 __ALIGN; \
112 name:
113
114#define KPROBE_END_X86(name) \
115 __unset_kprobe_x86; \
116 __check_entry_kprobe_x86; \
117 .size name, .-name; \
118 .popsection
119
120#endif /* _ASM_X86_LINKAGE_H */ 64#endif /* _ASM_X86_LINKAGE_H */
121 65
diff --git a/arch/x86/include/asm/mach-voyager/do_timer.h b/arch/x86/include/asm/mach-voyager/do_timer.h
deleted file mode 100644
index 9e5a459fd15b..000000000000
--- a/arch/x86/include/asm/mach-voyager/do_timer.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* defines for inline arch setup functions */
2#include <linux/clockchips.h>
3
4#include <asm/voyager.h>
5#include <asm/i8253.h>
6
7/**
8 * do_timer_interrupt_hook - hook into timer tick
9 *
10 * Call the pit clock event handler. see asm/i8253.h
11 **/
12static inline void do_timer_interrupt_hook(void)
13{
14 global_clock_event->event_handler(global_clock_event);
15 voyager_timer_interrupt();
16}
17
diff --git a/arch/x86/include/asm/mach-voyager/entry_arch.h b/arch/x86/include/asm/mach-voyager/entry_arch.h
deleted file mode 100644
index ae52624b5937..000000000000
--- a/arch/x86/include/asm/mach-voyager/entry_arch.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 2002
4 *
5 * Author: James.Bottomley@HansenPartnership.com
6 *
7 * linux/arch/i386/voyager/entry_arch.h
8 *
9 * This file builds the VIC and QIC CPI gates
10 */
11
12/* initialise the voyager interrupt gates
13 *
14 * This uses the macros in irq.h to set up assembly jump gates. The
15 * calls are then redirected to the same routine with smp_ prefixed */
16BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT)
17BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT)
18BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0);
19
20/* do all the QIC interrupts */
21BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI);
22BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
23BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
24BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
25BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
26BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/arch/x86/include/asm/mach-voyager/setup_arch.h b/arch/x86/include/asm/mach-voyager/setup_arch.h
deleted file mode 100644
index 71729ca05cd7..000000000000
--- a/arch/x86/include/asm/mach-voyager/setup_arch.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#include <asm/voyager.h>
2#include <asm/setup.h>
3#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \
4 (&boot_params.apm_bios_info))
5
6/* Hook to call BIOS initialisation function */
7
8/* for voyager, pass the voyager BIOS/SUS info area to the detection
9 * routines */
10
11#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO);
12
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 07f1af494ca5..105fb90a0635 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -32,8 +32,6 @@ static inline void get_memcfg_numa(void)
32 get_memcfg_numa_flat(); 32 get_memcfg_numa_flat();
33} 33}
34 34
35extern int early_pfn_to_nid(unsigned long pfn);
36
37extern void resume_map_numa_kva(pgd_t *pgd); 35extern void resume_map_numa_kva(pgd_t *pgd);
38 36
39#else /* !CONFIG_NUMA */ 37#else /* !CONFIG_NUMA */
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index a5b3817d4b9e..a29f48c2a322 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -40,8 +40,6 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
40#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ 40#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
41 NODE_DATA(nid)->node_spanned_pages) 41 NODE_DATA(nid)->node_spanned_pages)
42 42
43extern int early_pfn_to_nid(unsigned long pfn);
44
45#ifdef CONFIG_NUMA_EMU 43#ifdef CONFIG_NUMA_EMU
46#define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) 44#define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024)
47#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) 45#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 5916c8df09d9..642fc7fc8cdc 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -167,6 +167,4 @@ extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
167 167
168extern int default_acpi_madt_oem_check(char *, char *); 168extern int default_acpi_madt_oem_check(char *, char *);
169 169
170extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
171
172#endif /* _ASM_X86_MPSPEC_H */ 170#endif /* _ASM_X86_MPSPEC_H */
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index e9f5db796244..a37229011b56 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -4,8 +4,12 @@
4extern int pxm_to_nid(int pxm); 4extern int pxm_to_nid(int pxm);
5extern void numa_remove_cpu(int cpu); 5extern void numa_remove_cpu(int cpu);
6 6
7#ifdef CONFIG_NUMA 7#ifdef CONFIG_HIGHMEM
8extern void set_highmem_pages_init(void); 8extern void set_highmem_pages_init(void);
9#else
10static inline void set_highmem_pages_init(void)
11{
12}
9#endif 13#endif
10 14
11#endif /* _ASM_X86_NUMA_32_H */ 15#endif /* _ASM_X86_NUMA_32_H */
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index b5486aaf36ec..f1e4a79a6e41 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -33,12 +33,10 @@
33/* 44=32+12, the limit we can fit into an unsigned long pfn */ 33/* 44=32+12, the limit we can fit into an unsigned long pfn */
34#define __PHYSICAL_MASK_SHIFT 44 34#define __PHYSICAL_MASK_SHIFT 44
35#define __VIRTUAL_MASK_SHIFT 32 35#define __VIRTUAL_MASK_SHIFT 32
36#define PAGETABLE_LEVELS 3
37 36
38#else /* !CONFIG_X86_PAE */ 37#else /* !CONFIG_X86_PAE */
39#define __PHYSICAL_MASK_SHIFT 32 38#define __PHYSICAL_MASK_SHIFT 32
40#define __VIRTUAL_MASK_SHIFT 32 39#define __VIRTUAL_MASK_SHIFT 32
41#define PAGETABLE_LEVELS 2
42#endif /* CONFIG_X86_PAE */ 40#endif /* CONFIG_X86_PAE */
43 41
44#ifndef __ASSEMBLY__ 42#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index bc73af3eda9c..d38c91b70248 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,8 +1,6 @@
1#ifndef _ASM_X86_PAGE_64_DEFS_H 1#ifndef _ASM_X86_PAGE_64_DEFS_H
2#define _ASM_X86_PAGE_64_DEFS_H 2#define _ASM_X86_PAGE_64_DEFS_H
3 3
4#define PAGETABLE_LEVELS 4
5
6#define THREAD_ORDER 1 4#define THREAD_ORDER 1
7#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 5#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
8#define CURRENT_MASK (~(THREAD_SIZE - 1)) 6#define CURRENT_MASK (~(THREAD_SIZE - 1))
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 2c52ff767584..2d625da6603c 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -16,12 +16,6 @@
16 (ie, 32-bit PAE). */ 16 (ie, 32-bit PAE). */
17#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) 17#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
18 18
19/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
20#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
21
22/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
23#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
24
25#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 19#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
26#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 20#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
27 21
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 9709fdff6615..b0e70056838e 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -15,4 +15,7 @@ extern int reserve_memtype(u64 start, u64 end,
15 unsigned long req_type, unsigned long *ret_type); 15 unsigned long req_type, unsigned long *ret_type);
16extern int free_memtype(u64 start, u64 end); 16extern int free_memtype(u64 start, u64 end);
17 17
18extern int kernel_map_sync_memtype(u64 base, unsigned long size,
19 unsigned long flag);
20
18#endif /* _ASM_X86_PAT_H */ 21#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index 09ae67efcebd..daacc23e3fb9 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -17,6 +17,7 @@ typedef union {
17#endif /* !__ASSEMBLY__ */ 17#endif /* !__ASSEMBLY__ */
18 18
19#define SHARED_KERNEL_PMD 0 19#define SHARED_KERNEL_PMD 0
20#define PAGETABLE_LEVELS 2
20 21
21/* 22/*
22 * traditional i386 two-level paging structure: 23 * traditional i386 two-level paging structure:
@@ -25,6 +26,7 @@ typedef union {
25#define PGDIR_SHIFT 22 26#define PGDIR_SHIFT 22
26#define PTRS_PER_PGD 1024 27#define PTRS_PER_PGD 1024
27 28
29
28/* 30/*
29 * the i386 is two-level, so we don't really have any 31 * the i386 is two-level, so we don't really have any
30 * PMD directory physically. 32 * PMD directory physically.
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index bcc89625ebe5..1bd5876c8649 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -24,6 +24,8 @@ typedef union {
24#define SHARED_KERNEL_PMD 1 24#define SHARED_KERNEL_PMD 1
25#endif 25#endif
26 26
27#define PAGETABLE_LEVELS 3
28
27/* 29/*
28 * PGDIR_SHIFT determines what a top-level page table entry can map 30 * PGDIR_SHIFT determines what a top-level page table entry can map
29 */ 31 */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 2f59135c6f2a..fbf42b8e0383 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -18,6 +18,7 @@ typedef struct { pteval_t pte; } pte_t;
18#endif /* !__ASSEMBLY__ */ 18#endif /* !__ASSEMBLY__ */
19 19
20#define SHARED_KERNEL_PMD 0 20#define SHARED_KERNEL_PMD 0
21#define PAGETABLE_LEVELS 4
21 22
22/* 23/*
23 * PGDIR_SHIFT determines what a top-level page table entry can map 24 * PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 9dafe87be2de..4d258ad76a0f 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -173,6 +173,12 @@
173 173
174#include <linux/types.h> 174#include <linux/types.h>
175 175
176/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
177#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
178
179/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
180#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
181
176typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; 182typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
177 183
178typedef struct { pgdval_t pgd; } pgd_t; 184typedef struct { pgdval_t pgd; } pgd_t;
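PTE_PFN_MASK and PTE_FLAGS_MASK split a pteval_t into its page frame number and its attribute bits. A minimal sketch of the usual usage, with invented helper names (the in-tree equivalents are pte_pfn()/pte_flags()):

    static inline unsigned long example_pte_pfn(pteval_t val)
    {
            return (val & PTE_PFN_MASK) >> PAGE_SHIFT;
    }

    static inline pteval_t example_pte_flags(pteval_t val)
    {
            return val & PTE_FLAGS_MASK;
    }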
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index dabab1a19ddd..76139506c3e4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -248,7 +248,6 @@ struct x86_hw_tss {
248#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) 248#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
249#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) 249#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
250#define INVALID_IO_BITMAP_OFFSET 0x8000 250#define INVALID_IO_BITMAP_OFFSET 0x8000
251#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
252 251
253struct tss_struct { 252struct tss_struct {
254 /* 253 /*
@@ -263,11 +262,6 @@ struct tss_struct {
263 * be within the limit. 262 * be within the limit.
264 */ 263 */
265 unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; 264 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
266 /*
267 * Cache the current maximum and the last task that used the bitmap:
268 */
269 unsigned long io_bitmap_max;
270 struct thread_struct *io_bitmap_owner;
271 265
272 /* 266 /*
273 * .. and then another 0x100 bytes for the emergency kernel stack: 267 * .. and then another 0x100 bytes for the emergency kernel stack:
@@ -403,7 +397,6 @@ DECLARE_PER_CPU(unsigned long, stack_canary);
403#endif 397#endif
404#endif /* X86_64 */ 398#endif /* X86_64 */
405 399
406extern void print_cpu_info(struct cpuinfo_x86 *);
407extern unsigned int xstate_size; 400extern unsigned int xstate_size;
408extern void free_thread_xstate(struct task_struct *); 401extern void free_thread_xstate(struct task_struct *);
409extern struct kmem_cache *task_xstate_cachep; 402extern struct kmem_cache *task_xstate_cachep;
@@ -862,6 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
862 * User space process size: 3GB (default). 855 * User space process size: 3GB (default).
863 */ 856 */
864#define TASK_SIZE PAGE_OFFSET 857#define TASK_SIZE PAGE_OFFSET
858#define TASK_SIZE_MAX TASK_SIZE
865#define STACK_TOP TASK_SIZE 859#define STACK_TOP TASK_SIZE
866#define STACK_TOP_MAX STACK_TOP 860#define STACK_TOP_MAX STACK_TOP
867 861
@@ -921,7 +915,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
921/* 915/*
922 * User space process size. 47bits minus one guard page. 916 * User space process size. 47bits minus one guard page.
923 */ 917 */
924#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) 918#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
925 919
926/* This decides where the kernel will search for a free chunk of vm 920/* This decides where the kernel will search for a free chunk of vm
927 * space during mmap's. 921 * space during mmap's.
@@ -930,12 +924,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
930 0xc0000000 : 0xFFFFe000) 924 0xc0000000 : 0xFFFFe000)
931 925
932#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ 926#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
933 IA32_PAGE_OFFSET : TASK_SIZE64) 927 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
934#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ 928#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
935 IA32_PAGE_OFFSET : TASK_SIZE64) 929 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
936 930
937#define STACK_TOP TASK_SIZE 931#define STACK_TOP TASK_SIZE
938#define STACK_TOP_MAX TASK_SIZE64 932#define STACK_TOP_MAX TASK_SIZE_MAX
939 933
940#define INIT_THREAD { \ 934#define INIT_THREAD { \
941 .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ 935 .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
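For reference, the renamed 64-bit constant expands as follows (assuming the usual 4 KiB PAGE_SIZE):

    /* TASK_SIZE_MAX = (1UL << 47) - PAGE_SIZE
     *               = 0x0000800000000000 - 0x1000
     *               = 0x00007ffffffff000
     * i.e. 128 TiB minus the one guard page the comment mentions, left
     * unmapped at the very top of the lower canonical half.             */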
diff --git a/arch/x86/include/asm/seccomp_32.h b/arch/x86/include/asm/seccomp_32.h
index a6ad87b352c4..b811d6f5780c 100644
--- a/arch/x86/include/asm/seccomp_32.h
+++ b/arch/x86/include/asm/seccomp_32.h
@@ -1,12 +1,6 @@
1#ifndef _ASM_X86_SECCOMP_32_H 1#ifndef _ASM_X86_SECCOMP_32_H
2#define _ASM_X86_SECCOMP_32_H 2#define _ASM_X86_SECCOMP_32_H
3 3
4#include <linux/thread_info.h>
5
6#ifdef TIF_32BIT
7#error "unexpected TIF_32BIT on i386"
8#endif
9
10#include <linux/unistd.h> 4#include <linux/unistd.h>
11 5
12#define __NR_seccomp_read __NR_read 6#define __NR_seccomp_read __NR_read
diff --git a/arch/x86/include/asm/seccomp_64.h b/arch/x86/include/asm/seccomp_64.h
index 4171bb794e9e..84ec1bd161a5 100644
--- a/arch/x86/include/asm/seccomp_64.h
+++ b/arch/x86/include/asm/seccomp_64.h
@@ -1,14 +1,6 @@
1#ifndef _ASM_X86_SECCOMP_64_H 1#ifndef _ASM_X86_SECCOMP_64_H
2#define _ASM_X86_SECCOMP_64_H 2#define _ASM_X86_SECCOMP_64_H
3 3
4#include <linux/thread_info.h>
5
6#ifdef TIF_32BIT
7#error "unexpected TIF_32BIT on x86_64"
8#else
9#define TIF_32BIT TIF_IA32
10#endif
11
12#include <linux/unistd.h> 4#include <linux/unistd.h>
13#include <asm/ia32_unistd.h> 5#include <asm/ia32_unistd.h>
14 6
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index c230189462a2..05c6f6b11fd5 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -13,6 +13,7 @@
13struct mpc_cpu; 13struct mpc_cpu;
14struct mpc_bus; 14struct mpc_bus;
15struct mpc_oemtable; 15struct mpc_oemtable;
16
16struct x86_quirks { 17struct x86_quirks {
17 int (*arch_pre_time_init)(void); 18 int (*arch_pre_time_init)(void);
18 int (*arch_time_init)(void); 19 int (*arch_time_init)(void);
@@ -30,9 +31,16 @@ struct x86_quirks {
30 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, 31 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
31 unsigned short oemsize); 32 unsigned short oemsize);
32 int (*setup_ioapic_ids)(void); 33 int (*setup_ioapic_ids)(void);
33 int (*update_genapic)(void);
34}; 34};
35 35
36extern void x86_quirk_pre_intr_init(void);
37extern void x86_quirk_intr_init(void);
38
39extern void x86_quirk_trap_init(void);
40
41extern void x86_quirk_pre_time_init(void);
42extern void x86_quirk_time_init(void);
43
36#endif /* __ASSEMBLY__ */ 44#endif /* __ASSEMBLY__ */
37 45
38#ifdef __i386__ 46#ifdef __i386__
@@ -56,7 +64,11 @@ struct x86_quirks {
56#include <asm/bootparam.h> 64#include <asm/bootparam.h>
57 65
58/* Interrupt control for vSMPowered x86_64 systems */ 66/* Interrupt control for vSMPowered x86_64 systems */
67#ifdef CONFIG_X86_VSMP
59void vsmp_init(void); 68void vsmp_init(void);
69#else
70static inline void vsmp_init(void) { }
71#endif
60 72
61void setup_bios_corruption_check(void); 73void setup_bios_corruption_check(void);
62 74
@@ -68,8 +80,6 @@ static inline void visws_early_detect(void) { }
68static inline int is_visws_box(void) { return 0; } 80static inline int is_visws_box(void) { return 0; }
69#endif 81#endif
70 82
71extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
72extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
73extern struct x86_quirks *x86_quirks; 83extern struct x86_quirks *x86_quirks;
74extern unsigned long saved_video_mode; 84extern unsigned long saved_video_mode;
75 85
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 258ef730aaa4..7043408f6904 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -82,7 +82,7 @@ asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
82/* kernel/signal_64.c */ 82/* kernel/signal_64.c */
83asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, 83asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
84 struct pt_regs *); 84 struct pt_regs *);
85asmlinkage long sys_rt_sigreturn(struct pt_regs *); 85long sys_rt_sigreturn(struct pt_regs *);
86 86
87/* kernel/sys_x86_64.c */ 87/* kernel/sys_x86_64.c */
88asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, 88asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c00bfdbdd456..643c59b4bc6e 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -20,6 +20,9 @@
20struct task_struct; /* one of the stranger aspects of C forward declarations */ 20struct task_struct; /* one of the stranger aspects of C forward declarations */
21struct task_struct *__switch_to(struct task_struct *prev, 21struct task_struct *__switch_to(struct task_struct *prev,
22 struct task_struct *next); 22 struct task_struct *next);
23struct tss_struct;
24void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
25 struct tss_struct *tss);
23 26
24#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
25 28
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 2bb6a835c453..a81195eaa2b3 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -3,6 +3,7 @@
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/interrupt.h>
6 7
7#define TICK_SIZE (tick_nsec / 1000) 8#define TICK_SIZE (tick_nsec / 1000)
8 9
@@ -12,6 +13,7 @@ unsigned long native_calibrate_tsc(void);
12#ifdef CONFIG_X86_32 13#ifdef CONFIG_X86_32
13extern int timer_ack; 14extern int timer_ack;
14extern int recalibrate_cpu_khz(void); 15extern int recalibrate_cpu_khz(void);
16extern irqreturn_t timer_interrupt(int irq, void *dev_id);
15#endif /* CONFIG_X86_32 */ 17#endif /* CONFIG_X86_32 */
16 18
17extern int no_timer_check; 19extern int no_timer_check;
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 84210c479fca..8cc687326eb8 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -188,16 +188,16 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
188extern long __copy_user_nocache(void *dst, const void __user *src, 188extern long __copy_user_nocache(void *dst, const void __user *src,
189 unsigned size, int zerorest); 189 unsigned size, int zerorest);
190 190
191static inline int __copy_from_user_nocache(void *dst, const void __user *src, 191static inline int
192 unsigned size) 192__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
193{ 193{
194 might_sleep(); 194 might_sleep();
195 return __copy_user_nocache(dst, src, size, 1); 195 return __copy_user_nocache(dst, src, size, 1);
196} 196}
197 197
198static inline int __copy_from_user_inatomic_nocache(void *dst, 198static inline int
199 const void __user *src, 199__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
200 unsigned size) 200 unsigned size)
201{ 201{
202 return __copy_user_nocache(dst, src, size, 0); 202 return __copy_user_nocache(dst, src, size, 0);
203} 203}
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 8242bf965812..c0a01b5d985b 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -12,7 +12,6 @@ extern enum uv_system_type get_uv_system_type(void);
12extern int is_uv_system(void); 12extern int is_uv_system(void);
13extern void uv_cpu_init(void); 13extern void uv_cpu_init(void);
14extern void uv_system_init(void); 14extern void uv_system_init(void);
15extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 15extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
17 struct mm_struct *mm, 16 struct mm_struct *mm,
18 unsigned long va, 17 unsigned long va,
@@ -24,8 +23,6 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
24static inline int is_uv_system(void) { return 0; } 23static inline int is_uv_system(void) { return 0; }
25static inline void uv_cpu_init(void) { } 24static inline void uv_cpu_init(void) { }
26static inline void uv_system_init(void) { } 25static inline void uv_system_init(void) { }
27static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
28{ return 1; }
29static inline const struct cpumask * 26static inline const struct cpumask *
30uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, 27uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
31 unsigned long va, unsigned int cpu) 28 unsigned long va, unsigned int cpu)
diff --git a/arch/x86/include/asm/vic.h b/arch/x86/include/asm/vic.h
deleted file mode 100644
index 53100f353612..000000000000
--- a/arch/x86/include/asm/vic.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * Standard include definitions for the NCR Voyager Interrupt Controller */
6
7/* The eight CPI vectors. To activate a CPI, you write a bit mask
8 * corresponding to the processor set to be interrupted into the
9 * relevant register. That set of CPUs will then be interrupted with
10 * the CPI */
11static const int VIC_CPI_Registers[] =
12 {0xFC00, 0xFC01, 0xFC08, 0xFC09,
13 0xFC10, 0xFC11, 0xFC18, 0xFC19 };
14
15#define VIC_PROC_WHO_AM_I 0xfc29
16# define QUAD_IDENTIFIER 0xC0
17# define EIGHT_SLOT_IDENTIFIER 0xE0
18#define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72
19#define VIC_CPI_BASE_REGISTER 0xFC41
20#define VIC_PROCESSOR_ID 0xFC21
21# define VIC_CPU_MASQUERADE_ENABLE 0x8
22
23#define VIC_CLAIM_REGISTER_0 0xFC38
24#define VIC_CLAIM_REGISTER_1 0xFC39
25#define VIC_REDIRECT_REGISTER_0 0xFC60
26#define VIC_REDIRECT_REGISTER_1 0xFC61
27#define VIC_PRIORITY_REGISTER 0xFC20
28
29#define VIC_PRIMARY_MC_BASE 0xFC48
30#define VIC_SECONDARY_MC_BASE 0xFC49
31
32#define QIC_PROCESSOR_ID 0xFC71
33# define QIC_CPUID_ENABLE 0x08
34
35#define QIC_VIC_CPI_BASE_REGISTER 0xFC79
36#define QIC_CPI_BASE_REGISTER 0xFC7A
37
38#define QIC_MASK_REGISTER0 0xFC80
39/* NOTE: these are masked high, enabled low */
40# define QIC_PERF_TIMER 0x01
41# define QIC_LPE 0x02
42# define QIC_SYS_INT 0x04
43# define QIC_CMN_INT 0x08
44/* at the moment, just enable CMN_INT, disable SYS_INT */
45# define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */))
46#define QIC_MASK_REGISTER1 0xFC81
47# define QIC_BOOT_CPI_MASK 0xFE
48/* Enable CPI's 1-6 inclusive */
49# define QIC_CPI_ENABLE 0x81
50
51#define QIC_INTERRUPT_CLEAR0 0xFC8A
52#define QIC_INTERRUPT_CLEAR1 0xFC8B
53
54/* this is where we place the CPI vectors */
55#define VIC_DEFAULT_CPI_BASE 0xC0
56/* this is where we place the QIC CPI vectors */
57#define QIC_DEFAULT_CPI_BASE 0xD0
58
59#define VIC_BOOT_INTERRUPT_MASK 0xfe
60
61extern void smp_vic_timer_interrupt(void);
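The header's opening comment describes the VIC programming model: writing a CPU bit mask to a CPI's register interrupts exactly that set of processors. A hedged sketch of a sender built on the table above -- the helper name is invented, and the real voyager_smp.c sender may differ in detail:

    static void example_send_vic_cpi(__u8 cpuset, int cpi)
    {
            /* Each VIC_CPI_Registers[] entry is an I/O port; writing the
             * CPU mask there raises CPI `cpi` on every CPU set in it.    */
            outb(cpuset, VIC_CPI_Registers[cpi]);
    }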
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
deleted file mode 100644
index c1635d43616f..000000000000
--- a/arch/x86/include/asm/voyager.h
+++ /dev/null
@@ -1,571 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * Standard include definitions for the NCR Voyager system */
6
7#undef VOYAGER_DEBUG
8#undef VOYAGER_CAT_DEBUG
9
10#ifdef VOYAGER_DEBUG
11#define VDEBUG(x) printk x
12#else
13#define VDEBUG(x)
14#endif
15
16/* There are three levels of voyager machine: 3,4 and 5. The rule is
17 * if it's less than 3435 it's a Level 3 except for a 3360 which is
18 * a level 4. A 3435 or above is a Level 5 */
19#define VOYAGER_LEVEL5_AND_ABOVE 0x3435
20#define VOYAGER_LEVEL4 0x3360
21
22/* The L4 DINO ASIC */
23#define VOYAGER_DINO 0x43
24
25/* voyager ports in standard I/O space */
26#define VOYAGER_MC_SETUP 0x96
27
28
29#define VOYAGER_CAT_CONFIG_PORT 0x97
30# define VOYAGER_CAT_DESELECT 0xff
31#define VOYAGER_SSPB_RELOCATION_PORT 0x98
32
33/* Valid CAT controller commands */
34/* start instruction register cycle */
35#define VOYAGER_CAT_IRCYC 0x01
36/* start data register cycle */
37#define VOYAGER_CAT_DRCYC 0x02
38/* move to execute state */
39#define VOYAGER_CAT_RUN 0x0F
40/* end operation */
41#define VOYAGER_CAT_END 0x80
42/* hold in idle state */
43#define VOYAGER_CAT_HOLD 0x90
44/* single step an "intest" vector */
45#define VOYAGER_CAT_STEP 0xE0
46/* return cat controller to CLEMSON mode */
47#define VOYAGER_CAT_CLEMSON 0xFF
48
49/* the default cat command header */
50#define VOYAGER_CAT_HEADER 0x7F
51
52/* the range of possible CAT module ids in the system */
53#define VOYAGER_MIN_MODULE 0x10
54#define VOYAGER_MAX_MODULE 0x1f
55
56/* The voyager registers per asic */
57#define VOYAGER_ASIC_ID_REG 0x00
58#define VOYAGER_ASIC_TYPE_REG 0x01
59/* the sub address registers can be made auto incrementing on reads */
60#define VOYAGER_AUTO_INC_REG 0x02
61# define VOYAGER_AUTO_INC 0x04
62# define VOYAGER_NO_AUTO_INC 0xfb
63#define VOYAGER_SUBADDRDATA 0x03
64#define VOYAGER_SCANPATH 0x05
65# define VOYAGER_CONNECT_ASIC 0x01
66# define VOYAGER_DISCONNECT_ASIC 0xfe
67#define VOYAGER_SUBADDRLO 0x06
68#define VOYAGER_SUBADDRHI 0x07
69#define VOYAGER_SUBMODSELECT 0x08
70#define VOYAGER_SUBMODPRESENT 0x09
71
72#define VOYAGER_SUBADDR_LO 0xff
73#define VOYAGER_SUBADDR_HI 0xffff
74
75/* the maximum size of a scan path -- used to form instructions */
76#define VOYAGER_MAX_SCAN_PATH 0x100
77/* the biggest possible register size (in bytes) */
78#define VOYAGER_MAX_REG_SIZE 4
79
80/* Total number of possible modules (including submodules) */
81#define VOYAGER_MAX_MODULES 16
82/* Largest number of asics per module */
83#define VOYAGER_MAX_ASICS_PER_MODULE 7
84
85/* the CAT asic of each module is always the first one */
86#define VOYAGER_CAT_ID 0
87#define VOYAGER_PSI 0x1a
88
89/* voyager instruction operations and registers */
90#define VOYAGER_READ_CONFIG 0x1
91#define VOYAGER_WRITE_CONFIG 0x2
92#define VOYAGER_BYPASS 0xff
93
94typedef struct voyager_asic {
95 __u8 asic_addr; /* ASIC address; Level 4 */
96 __u8 asic_type; /* ASIC type */
97 __u8 asic_id; /* ASIC id */
98 __u8 jtag_id[4]; /* JTAG id */
99 __u8 asic_location; /* Location within scan path; start w/ 0 */
100 __u8 bit_location; /* Location within bit stream; start w/ 0 */
101 __u8 ireg_length; /* Instruction register length */
102 __u16 subaddr; /* Amount of sub address space */
103 struct voyager_asic *next; /* Next asic in linked list */
104} voyager_asic_t;
105
106typedef struct voyager_module {
107 __u8 module_addr; /* Module address */
108 __u8 scan_path_connected; /* Scan path connected */
109 __u16 ee_size; /* Size of the EEPROM */
110 __u16 num_asics; /* Number of Asics */
111 __u16 inst_bits; /* Instruction bits in the scan path */
112 __u16 largest_reg; /* Largest register in the scan path */
113 __u16 smallest_reg; /* Smallest register in the scan path */
114 voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */
115 struct voyager_module *submodule; /* Submodule pointer */
116 struct voyager_module *next; /* Next module in linked list */
117} voyager_module_t;
118
119typedef struct voyager_eeprom_hdr {
120 __u8 module_id[4];
121 __u8 version_id;
122 __u8 config_id;
123 __u16 boundry_id; /* boundary scan id */
124 __u16 ee_size; /* size of EEPROM */
125 __u8 assembly[11]; /* assembly # */
126 __u8 assembly_rev; /* assembly rev */
127 __u8 tracer[4]; /* tracer number */
128 __u16 assembly_cksum; /* asm checksum */
129 __u16 power_consump; /* pwr requirements */
130 __u16 num_asics; /* number of asics */
131 __u16 bist_time; /* min. bist time */
132 __u16 err_log_offset; /* error log offset */
133 __u16 scan_path_offset;/* scan path offset */
134 __u16 cct_offset;
135 __u16 log_length; /* length of err log */
136 __u16 xsum_end; /* offset to end of
137 checksum */
138 __u8 reserved[4];
 139 __u8 sflag; /* starting sentinel */
140 __u8 part_number[13]; /* prom part number */
141 __u8 version[10]; /* version number */
142 __u8 signature[8];
143 __u16 eeprom_chksum;
144 __u32 data_stamp_offset;
 145 __u8 eflag; /* ending sentinel */
146} __attribute__((packed)) voyager_eprom_hdr_t;
147
148
149
150#define VOYAGER_EPROM_SIZE_OFFSET \
151 ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
152#define VOYAGER_XSUM_END_OFFSET 0x2a
153
154/* the following three definitions are for internal table layouts
155 * in the module EPROMs. We really only care about the IDs and
156 * offsets */
157typedef struct voyager_sp_table {
158 __u8 asic_id;
159 __u8 bypass_flag;
160 __u16 asic_data_offset;
161 __u16 config_data_offset;
162} __attribute__((packed)) voyager_sp_table_t;
163
164typedef struct voyager_jtag_table {
165 __u8 icode[4];
166 __u8 runbist[4];
167 __u8 intest[4];
168 __u8 samp_preld[4];
169 __u8 ireg_len;
170} __attribute__((packed)) voyager_jtt_t;
171
172typedef struct voyager_asic_data_table {
173 __u8 jtag_id[4];
174 __u16 length_bsr;
175 __u16 length_bist_reg;
176 __u32 bist_clk;
177 __u16 subaddr_bits;
178 __u16 seed_bits;
179 __u16 sig_bits;
180 __u16 jtag_offset;
181} __attribute__((packed)) voyager_at_t;
182
183/* Voyager Interrupt Controller (VIC) registers */
184
185/* Base to add to Cross Processor Interrupts (CPIs) when triggering
186 * the CPU IRQ line */
187/* register defines for the WCBICs (one per processor) */
188#define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */
189#define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */
190#define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */
191#define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */
192#define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */
193#define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */
194#define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */
195#define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */
196
197
198/* top of memory registers */
199#define VOYAGER_WCBIC_TOM_L 0x4
200#define VOYAGER_WCBIC_TOM_H 0x5
201
 202/* register defines for Voyager Memory Control (VMC)
203 * these are present on L4 machines only */
204#define VOYAGER_VMC1 0x81
205#define VOYAGER_VMC2 0x91
206#define VOYAGER_VMC3 0xa1
207#define VOYAGER_VMC4 0xb1
208
209/* VMC Ports */
210#define VOYAGER_VMC_MEMORY_SETUP 0x9
211# define VMC_Interleaving 0x01
212# define VMC_4Way 0x02
213# define VMC_EvenCacheLines 0x04
214# define VMC_HighLine 0x08
215# define VMC_Start0_Enable 0x20
216# define VMC_Start1_Enable 0x40
217# define VMC_Vremap 0x80
218#define VOYAGER_VMC_BANK_DENSITY 0xa
219# define VMC_BANK_EMPTY 0
220# define VMC_BANK_4MB 1
221# define VMC_BANK_16MB 2
222# define VMC_BANK_64MB 3
223# define VMC_BANK0_MASK 0x03
224# define VMC_BANK1_MASK 0x0C
225# define VMC_BANK2_MASK 0x30
226# define VMC_BANK3_MASK 0xC0
227
228/* Magellan Memory Controller (MMC) defines - present on L5 */
229#define VOYAGER_MMC_ASIC_ID 1
230/* the two memory modules corresponding to memory cards in the system */
231#define VOYAGER_MMC_MEMORY0_MODULE 0x14
232#define VOYAGER_MMC_MEMORY1_MODULE 0x15
233/* the Magellan Memory Address (MMA) defines */
234#define VOYAGER_MMA_ASIC_ID 2
235
236/* Submodule number for the Quad Baseboard */
237#define VOYAGER_QUAD_BASEBOARD 1
238
239/* ASIC defines for the Quad Baseboard */
240#define VOYAGER_QUAD_QDATA0 1
241#define VOYAGER_QUAD_QDATA1 2
242#define VOYAGER_QUAD_QABC 3
243
244/* Useful areas in extended CMOS */
245#define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a
246#define VOYAGER_MEMORY_CLICKMAP 0xa23
247#define VOYAGER_DUMP_LOCATION 0xb1a
248
249/* SUS In Control bit - used to tell SUS that we don't need to be
250 * babysat anymore */
251#define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff
252# define VOYAGER_IN_CONTROL_FLAG 0x80
253
254/* Voyager PSI defines */
255#define VOYAGER_PSI_STATUS_REG 0x08
256# define PSI_DC_FAIL 0x01
257# define PSI_MON 0x02
258# define PSI_FAULT 0x04
259# define PSI_ALARM 0x08
260# define PSI_CURRENT 0x10
261# define PSI_DVM 0x20
262# define PSI_PSCFAULT 0x40
263# define PSI_STAT_CHG 0x80
264
265#define VOYAGER_PSI_SUPPLY_REG 0x8000
266 /* read */
267# define PSI_FAIL_DC 0x01
268# define PSI_FAIL_AC 0x02
269# define PSI_MON_INT 0x04
270# define PSI_SWITCH_OFF 0x08
271# define PSI_HX_OFF 0x10
272# define PSI_SECURITY 0x20
273# define PSI_CMOS_BATT_LOW 0x40
274# define PSI_CMOS_BATT_FAIL 0x80
275 /* write */
276# define PSI_CLR_SWITCH_OFF 0x13
277# define PSI_CLR_HX_OFF 0x14
278# define PSI_CLR_CMOS_BATT_FAIL 0x17
279
280#define VOYAGER_PSI_MASK 0x8001
281# define PSI_MASK_MASK 0x10
282
283#define VOYAGER_PSI_AC_FAIL_REG 0x8004
284#define AC_FAIL_STAT_CHANGE 0x80
285
286#define VOYAGER_PSI_GENERAL_REG 0x8007
287 /* read */
288# define PSI_SWITCH_ON 0x01
289# define PSI_SWITCH_ENABLED 0x02
290# define PSI_ALARM_ENABLED 0x08
291# define PSI_SECURE_ENABLED 0x10
292# define PSI_COLD_RESET 0x20
293# define PSI_COLD_START 0x80
294 /* write */
295# define PSI_POWER_DOWN 0x10
296# define PSI_SWITCH_DISABLE 0x01
297# define PSI_SWITCH_ENABLE 0x11
298# define PSI_CLEAR 0x12
299# define PSI_ALARM_DISABLE 0x03
300# define PSI_ALARM_ENABLE 0x13
301# define PSI_CLEAR_COLD_RESET 0x05
302# define PSI_SET_COLD_RESET 0x15
303# define PSI_CLEAR_COLD_START 0x07
304# define PSI_SET_COLD_START 0x17
305
306
307
308struct voyager_bios_info {
309 __u8 len;
310 __u8 major;
311 __u8 minor;
312 __u8 debug;
313 __u8 num_classes;
314 __u8 class_1;
315 __u8 class_2;
316};
317
318/* The following structures and definitions are for the Kernel/SUS
319 * interface these are needed to find out how SUS initialised any Quad
320 * boards in the system */
321
322#define NUMBER_OF_MC_BUSSES 2
323#define SLOTS_PER_MC_BUS 8
324#define MAX_CPUS 16 /* 16 way CPU system */
325#define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */
326#define MAX_CACHE_LEVELS 4 /* # of cache levels supported */
327#define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */
328#define NUMBER_OF_POS_REGS 8
329
330typedef struct {
331 __u8 MC_Slot;
332 __u8 POS_Values[NUMBER_OF_POS_REGS];
333} __attribute__((packed)) MC_SlotInformation_t;
334
335struct QuadDescription {
336 __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields
337 * will be zero except for slot */
338 __u8 StructureVersion;
339 __u32 CPI_BaseAddress;
340 __u32 LARC_BankSize;
341 __u32 LocalMemoryStateBits;
342 __u8 Slot; /* Processor slots 1 - 4 */
343} __attribute__((packed));
344
345struct ProcBoardInfo {
346 __u8 Type;
347 __u8 StructureVersion;
348 __u8 NumberOfBoards;
349 struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS];
350} __attribute__((packed));
351
352struct CacheDescription {
353 __u8 Level;
354 __u32 TotalSize;
355 __u16 LineSize;
356 __u8 Associativity;
357 __u8 CacheType;
358 __u8 WriteType;
359 __u8 Number_CPUs_SharedBy;
360 __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS];
361
362} __attribute__((packed));
363
364struct CPU_Description {
365 __u8 CPU_HardwareId;
366 char *FRU_String;
367 __u8 NumberOfCacheLevels;
368 struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS];
369} __attribute__((packed));
370
371struct CPU_Info {
372 __u8 Type;
373 __u8 StructureVersion;
374 __u8 NumberOf_CPUs;
375 struct CPU_Description CPU_Data[MAX_CPUS];
376} __attribute__((packed));
377
378
379/*
380 * This structure will be used by SUS and the OS.
381 * The assumption about this structure is that no blank space is
382 * packed in it by our friend the compiler.
383 */
384typedef struct {
385 __u8 Mailbox_SUS; /* Written to by SUS to give
386 commands/response to the OS */
387 __u8 Mailbox_OS; /* Written to by the OS to give
388 commands/response to SUS */
389 __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the
390 interface SUS supports */
391 __u8 OS_MailboxVersion; /* Tells SUS which iteration of the
392 interface the OS supports */
393 __u32 OS_Flags; /* Flags set by the OS as info for
394 SUS */
395 __u32 SUS_Flags; /* Flags set by SUS as info
396 for the OS */
397 __u32 WatchDogPeriod; /* Watchdog period (in seconds) which
398 the DP uses to see if the OS
399 is dead */
400 __u32 WatchDogCount; /* Updated by the OS on every tic. */
401 __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS
402 where to stuff the SUS error log
403 on a dump */
404 MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];
405 /* Storage for MCA POS data */
406 /* All new SECOND_PASS_INTERFACE fields added from this point */
407 struct ProcBoardInfo *BoardData;
408 struct CPU_Info *CPU_Data;
409 /* All new fields must be added from this point */
410} Voyager_KernelSUS_Mbox_t;
411
412/* structure for finding the right memory address to send a QIC CPI to */
413struct voyager_qic_cpi {
414 /* Each cache line (32 bytes) can trigger a cpi. The cpi
415 * read/write may occur anywhere in the cache line---pick the
416 * middle to be safe */
417 struct {
418 __u32 pad1[3];
419 __u32 cpi;
420 __u32 pad2[4];
421 } qic_cpi[8];
422};
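Per the comment above the struct, a QIC CPI is raised simply by touching the middle word of the per-CPI cache line. A hedged sketch, with an invented helper name and an arbitrary store value:

    static void example_send_qic_cpi(int cpu, int cpi)
    {
            /* Any read or write inside the 32-byte line triggers the CPI;
             * the padding above keeps the touched word in the middle.    */
            voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi = 0xff;
    }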
423
424struct voyager_status {
425 __u32 power_fail:1;
426 __u32 switch_off:1;
427 __u32 request_from_kernel:1;
428};
429
430struct voyager_psi_regs {
431 __u8 cat_id;
432 __u8 cat_dev;
433 __u8 cat_control;
434 __u8 subaddr;
435 __u8 dummy4;
436 __u8 checkbit;
437 __u8 subaddr_low;
438 __u8 subaddr_high;
439 __u8 intstatus;
440 __u8 stat1;
441 __u8 stat3;
442 __u8 fault;
443 __u8 tms;
444 __u8 gen;
445 __u8 sysconf;
446 __u8 dummy15;
447};
448
449struct voyager_psi_subregs {
450 __u8 supply;
451 __u8 mask;
452 __u8 present;
453 __u8 DCfail;
454 __u8 ACfail;
455 __u8 fail;
456 __u8 UPSfail;
457 __u8 genstatus;
458};
459
460struct voyager_psi {
461 struct voyager_psi_regs regs;
462 struct voyager_psi_subregs subregs;
463};
464
465struct voyager_SUS {
466#define VOYAGER_DUMP_BUTTON_NMI 0x1
467#define VOYAGER_SUS_VALID 0x2
468#define VOYAGER_SYSINT_COMPLETE 0x3
469 __u8 SUS_mbox;
470#define VOYAGER_NO_COMMAND 0x0
471#define VOYAGER_IGNORE_DUMP 0x1
472#define VOYAGER_DO_DUMP 0x2
473#define VOYAGER_SYSINT_HANDSHAKE 0x3
474#define VOYAGER_DO_MEM_DUMP 0x4
475#define VOYAGER_SYSINT_WAS_RECOVERED 0x5
476 __u8 kernel_mbox;
477#define VOYAGER_MAILBOX_VERSION 0x10
478 __u8 SUS_version;
479 __u8 kernel_version;
480#define VOYAGER_OS_HAS_SYSINT 0x1
481#define VOYAGER_OS_IN_PROGRESS 0x2
482#define VOYAGER_UPDATING_WDPERIOD 0x4
483 __u32 kernel_flags;
484#define VOYAGER_SUS_BOOTING 0x1
485#define VOYAGER_SUS_IN_PROGRESS 0x2
486 __u32 SUS_flags;
487 __u32 watchdog_period;
488 __u32 watchdog_count;
489 __u32 SUS_errorlog;
490 /* lots of system configuration stuff under here */
491};
492
493/* Variables exported by voyager_smp */
494extern __u32 voyager_extended_vic_processors;
495extern __u32 voyager_allowed_boot_processors;
496extern __u32 voyager_quad_processors;
497extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS];
498extern struct voyager_SUS *voyager_SUS;
499
500/* variables exported always */
501extern struct task_struct *voyager_thread;
502extern int voyager_level;
503extern struct voyager_status voyager_status;
504
505/* functions exported by the voyager and voyager_smp modules */
506extern int voyager_cat_readb(__u8 module, __u8 asic, int reg);
507extern void voyager_cat_init(void);
508extern void voyager_detect(struct voyager_bios_info *);
509extern void voyager_trap_init(void);
510extern void voyager_setup_irqs(void);
511extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length);
512extern void voyager_smp_intr_init(void);
513extern __u8 voyager_extended_cmos_read(__u16 cmos_address);
514extern void voyager_smp_dump(void);
515extern void voyager_timer_interrupt(void);
516extern void smp_local_timer_interrupt(void);
517extern void voyager_power_off(void);
518extern void smp_voyager_power_off(void *dummy);
519extern void voyager_restart(void);
520extern void voyager_cat_power_off(void);
521extern void voyager_cat_do_common_interrupt(void);
522extern void voyager_handle_nmi(void);
523extern void voyager_smp_intr_init(void);
 524/* Commands for the voyager_cat_psi() call below: */
525#define VOYAGER_PSI_READ 0
526#define VOYAGER_PSI_WRITE 1
527#define VOYAGER_PSI_SUBREAD 2
528#define VOYAGER_PSI_SUBWRITE 3
529extern void voyager_cat_psi(__u8, __u16, __u8 *);
530
531/* These define the CPIs we use in linux */
532#define VIC_CPI_LEVEL0 0
533#define VIC_CPI_LEVEL1 1
534/* now the fake CPIs */
535#define VIC_TIMER_CPI 2
536#define VIC_INVALIDATE_CPI 3
537#define VIC_RESCHEDULE_CPI 4
538#define VIC_ENABLE_IRQ_CPI 5
539#define VIC_CALL_FUNCTION_CPI 6
540#define VIC_CALL_FUNCTION_SINGLE_CPI 7
541
542/* Now the QIC CPIs: Since we don't need the two initial levels,
543 * these are 2 less than the VIC CPIs */
544#define QIC_CPI_OFFSET 1
545#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
546#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
547#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
548#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
549#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
550#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
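/*
 * Worked values for illustration (note that QIC_CPI_OFFSET is defined
 * as 1 here, so each QIC number ends up one below its VIC counterpart):
 *	QIC_TIMER_CPI                = 2 - 1 = 1
 *	QIC_INVALIDATE_CPI           = 3 - 1 = 2
 *	QIC_RESCHEDULE_CPI           = 4 - 1 = 3
 *	QIC_ENABLE_IRQ_CPI           = 5 - 1 = 4
 *	QIC_CALL_FUNCTION_CPI        = 6 - 1 = 5
 *	QIC_CALL_FUNCTION_SINGLE_CPI = 7 - 1 = 6
 */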
551
552#define VIC_START_FAKE_CPI VIC_TIMER_CPI
553#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
554
555/* this is the SYS_INT CPI. */
556#define VIC_SYS_INT 8
557#define VIC_CMN_INT 15
558
559/* This is the boot CPI for alternate processors. It gets overwritten
560 * by the above once the system has activated all available processors */
561#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
562#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
563
564extern asmlinkage void vic_cpi_interrupt(void);
565extern asmlinkage void vic_sys_interrupt(void);
566extern asmlinkage void vic_cmn_interrupt(void);
567extern asmlinkage void qic_timer_interrupt(void);
568extern asmlinkage void qic_invalidate_interrupt(void);
569extern asmlinkage void qic_reschedule_interrupt(void);
570extern asmlinkage void qic_enable_irq_interrupt(void);
571extern asmlinkage void qic_call_function_interrupt(void);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 24f357e7557a..95f216bbfaf1 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -30,7 +30,7 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
30obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o 30obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
31obj-y += setup.o i8259.o irqinit_$(BITS).o 31obj-y += setup.o i8259.o irqinit_$(BITS).o
32obj-$(CONFIG_X86_VISWS) += visws_quirks.o 32obj-$(CONFIG_X86_VISWS) += visws_quirks.o
33obj-$(CONFIG_X86_32) += probe_32.o probe_roms_32.o 33obj-$(CONFIG_X86_32) += probe_roms_32.o
34obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o 34obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
35obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o 35obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
36obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o 36obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
@@ -58,24 +58,19 @@ obj-$(CONFIG_PCI) += early-quirks.o
58apm-y := apm_32.o 58apm-y := apm_32.o
59obj-$(CONFIG_APM) += apm.o 59obj-$(CONFIG_APM) += apm.o
60obj-$(CONFIG_SMP) += smp.o 60obj-$(CONFIG_SMP) += smp.o
61obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o ipi.o 61obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o
62obj-$(CONFIG_SMP) += setup_percpu.o 62obj-$(CONFIG_SMP) += setup_percpu.o
63obj-$(CONFIG_X86_64_SMP) += tsc_sync.o 63obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
64obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o 64obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
65obj-$(CONFIG_X86_MPPARSE) += mpparse.o 65obj-$(CONFIG_X86_MPPARSE) += mpparse.o
66obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o ipi.o 66obj-y += apic/
67obj-$(CONFIG_X86_IO_APIC) += io_apic.o
68obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o 67obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
69obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 68obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
70obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 69obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
71obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 70obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
72obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 71obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
73obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 72obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
74obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o 73obj-$(CONFIG_X86_VSMP) += vsmp_64.o
75obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
76obj-$(CONFIG_X86_ES7000) += es7000_32.o
77obj-$(CONFIG_X86_SUMMIT) += summit_32.o
78obj-y += vsmp_64.o
79obj-$(CONFIG_KPROBES) += kprobes.o 74obj-$(CONFIG_KPROBES) += kprobes.o
80obj-$(CONFIG_MODULES) += module_$(BITS).o 75obj-$(CONFIG_MODULES) += module_$(BITS).o
81obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o 76obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
@@ -116,17 +111,13 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
116### 111###
117# 64 bit specific files 112# 64 bit specific files
118ifeq ($(CONFIG_X86_64),y) 113ifeq ($(CONFIG_X86_64),y)
119 obj-y += genapic_64.o genapic_flat_64.o 114 obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o
120 obj-y += genx2apic_cluster.o 115 obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
121 obj-y += genx2apic_phys.o 116 obj-$(CONFIG_AUDIT) += audit_64.o
122 obj-$(CONFIG_X86_UV) += genx2apic_uv_x.o tlb_uv.o 117
123 obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o 118 obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
124 obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o 119 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
125 obj-$(CONFIG_AUDIT) += audit_64.o 120 obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
126 121
127 obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o 122 obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
128 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
129 obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
130
131 obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
132endif 123endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 956c1dee6fbe..a18eb7ce2236 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -37,7 +37,6 @@
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/io_apic.h> 38#include <asm/io_apic.h>
39#include <asm/apic.h> 39#include <asm/apic.h>
40#include <asm/genapic.h>
41#include <asm/io.h> 40#include <asm/io.h>
42#include <asm/mpspec.h> 41#include <asm/mpspec.h>
43#include <asm/smp.h> 42#include <asm/smp.h>
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index 3355973b12ac..580b4e296010 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -3,8 +3,8 @@
3 */ 3 */
4#include <asm/segment.h> 4#include <asm/segment.h>
5#include <asm/msr-index.h> 5#include <asm/msr-index.h>
6#include <asm/page.h> 6#include <asm/page_types.h>
7#include <asm/pgtable.h> 7#include <asm/pgtable_types.h>
8#include <asm/processor-flags.h> 8#include <asm/processor-flags.h>
9 9
10 .code16 10 .code16
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index a12e6a9fb659..8ded418b0593 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,7 +1,7 @@
1 .section .text.page_aligned 1 .section .text.page_aligned
2#include <linux/linkage.h> 2#include <linux/linkage.h>
3#include <asm/segment.h> 3#include <asm/segment.h>
4#include <asm/page.h> 4#include <asm/page_types.h>
5 5
6# Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 6# Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
7 7
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index bcc293423a70..8ea5164cbd04 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -1,8 +1,8 @@
1.text 1.text
2#include <linux/linkage.h> 2#include <linux/linkage.h>
3#include <asm/segment.h> 3#include <asm/segment.h>
4#include <asm/pgtable.h> 4#include <asm/pgtable_types.h>
5#include <asm/page.h> 5#include <asm/page_types.h>
6#include <asm/msr.h> 6#include <asm/msr.h>
7#include <asm/asm-offsets.h> 7#include <asm/asm-offsets.h>
8 8
@@ -13,7 +13,6 @@
13 * Hooray, we are in Long 64-bit mode (but still running in low memory) 13 * Hooray, we are in Long 64-bit mode (but still running in low memory)
14 */ 14 */
15ENTRY(wakeup_long64) 15ENTRY(wakeup_long64)
16wakeup_long64:
17 movq saved_magic, %rax 16 movq saved_magic, %rax
18 movq $0x123456789abcdef0, %rdx 17 movq $0x123456789abcdef0, %rdx
19 cmpq %rdx, %rax 18 cmpq %rdx, %rax
@@ -34,16 +33,12 @@ wakeup_long64:
34 33
35 movq saved_rip, %rax 34 movq saved_rip, %rax
36 jmp *%rax 35 jmp *%rax
36ENDPROC(wakeup_long64)
37 37
38bogus_64_magic: 38bogus_64_magic:
39 jmp bogus_64_magic 39 jmp bogus_64_magic
40 40
41 .align 2 41ENTRY(do_suspend_lowlevel)
42 .p2align 4,,15
43.globl do_suspend_lowlevel
44 .type do_suspend_lowlevel,@function
45do_suspend_lowlevel:
46.LFB5:
47 subq $8, %rsp 42 subq $8, %rsp
48 xorl %eax, %eax 43 xorl %eax, %eax
49 call save_processor_state 44 call save_processor_state
@@ -67,7 +62,7 @@ do_suspend_lowlevel:
67 pushfq 62 pushfq
68 popq pt_regs_flags(%rax) 63 popq pt_regs_flags(%rax)
69 64
70 movq $.L97, saved_rip(%rip) 65 movq $resume_point, saved_rip(%rip)
71 66
72 movq %rsp, saved_rsp 67 movq %rsp, saved_rsp
73 movq %rbp, saved_rbp 68 movq %rbp, saved_rbp
@@ -78,14 +73,12 @@ do_suspend_lowlevel:
78 addq $8, %rsp 73 addq $8, %rsp
79 movl $3, %edi 74 movl $3, %edi
80 xorl %eax, %eax 75 xorl %eax, %eax
81 jmp acpi_enter_sleep_state 76 call acpi_enter_sleep_state
82.L97: 77 /* in case something went wrong, restore the machine status and go on */
83 .p2align 4,,7 78 jmp resume_point
84.L99:
85 .align 4
86 movl $24, %eax
87 movw %ax, %ds
88 79
80 .align 4
81resume_point:
89 /* We don't restore %rax, it must be 0 anyway */ 82 /* We don't restore %rax, it must be 0 anyway */
90 movq $saved_context, %rax 83 movq $saved_context, %rax
91 movq saved_context_cr4(%rax), %rbx 84 movq saved_context_cr4(%rax), %rbx
@@ -117,12 +110,9 @@ do_suspend_lowlevel:
117 xorl %eax, %eax 110 xorl %eax, %eax
118 addq $8, %rsp 111 addq $8, %rsp
119 jmp restore_processor_state 112 jmp restore_processor_state
120.LFE5: 113ENDPROC(do_suspend_lowlevel)
121.Lfe5: 114
122 .size do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel
123
124.data 115.data
125ALIGN
126ENTRY(saved_rbp) .quad 0 116ENTRY(saved_rbp) .quad 0
127ENTRY(saved_rsi) .quad 0 117ENTRY(saved_rsi) .quad 0
128ENTRY(saved_rdi) .quad 0 118ENTRY(saved_rdi) .quad 0
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a84ac7b570e6..6907b8e85d52 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -498,12 +498,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
498 */ 498 */
499void *__kprobes text_poke(void *addr, const void *opcode, size_t len) 499void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
500{ 500{
501 unsigned long flags;
502 char *vaddr; 501 char *vaddr;
503 int nr_pages = 2; 502 int nr_pages = 2;
504 struct page *pages[2]; 503 struct page *pages[2];
505 int i; 504 int i;
506 505
506 might_sleep();
507 if (!core_kernel_text((unsigned long)addr)) { 507 if (!core_kernel_text((unsigned long)addr)) {
508 pages[0] = vmalloc_to_page(addr); 508 pages[0] = vmalloc_to_page(addr);
509 pages[1] = vmalloc_to_page(addr + PAGE_SIZE); 509 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +517,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
517 nr_pages = 1; 517 nr_pages = 1;
518 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); 518 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
519 BUG_ON(!vaddr); 519 BUG_ON(!vaddr);
520 local_irq_save(flags); 520 local_irq_disable();
521 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); 521 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
522 local_irq_restore(flags); 522 local_irq_enable();
523 vunmap(vaddr); 523 vunmap(vaddr);
524 sync_core(); 524 sync_core();
525 /* Could also do a CLFLUSH here to speed up CPU recovery; but 525 /* Could also do a CLFLUSH here to speed up CPU recovery; but
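The text_poke() hunk above drops the saved-flags dance: with might_sleep() added, callers must be in sleepable process context, so interrupts are known to be enabled and a plain local_irq_disable()/local_irq_enable() pair suffices. A minimal, hypothetical caller sketch under that assumption; the lock and header names below are illustrative, not taken from this patch:

	#include <linux/types.h>
	#include <linux/mutex.h>
	#include <asm/alternative.h>	/* text_poke() declaration assumed here */

	static DEFINE_MUTEX(patch_lock);	/* assumed external serialization */

	static void patch_one_byte(void *addr, u8 opcode)
	{
		might_sleep();			/* text_poke() may now sleep internally */
		mutex_lock(&patch_lock);
		text_poke(addr, &opcode, 1);
		mutex_unlock(&patch_lock);
	}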
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
new file mode 100644
index 000000000000..da7b7b9f8bd8
--- /dev/null
+++ b/arch/x86/kernel/apic/Makefile
@@ -0,0 +1,19 @@
1#
2# Makefile for local APIC drivers and for the IO-APIC code
3#
4
5obj-$(CONFIG_X86_LOCAL_APIC) += apic.o probe_$(BITS).o ipi.o nmi.o
6obj-$(CONFIG_X86_IO_APIC) += io_apic.o
7obj-$(CONFIG_SMP) += ipi.o
8
9ifeq ($(CONFIG_X86_64),y)
10obj-y += apic_flat_64.o
11obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o
12obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o
13obj-$(CONFIG_X86_UV) += x2apic_uv_x.o
14endif
15
16obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o
17obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
18obj-$(CONFIG_X86_ES7000) += es7000_32.o
19obj-$(CONFIG_X86_SUMMIT) += summit_32.o
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic/apic.c
index a894eea9d51a..f9cecdfd05c5 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -34,9 +34,7 @@
34#include <linux/smp.h> 34#include <linux/smp.h>
35#include <linux/mm.h> 35#include <linux/mm.h>
36 36
37#include <asm/arch_hooks.h>
38#include <asm/pgalloc.h> 37#include <asm/pgalloc.h>
39#include <asm/genapic.h>
40#include <asm/atomic.h> 38#include <asm/atomic.h>
41#include <asm/mpspec.h> 39#include <asm/mpspec.h>
42#include <asm/i8253.h> 40#include <asm/i8253.h>
@@ -112,11 +110,7 @@ static __init int setup_apicpmtimer(char *s)
112__setup("apicpmtimer", setup_apicpmtimer); 110__setup("apicpmtimer", setup_apicpmtimer);
113#endif 111#endif
114 112
115#ifdef CONFIG_X86_64 113#ifdef CONFIG_X86_X2APIC
116#define HAVE_X2APIC
117#endif
118
119#ifdef HAVE_X2APIC
120int x2apic; 114int x2apic;
121/* x2apic enabled before OS handover */ 115/* x2apic enabled before OS handover */
122static int x2apic_preenabled; 116static int x2apic_preenabled;
@@ -214,18 +208,13 @@ static int modern_apic(void)
214 return lapic_get_version() >= 0x14; 208 return lapic_get_version() >= 0x14;
215} 209}
216 210
217/* 211void native_apic_wait_icr_idle(void)
218 * Paravirt kernels also might be using these below ops. So we still
219 * use generic apic_read()/apic_write(), which might be pointing to different
220 * ops in PARAVIRT case.
221 */
222void xapic_wait_icr_idle(void)
223{ 212{
224 while (apic_read(APIC_ICR) & APIC_ICR_BUSY) 213 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
225 cpu_relax(); 214 cpu_relax();
226} 215}
227 216
228u32 safe_xapic_wait_icr_idle(void) 217u32 native_safe_apic_wait_icr_idle(void)
229{ 218{
230 u32 send_status; 219 u32 send_status;
231 int timeout; 220 int timeout;
@@ -241,13 +230,13 @@ u32 safe_xapic_wait_icr_idle(void)
241 return send_status; 230 return send_status;
242} 231}
243 232
244void xapic_icr_write(u32 low, u32 id) 233void native_apic_icr_write(u32 low, u32 id)
245{ 234{
246 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); 235 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
247 apic_write(APIC_ICR, low); 236 apic_write(APIC_ICR, low);
248} 237}
249 238
250static u64 xapic_icr_read(void) 239u64 native_apic_icr_read(void)
251{ 240{
252 u32 icr1, icr2; 241 u32 icr1, icr2;
253 242
@@ -257,54 +246,6 @@ static u64 xapic_icr_read(void)
257 return icr1 | ((u64)icr2 << 32); 246 return icr1 | ((u64)icr2 << 32);
258} 247}
259 248
260static struct apic_ops xapic_ops = {
261 .read = native_apic_mem_read,
262 .write = native_apic_mem_write,
263 .icr_read = xapic_icr_read,
264 .icr_write = xapic_icr_write,
265 .wait_icr_idle = xapic_wait_icr_idle,
266 .safe_wait_icr_idle = safe_xapic_wait_icr_idle,
267};
268
269struct apic_ops __read_mostly *apic_ops = &xapic_ops;
270EXPORT_SYMBOL_GPL(apic_ops);
271
272#ifdef HAVE_X2APIC
273static void x2apic_wait_icr_idle(void)
274{
275 /* no need to wait for icr idle in x2apic */
276 return;
277}
278
279static u32 safe_x2apic_wait_icr_idle(void)
280{
281 /* no need to wait for icr idle in x2apic */
282 return 0;
283}
284
285void x2apic_icr_write(u32 low, u32 id)
286{
287 wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
288}
289
290static u64 x2apic_icr_read(void)
291{
292 unsigned long val;
293
294 rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
295 return val;
296}
297
298static struct apic_ops x2apic_ops = {
299 .read = native_apic_msr_read,
300 .write = native_apic_msr_write,
301 .icr_read = x2apic_icr_read,
302 .icr_write = x2apic_icr_write,
303 .wait_icr_idle = x2apic_wait_icr_idle,
304 .safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
305};
306#endif
307
308/** 249/**
309 * enable_NMI_through_LVT0 - enable NMI through local vector table 0 250 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
310 */ 251 */
@@ -895,7 +836,7 @@ void clear_local_APIC(void)
895 } 836 }
896 837
897 /* lets not touch this if we didn't frob it */ 838 /* lets not touch this if we didn't frob it */
898#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL) 839#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
899 if (maxlvt >= 5) { 840 if (maxlvt >= 5) {
900 v = apic_read(APIC_LVTTHMR); 841 v = apic_read(APIC_LVTTHMR);
901 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); 842 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
@@ -1320,17 +1261,12 @@ void __cpuinit end_local_APIC_setup(void)
1320 apic_pm_activate(); 1261 apic_pm_activate();
1321} 1262}
1322 1263
1323#ifdef HAVE_X2APIC 1264#ifdef CONFIG_X86_X2APIC
1324void check_x2apic(void) 1265void check_x2apic(void)
1325{ 1266{
1326 int msr, msr2; 1267 if (x2apic_enabled()) {
1327
1328 rdmsr(MSR_IA32_APICBASE, msr, msr2);
1329
1330 if (msr & X2APIC_ENABLE) {
1331 pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); 1268 pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
1332 x2apic_preenabled = x2apic = 1; 1269 x2apic_preenabled = x2apic = 1;
1333 apic_ops = &x2apic_ops;
1334 } 1270 }
1335} 1271}
1336 1272
@@ -1338,6 +1274,9 @@ void enable_x2apic(void)
1338{ 1274{
1339 int msr, msr2; 1275 int msr, msr2;
1340 1276
1277 if (!x2apic)
1278 return;
1279
1341 rdmsr(MSR_IA32_APICBASE, msr, msr2); 1280 rdmsr(MSR_IA32_APICBASE, msr, msr2);
1342 if (!(msr & X2APIC_ENABLE)) { 1281 if (!(msr & X2APIC_ENABLE)) {
1343 pr_info("Enabling x2apic\n"); 1282 pr_info("Enabling x2apic\n");
@@ -1401,7 +1340,6 @@ void __init enable_IR_x2apic(void)
1401 1340
1402 if (!x2apic) { 1341 if (!x2apic) {
1403 x2apic = 1; 1342 x2apic = 1;
1404 apic_ops = &x2apic_ops;
1405 enable_x2apic(); 1343 enable_x2apic();
1406 } 1344 }
1407 1345
@@ -1439,7 +1377,7 @@ end:
1439 1377
1440 return; 1378 return;
1441} 1379}
1442#endif /* HAVE_X2APIC */ 1380#endif /* CONFIG_X86_X2APIC */
1443 1381
1444#ifdef CONFIG_X86_64 1382#ifdef CONFIG_X86_64
1445/* 1383/*
@@ -1570,7 +1508,7 @@ void __init early_init_lapic_mapping(void)
1570 */ 1508 */
1571void __init init_apic_mappings(void) 1509void __init init_apic_mappings(void)
1572{ 1510{
1573#ifdef HAVE_X2APIC 1511#ifdef CONFIG_X86_X2APIC
1574 if (x2apic) { 1512 if (x2apic) {
1575 boot_cpu_physical_apicid = read_apic_id(); 1513 boot_cpu_physical_apicid = read_apic_id();
1576 return; 1514 return;
@@ -1634,9 +1572,7 @@ int __init APIC_init_uniprocessor(void)
1634 } 1572 }
1635#endif 1573#endif
1636 1574
1637#ifdef HAVE_X2APIC
1638 enable_IR_x2apic(); 1575 enable_IR_x2apic();
1639#endif
1640#ifdef CONFIG_X86_64 1576#ifdef CONFIG_X86_64
1641 default_setup_apic_routing(); 1577 default_setup_apic_routing();
1642#endif 1578#endif
@@ -2021,7 +1957,7 @@ static int lapic_resume(struct sys_device *dev)
2021 1957
2022 local_irq_save(flags); 1958 local_irq_save(flags);
2023 1959
2024#ifdef HAVE_X2APIC 1960#ifdef CONFIG_X86_X2APIC
2025 if (x2apic) 1961 if (x2apic)
2026 enable_x2apic(); 1962 enable_x2apic();
2027 else 1963 else
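For reference, the open-coded MSR test removed from check_x2apic() above is equivalent to something like the helper sketched below; this mirrors the deleted code and is not the helper actually introduced elsewhere in the series:

	static inline int x2apic_enabled_sketch(void)
	{
		unsigned int lo, hi;

		rdmsr(MSR_IA32_APICBASE, lo, hi);
		return !!(lo & X2APIC_ENABLE);
	}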
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 249d2d3c034c..f933822dba18 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -17,8 +17,8 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/hardirq.h> 18#include <linux/hardirq.h>
19#include <asm/smp.h> 19#include <asm/smp.h>
20#include <asm/apic.h>
20#include <asm/ipi.h> 21#include <asm/ipi.h>
21#include <asm/genapic.h>
22 22
23#ifdef CONFIG_ACPI 23#ifdef CONFIG_ACPI
24#include <acpi/acpi_bus.h> 24#include <acpi/acpi_bus.h>
@@ -178,7 +178,7 @@ static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
178 return hard_smp_processor_id() >> index_msb; 178 return hard_smp_processor_id() >> index_msb;
179} 179}
180 180
181struct genapic apic_flat = { 181struct apic apic_flat = {
182 .name = "flat", 182 .name = "flat",
183 .probe = NULL, 183 .probe = NULL,
184 .acpi_madt_oem_check = flat_acpi_madt_oem_check, 184 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
@@ -222,13 +222,18 @@ struct genapic apic_flat = {
222 .send_IPI_all = flat_send_IPI_all, 222 .send_IPI_all = flat_send_IPI_all,
223 .send_IPI_self = apic_send_IPI_self, 223 .send_IPI_self = apic_send_IPI_self,
224 224
225 .wakeup_cpu = NULL,
226 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 225 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
227 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 226 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
228 .wait_for_init_deassert = NULL, 227 .wait_for_init_deassert = NULL,
229 .smp_callin_clear_local_apic = NULL, 228 .smp_callin_clear_local_apic = NULL,
230 .store_NMI_vector = NULL,
231 .inquire_remote_apic = NULL, 229 .inquire_remote_apic = NULL,
230
231 .read = native_apic_mem_read,
232 .write = native_apic_mem_write,
233 .icr_read = native_apic_icr_read,
234 .icr_write = native_apic_icr_write,
235 .wait_icr_idle = native_apic_wait_icr_idle,
236 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
232}; 237};
233 238
234/* 239/*
@@ -321,7 +326,7 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
321 return BAD_APICID; 326 return BAD_APICID;
322} 327}
323 328
324struct genapic apic_physflat = { 329struct apic apic_physflat = {
325 330
326 .name = "physical flat", 331 .name = "physical flat",
327 .probe = NULL, 332 .probe = NULL,
@@ -367,11 +372,16 @@ struct genapic apic_physflat = {
367 .send_IPI_all = physflat_send_IPI_all, 372 .send_IPI_all = physflat_send_IPI_all,
368 .send_IPI_self = apic_send_IPI_self, 373 .send_IPI_self = apic_send_IPI_self,
369 374
370 .wakeup_cpu = NULL,
371 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 375 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
372 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 376 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
373 .wait_for_init_deassert = NULL, 377 .wait_for_init_deassert = NULL,
374 .smp_callin_clear_local_apic = NULL, 378 .smp_callin_clear_local_apic = NULL,
375 .store_NMI_vector = NULL,
376 .inquire_remote_apic = NULL, 379 .inquire_remote_apic = NULL,
380
381 .read = native_apic_mem_read,
382 .write = native_apic_mem_write,
383 .icr_read = native_apic_icr_read,
384 .icr_write = native_apic_icr_write,
385 .wait_icr_idle = native_apic_wait_icr_idle,
386 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
377}; 387};
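With the former apic_ops fields folded into struct apic (as the tails of apic_flat and apic_physflat above show), low-level register access now goes through the single per-platform driver pointer. A usage sketch, assuming the global 'apic' pointer used elsewhere in this series:

	/* sketch: generic code reaches the ICR through the 'apic' driver */
	static void send_fixed_ipi(unsigned int apicid, int vector)
	{
		apic->wait_icr_idle();
		apic->icr_write(APIC_DM_FIXED | vector, apicid);
	}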
diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 47a62f46afdb..d806ecaa948f 100644
--- a/arch/x86/kernel/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -1,34 +1,32 @@
1/* 1/*
2 * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. 2 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
3 *
3 * Drives the local APIC in "clustered mode". 4 * Drives the local APIC in "clustered mode".
4 */ 5 */
5#define APIC_DEFINITION 1
6#include <linux/threads.h> 6#include <linux/threads.h>
7#include <linux/cpumask.h> 7#include <linux/cpumask.h>
8#include <asm/mpspec.h>
9#include <asm/genapic.h>
10#include <asm/fixmap.h>
11#include <asm/apicdef.h>
12#include <asm/ipi.h>
13#include <linux/kernel.h> 8#include <linux/kernel.h>
14#include <linux/init.h> 9#include <linux/init.h>
15#include <linux/dmi.h> 10#include <linux/dmi.h>
16#include <linux/smp.h> 11#include <linux/smp.h>
17 12
13#include <asm/apicdef.h>
14#include <asm/fixmap.h>
15#include <asm/mpspec.h>
16#include <asm/apic.h>
17#include <asm/ipi.h>
18 18
19static inline unsigned bigsmp_get_apic_id(unsigned long x) 19static unsigned bigsmp_get_apic_id(unsigned long x)
20{ 20{
21 return (x >> 24) & 0xFF; 21 return (x >> 24) & 0xFF;
22} 22}
23 23
24#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) 24static int bigsmp_apic_id_registered(void)
25
26static inline int bigsmp_apic_id_registered(void)
27{ 25{
28 return 1; 26 return 1;
29} 27}
30 28
31static inline const cpumask_t *bigsmp_target_cpus(void) 29static const cpumask_t *bigsmp_target_cpus(void)
32{ 30{
33#ifdef CONFIG_SMP 31#ifdef CONFIG_SMP
34 return &cpu_online_map; 32 return &cpu_online_map;
@@ -37,15 +35,12 @@ static inline const cpumask_t *bigsmp_target_cpus(void)
37#endif 35#endif
38} 36}
39 37
40#define APIC_DFR_VALUE (APIC_DFR_FLAT) 38static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
41
42static inline unsigned long
43bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
44{ 39{
45 return 0; 40 return 0;
46} 41}
47 42
48static inline unsigned long bigsmp_check_apicid_present(int bit) 43static unsigned long bigsmp_check_apicid_present(int bit)
49{ 44{
50 return 1; 45 return 1;
51} 46}
@@ -53,9 +48,11 @@ static inline unsigned long bigsmp_check_apicid_present(int bit)
53static inline unsigned long calculate_ldr(int cpu) 48static inline unsigned long calculate_ldr(int cpu)
54{ 49{
55 unsigned long val, id; 50 unsigned long val, id;
51
56 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; 52 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
57 id = xapic_phys_to_log_apicid(cpu); 53 id = per_cpu(x86_bios_cpu_apicid, cpu);
58 val |= SET_APIC_LOGICAL_ID(id); 54 val |= SET_APIC_LOGICAL_ID(id);
55
59 return val; 56 return val;
60} 57}
61 58
@@ -66,28 +63,29 @@ static inline unsigned long calculate_ldr(int cpu)
66 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel 63 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
67 * document number 292116). So here it goes... 64 * document number 292116). So here it goes...
68 */ 65 */
69static inline void bigsmp_init_apic_ldr(void) 66static void bigsmp_init_apic_ldr(void)
70{ 67{
71 unsigned long val; 68 unsigned long val;
72 int cpu = smp_processor_id(); 69 int cpu = smp_processor_id();
73 70
74 apic_write(APIC_DFR, APIC_DFR_VALUE); 71 apic_write(APIC_DFR, APIC_DFR_FLAT);
75 val = calculate_ldr(cpu); 72 val = calculate_ldr(cpu);
76 apic_write(APIC_LDR, val); 73 apic_write(APIC_LDR, val);
77} 74}
78 75
79static inline void bigsmp_setup_apic_routing(void) 76static void bigsmp_setup_apic_routing(void)
80{ 77{
81 printk("Enabling APIC mode: %s. Using %d I/O APICs\n", 78 printk(KERN_INFO
82 "Physflat", nr_ioapics); 79 "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
80 nr_ioapics);
83} 81}
84 82
85static inline int bigsmp_apicid_to_node(int logical_apicid) 83static int bigsmp_apicid_to_node(int logical_apicid)
86{ 84{
87 return apicid_2_node[hard_smp_processor_id()]; 85 return apicid_2_node[hard_smp_processor_id()];
88} 86}
89 87
90static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) 88static int bigsmp_cpu_present_to_apicid(int mps_cpu)
91{ 89{
92 if (mps_cpu < nr_cpu_ids) 90 if (mps_cpu < nr_cpu_ids)
93 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); 91 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
@@ -95,12 +93,11 @@ static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
95 return BAD_APICID; 93 return BAD_APICID;
96} 94}
97 95
98static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) 96static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
99{ 97{
100 return physid_mask_of_physid(phys_apicid); 98 return physid_mask_of_physid(phys_apicid);
101} 99}
102 100
103extern u8 cpu_2_logical_apicid[];
104/* Mapping from cpu number to logical apicid */ 101/* Mapping from cpu number to logical apicid */
105static inline int bigsmp_cpu_to_logical_apicid(int cpu) 102static inline int bigsmp_cpu_to_logical_apicid(int cpu)
106{ 103{
@@ -109,29 +106,24 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu)
109 return cpu_physical_id(cpu); 106 return cpu_physical_id(cpu);
110} 107}
111 108
112static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) 109static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
113{ 110{
114 /* For clustered we don't have a good way to do this yet - hack */ 111 /* For clustered we don't have a good way to do this yet - hack */
115 return physids_promote(0xFFL); 112 return physids_promote(0xFFL);
116} 113}
117 114
118static inline void bigsmp_setup_portio_remap(void) 115static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
119{
120}
121
122static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
123{ 116{
124 return 1; 117 return 1;
125} 118}
126 119
127/* As we are using single CPU as destination, pick only one CPU here */ 120/* As we are using single CPU as destination, pick only one CPU here */
128static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) 121static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
129{ 122{
130 return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); 123 return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
131} 124}
132 125
133static inline unsigned int 126static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
134bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
135 const struct cpumask *andmask) 127 const struct cpumask *andmask)
136{ 128{
137 int cpu; 129 int cpu;
@@ -150,7 +142,7 @@ bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
150 return BAD_APICID; 142 return BAD_APICID;
151} 143}
152 144
153static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) 145static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
154{ 146{
155 return cpuid_apic >> index_msb; 147 return cpuid_apic >> index_msb;
156} 148}
@@ -160,12 +152,12 @@ static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
160 default_send_IPI_mask_sequence_phys(mask, vector); 152 default_send_IPI_mask_sequence_phys(mask, vector);
161} 153}
162 154
163static inline void bigsmp_send_IPI_allbutself(int vector) 155static void bigsmp_send_IPI_allbutself(int vector)
164{ 156{
165 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); 157 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
166} 158}
167 159
168static inline void bigsmp_send_IPI_all(int vector) 160static void bigsmp_send_IPI_all(int vector)
169{ 161{
170 bigsmp_send_IPI_mask(cpu_online_mask, vector); 162 bigsmp_send_IPI_mask(cpu_online_mask, vector);
171} 163}
@@ -176,21 +168,24 @@ static int hp_ht_bigsmp(const struct dmi_system_id *d)
176{ 168{
177 printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); 169 printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
178 dmi_bigsmp = 1; 170 dmi_bigsmp = 1;
171
179 return 0; 172 return 0;
180} 173}
181 174
182 175
183static const struct dmi_system_id bigsmp_dmi_table[] = { 176static const struct dmi_system_id bigsmp_dmi_table[] = {
184 { hp_ht_bigsmp, "HP ProLiant DL760 G2", 177 { hp_ht_bigsmp, "HP ProLiant DL760 G2",
185 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), 178 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
186 DMI_MATCH(DMI_BIOS_VERSION, "P44-"),} 179 DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
180 }
187 }, 181 },
188 182
189 { hp_ht_bigsmp, "HP ProLiant DL740", 183 { hp_ht_bigsmp, "HP ProLiant DL740",
190 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), 184 { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
191 DMI_MATCH(DMI_BIOS_VERSION, "P47-"),} 185 DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
186 }
192 }, 187 },
193 { } 188 { } /* NULL entry stops DMI scanning */
194}; 189};
195 190
196static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask) 191static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
@@ -205,10 +200,11 @@ static int probe_bigsmp(void)
205 dmi_bigsmp = 1; 200 dmi_bigsmp = 1;
206 else 201 else
207 dmi_check_system(bigsmp_dmi_table); 202 dmi_check_system(bigsmp_dmi_table);
203
208 return dmi_bigsmp; 204 return dmi_bigsmp;
209} 205}
210 206
211struct genapic apic_bigsmp = { 207struct apic apic_bigsmp = {
212 208
213 .name = "bigsmp", 209 .name = "bigsmp",
214 .probe = probe_bigsmp, 210 .probe = probe_bigsmp,
@@ -254,13 +250,18 @@ struct genapic apic_bigsmp = {
254 .send_IPI_all = bigsmp_send_IPI_all, 250 .send_IPI_all = bigsmp_send_IPI_all,
255 .send_IPI_self = default_send_IPI_self, 251 .send_IPI_self = default_send_IPI_self,
256 252
257 .wakeup_cpu = NULL,
258 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 253 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
259 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 254 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
260 255
261 .wait_for_init_deassert = default_wait_for_init_deassert, 256 .wait_for_init_deassert = default_wait_for_init_deassert,
262 257
263 .smp_callin_clear_local_apic = NULL, 258 .smp_callin_clear_local_apic = NULL,
264 .store_NMI_vector = NULL,
265 .inquire_remote_apic = default_inquire_remote_apic, 259 .inquire_remote_apic = default_inquire_remote_apic,
260
261 .read = native_apic_mem_read,
262 .write = native_apic_mem_write,
263 .icr_read = native_apic_icr_read,
264 .icr_write = native_apic_icr_write,
265 .wait_icr_idle = native_apic_wait_icr_idle,
266 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
266}; 267};
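For illustration only (the apicid value is assumed, not from the patch): calculate_ldr() above keeps the reserved LDR bits and places the per-CPU BIOS APIC id in bits 31..24 via SET_APIC_LOGICAL_ID(), so a CPU whose x86_bios_cpu_apicid is 5 programs APIC_LDR roughly as sketched here:

	/* sketch only: mirrors bigsmp_init_apic_ldr()/calculate_ldr() above */
	static unsigned long example_bigsmp_ldr(void)
	{
		unsigned long val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;

		return val | SET_APIC_LOGICAL_ID(5);	/* SET_APIC_LOGICAL_ID(x) == x << 24 */
	}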
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 55515d73d9c2..19588f2770ee 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -1,10 +1,14 @@
1/* 1/*
2 * Written by: Garry Forsgren, Unisys Corporation 2 * Written by: Garry Forsgren, Unisys Corporation
3 * Natalie Protasevich, Unisys Corporation 3 * Natalie Protasevich, Unisys Corporation
4 *
4 * This file contains the code to configure and interface 5 * This file contains the code to configure and interface
5 * with Unisys ES7000 series hardware system manager. 6 * with Unisys ES7000 series hardware system manager.
6 * 7 *
7 * Copyright (c) 2003 Unisys Corporation. All Rights Reserved. 8 * Copyright (c) 2003 Unisys Corporation.
9 * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
10 *
11 * All Rights Reserved.
8 * 12 *
9 * This program is free software; you can redistribute it and/or modify it 13 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of version 2 of the GNU General Public License as 14 * under the terms of version 2 of the GNU General Public License as
@@ -23,128 +27,105 @@
23 * 27 *
24 * http://www.unisys.com 28 * http://www.unisys.com
25 */ 29 */
26 30#include <linux/notifier.h>
27#include <linux/module.h> 31#include <linux/spinlock.h>
28#include <linux/types.h> 32#include <linux/cpumask.h>
33#include <linux/threads.h>
29#include <linux/kernel.h> 34#include <linux/kernel.h>
30#include <linux/smp.h> 35#include <linux/module.h>
36#include <linux/reboot.h>
31#include <linux/string.h> 37#include <linux/string.h>
32#include <linux/spinlock.h> 38#include <linux/types.h>
33#include <linux/errno.h> 39#include <linux/errno.h>
34#include <linux/notifier.h>
35#include <linux/reboot.h>
36#include <linux/init.h>
37#include <linux/acpi.h> 40#include <linux/acpi.h>
38#include <asm/io.h> 41#include <linux/init.h>
39#include <asm/nmi.h> 42#include <linux/nmi.h>
40#include <asm/smp.h> 43#include <linux/smp.h>
41#include <asm/atomic.h> 44#include <linux/io.h>
45
42#include <asm/apicdef.h> 46#include <asm/apicdef.h>
43#include <asm/genapic.h> 47#include <asm/atomic.h>
48#include <asm/fixmap.h>
49#include <asm/mpspec.h>
44#include <asm/setup.h> 50#include <asm/setup.h>
51#include <asm/apic.h>
52#include <asm/ipi.h>
45 53
46/* 54/*
47 * ES7000 chipsets 55 * ES7000 chipsets
48 */ 56 */
49 57
50#define NON_UNISYS 0 58#define NON_UNISYS 0
51#define ES7000_CLASSIC 1 59#define ES7000_CLASSIC 1
52#define ES7000_ZORRO 2 60#define ES7000_ZORRO 2
53 61
62#define MIP_REG 1
63#define MIP_PSAI_REG 4
54 64
55#define MIP_REG 1 65#define MIP_BUSY 1
56#define MIP_PSAI_REG 4 66#define MIP_SPIN 0xf0000
67#define MIP_VALID 0x0100000000000000ULL
68#define MIP_SW_APIC 0x1020b
57 69
58#define MIP_BUSY 1 70#define MIP_PORT(val) ((val >> 32) & 0xffff)
59#define MIP_SPIN 0xf0000
60#define MIP_VALID 0x0100000000000000ULL
61#define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff)
62 71
63#define MIP_RD_LO(VALUE) (VALUE & 0xffffffff) 72#define MIP_RD_LO(val) (val & 0xffffffff)
64 73
65struct mip_reg_info { 74struct mip_reg {
66 unsigned long long mip_info; 75 unsigned long long off_0x00;
67 unsigned long long delivery_info; 76 unsigned long long off_0x08;
68 unsigned long long host_reg; 77 unsigned long long off_0x10;
69 unsigned long long mip_reg; 78 unsigned long long off_0x18;
79 unsigned long long off_0x20;
80 unsigned long long off_0x28;
81 unsigned long long off_0x30;
82 unsigned long long off_0x38;
70}; 83};
71 84
72struct part_info { 85struct mip_reg_info {
73 unsigned char type; 86 unsigned long long mip_info;
74 unsigned char length; 87 unsigned long long delivery_info;
75 unsigned char part_id; 88 unsigned long long host_reg;
76 unsigned char apic_mode; 89 unsigned long long mip_reg;
77 unsigned long snum;
78 char ptype[16];
79 char sname[64];
80 char pname[64];
81}; 90};
82 91
83struct psai { 92struct psai {
84 unsigned long long entry_type; 93 unsigned long long entry_type;
85 unsigned long long addr; 94 unsigned long long addr;
86 unsigned long long bep_addr; 95 unsigned long long bep_addr;
87}; 96};
88 97
89struct es7000_mem_info { 98#ifdef CONFIG_ACPI
90 unsigned char type;
91 unsigned char length;
92 unsigned char resv[6];
93 unsigned long long start;
94 unsigned long long size;
95};
96 99
97struct es7000_oem_table { 100struct es7000_oem_table {
98 unsigned long long hdr; 101 struct acpi_table_header Header;
99 struct mip_reg_info mip; 102 u32 OEMTableAddr;
100 struct part_info pif; 103 u32 OEMTableSize;
101 struct es7000_mem_info shm;
102 struct psai psai;
103}; 104};
104 105
105#ifdef CONFIG_ACPI 106static unsigned long oem_addrX;
107static unsigned long oem_size;
106 108
107struct oem_table {
108 struct acpi_table_header Header;
109 u32 OEMTableAddr;
110 u32 OEMTableSize;
111};
112
113extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
114extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
115#endif 109#endif
116 110
117struct mip_reg {
118 unsigned long long off_0;
119 unsigned long long off_8;
120 unsigned long long off_10;
121 unsigned long long off_18;
122 unsigned long long off_20;
123 unsigned long long off_28;
124 unsigned long long off_30;
125 unsigned long long off_38;
126};
127
128#define MIP_SW_APIC 0x1020b
129#define MIP_FUNC(VALUE) (VALUE & 0xff)
130
131/* 111/*
132 * ES7000 Globals 112 * ES7000 Globals
133 */ 113 */
134 114
135static volatile unsigned long *psai = NULL; 115static volatile unsigned long *psai;
136static struct mip_reg *mip_reg; 116static struct mip_reg *mip_reg;
137static struct mip_reg *host_reg; 117static struct mip_reg *host_reg;
138static int mip_port; 118static int mip_port;
139static unsigned long mip_addr, host_addr; 119static unsigned long mip_addr;
120static unsigned long host_addr;
140 121
141int es7000_plat; 122int es7000_plat;
142 123
143/* 124/*
144 * GSI override for ES7000 platforms. 125 * GSI override for ES7000 platforms.
145 */ 126 */
146 127
147static unsigned int base; 128static unsigned int base;
148 129
149static int 130static int
150es7000_rename_gsi(int ioapic, int gsi) 131es7000_rename_gsi(int ioapic, int gsi)
@@ -160,6 +141,7 @@ es7000_rename_gsi(int ioapic, int gsi)
160 141
161 if (!ioapic && (gsi < 16)) 142 if (!ioapic && (gsi < 16))
162 gsi += base; 143 gsi += base;
144
163 return gsi; 145 return gsi;
164} 146}
165 147
@@ -181,23 +163,17 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
181 return 0; 163 return 0;
182} 164}
183 165
184static int __init es7000_update_genapic(void) 166static int es7000_apic_is_cluster(void)
185{ 167{
186 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
187
188 /* MPENTIUMIII */ 168 /* MPENTIUMIII */
189 if (boot_cpu_data.x86 == 6 && 169 if (boot_cpu_data.x86 == 6 &&
190 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { 170 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11))
191 es7000_update_genapic_to_cluster(); 171 return 1;
192 apic->wait_for_init_deassert = NULL;
193 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
194 }
195 172
196 return 0; 173 return 0;
197} 174}
198 175
199void __init 176static void setup_unisys(void)
200setup_unisys(void)
201{ 177{
202 /* 178 /*
203 * Determine the generation of the ES7000 currently running. 179 * Determine the generation of the ES7000 currently running.
@@ -211,23 +187,19 @@ setup_unisys(void)
211 else 187 else
212 es7000_plat = ES7000_CLASSIC; 188 es7000_plat = ES7000_CLASSIC;
213 ioapic_renumber_irq = es7000_rename_gsi; 189 ioapic_renumber_irq = es7000_rename_gsi;
214
215 x86_quirks->update_genapic = es7000_update_genapic;
216} 190}
217 191
218/* 192/*
219 * Parse the OEM Table 193 * Parse the OEM Table:
220 */ 194 */
221 195static int parse_unisys_oem(char *oemptr)
222int __init
223parse_unisys_oem (char *oemptr)
224{ 196{
225 int i; 197 int i;
226 int success = 0; 198 int success = 0;
227 unsigned char type, size; 199 unsigned char type, size;
228 unsigned long val; 200 unsigned long val;
229 char *tp = NULL; 201 char *tp = NULL;
230 struct psai *psaip = NULL; 202 struct psai *psaip = NULL;
231 struct mip_reg_info *mi; 203 struct mip_reg_info *mi;
232 struct mip_reg *host, *mip; 204 struct mip_reg *host, *mip;
233 205
@@ -235,7 +207,7 @@ parse_unisys_oem (char *oemptr)
235 207
236 tp += 8; 208 tp += 8;
237 209
238 for (i=0; i <= 6; i++) { 210 for (i = 0; i <= 6; i++) {
239 type = *tp++; 211 type = *tp++;
240 size = *tp++; 212 size = *tp++;
241 tp -= 2; 213 tp -= 2;
@@ -273,50 +245,114 @@ parse_unisys_oem (char *oemptr)
273 tp += size; 245 tp += size;
274 } 246 }
275 247
276 if (success < 2) { 248 if (success < 2)
277 es7000_plat = NON_UNISYS; 249 es7000_plat = NON_UNISYS;
278 } else 250 else
279 setup_unisys(); 251 setup_unisys();
252
280 return es7000_plat; 253 return es7000_plat;
281} 254}
282 255
283#ifdef CONFIG_ACPI 256#ifdef CONFIG_ACPI
284static unsigned long oem_addrX; 257static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
285static unsigned long oem_size;
286int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
287{ 258{
288 struct acpi_table_header *header = NULL; 259 struct acpi_table_header *header = NULL;
289 int i = 0; 260 struct es7000_oem_table *table;
290 acpi_size tbl_size; 261 acpi_size tbl_size;
262 acpi_status ret;
263 int i = 0;
291 264
292 while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) { 265 for (;;) {
293 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { 266 ret = acpi_get_table_with_size("OEM1", i++, &header, &tbl_size);
294 struct oem_table *t = (struct oem_table *)header; 267 if (!ACPI_SUCCESS(ret))
268 return -1;
295 269
296 oem_addrX = t->OEMTableAddr; 270 if (!memcmp((char *) &header->oem_id, "UNISYS", 6))
297 oem_size = t->OEMTableSize; 271 break;
298 early_acpi_os_unmap_memory(header, tbl_size);
299 272
300 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
301 oem_size);
302 return 0;
303 }
304 early_acpi_os_unmap_memory(header, tbl_size); 273 early_acpi_os_unmap_memory(header, tbl_size);
305 } 274 }
306 return -1; 275
276 table = (void *)header;
277
278 oem_addrX = table->OEMTableAddr;
279 oem_size = table->OEMTableSize;
280
281 early_acpi_os_unmap_memory(header, tbl_size);
282
283 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size);
284
285 return 0;
307} 286}
308 287
309void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) 288static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
310{ 289{
311 if (!oem_addr) 290 if (!oem_addr)
312 return; 291 return;
313 292
314 __acpi_unmap_table((char *)oem_addr, oem_size); 293 __acpi_unmap_table((char *)oem_addr, oem_size);
315} 294}
316#endif
317 295
318static void 296static int es7000_check_dsdt(void)
319es7000_spin(int n) 297{
298 struct acpi_table_header header;
299
300 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
301 !strncmp(header.oem_id, "UNISYS", 6))
302 return 1;
303 return 0;
304}
305
306static int es7000_acpi_ret;
307
308/* Hook from generic ACPI tables.c */
309static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
310{
311 unsigned long oem_addr = 0;
312 int check_dsdt;
313 int ret = 0;
314
315 /* check dsdt at first to avoid clear fix_map for oem_addr */
316 check_dsdt = es7000_check_dsdt();
317
318 if (!find_unisys_acpi_oem_table(&oem_addr)) {
319 if (check_dsdt) {
320 ret = parse_unisys_oem((char *)oem_addr);
321 } else {
322 setup_unisys();
323 ret = 1;
324 }
325 /*
326 * we need to unmap it
327 */
328 unmap_unisys_acpi_oem_table(oem_addr);
329 }
330
331 es7000_acpi_ret = ret;
332
333 return ret && !es7000_apic_is_cluster();
334}
335
336static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
337{
338 int ret = es7000_acpi_ret;
339
340 return ret && es7000_apic_is_cluster();
341}
342
343#else /* !CONFIG_ACPI: */
344static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
345{
346 return 0;
347}
348
349static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
350{
351 return 0;
352}
353#endif /* !CONFIG_ACPI */
354
355static void es7000_spin(int n)
320{ 356{
321 int i = 0; 357 int i = 0;
322 358
@@ -324,19 +360,17 @@ es7000_spin(int n)
324 rep_nop(); 360 rep_nop();
325} 361}
326 362
327static int __init 363static int es7000_mip_write(struct mip_reg *mip_reg)
328es7000_mip_write(struct mip_reg *mip_reg)
329{ 364{
330 int status = 0; 365 int status = 0;
331 int spin; 366 int spin;
332 367
333 spin = MIP_SPIN; 368 spin = MIP_SPIN;
334 while (((unsigned long long)host_reg->off_38 & 369 while ((host_reg->off_0x38 & MIP_VALID) != 0) {
335 (unsigned long long)MIP_VALID) != 0) { 370 if (--spin <= 0) {
336 if (--spin <= 0) { 371 WARN(1, "Timeout waiting for Host Valid Flag\n");
337 printk("es7000_mip_write: Timeout waiting for Host Valid Flag"); 372 return -1;
338 return -1; 373 }
339 }
340 es7000_spin(MIP_SPIN); 374 es7000_spin(MIP_SPIN);
341 } 375 }
342 376
@@ -345,23 +379,21 @@ es7000_mip_write(struct mip_reg *mip_reg)
345 379
346 spin = MIP_SPIN; 380 spin = MIP_SPIN;
347 381
348 while (((unsigned long long)mip_reg->off_38 & 382 while ((mip_reg->off_0x38 & MIP_VALID) == 0) {
349 (unsigned long long)MIP_VALID) == 0) {
350 if (--spin <= 0) { 383 if (--spin <= 0) {
351 printk("es7000_mip_write: Timeout waiting for MIP Valid Flag"); 384 WARN(1, "Timeout waiting for MIP Valid Flag\n");
352 return -1; 385 return -1;
353 } 386 }
354 es7000_spin(MIP_SPIN); 387 es7000_spin(MIP_SPIN);
355 } 388 }
356 389
357 status = ((unsigned long long)mip_reg->off_0 & 390 status = (mip_reg->off_0x00 & 0xffff0000000000ULL) >> 48;
358 (unsigned long long)0xffff0000000000ULL) >> 48; 391 mip_reg->off_0x38 &= ~MIP_VALID;
359 mip_reg->off_38 = ((unsigned long long)mip_reg->off_38 & 392
360 (unsigned long long)~MIP_VALID);
361 return status; 393 return status;
362} 394}
363 395
364void __init es7000_enable_apic_mode(void) 396static void es7000_enable_apic_mode(void)
365{ 397{
366 struct mip_reg es7000_mip_reg; 398 struct mip_reg es7000_mip_reg;
367 int mip_status; 399 int mip_status;
@@ -369,53 +401,15 @@ void __init es7000_enable_apic_mode(void)
369 if (!es7000_plat) 401 if (!es7000_plat)
370 return; 402 return;
371 403
372 printk("ES7000: Enabling APIC mode.\n"); 404 printk(KERN_INFO "ES7000: Enabling APIC mode.\n");
373 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg)); 405 memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
374 es7000_mip_reg.off_0 = MIP_SW_APIC; 406 es7000_mip_reg.off_0x00 = MIP_SW_APIC;
375 es7000_mip_reg.off_38 = MIP_VALID; 407 es7000_mip_reg.off_0x38 = MIP_VALID;
376 408
377 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) { 409 while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
378 printk("es7000_enable_apic_mode: command failed, status = %x\n", 410 WARN(1, "Command failed, status = %x\n", mip_status);
379 mip_status);
380 }
381} 411}
382 412
383/*
384 * APIC driver for the Unisys ES7000 chipset.
385 */
386#define APIC_DEFINITION 1
387#include <linux/threads.h>
388#include <linux/cpumask.h>
389#include <asm/mpspec.h>
390#include <asm/genapic.h>
391#include <asm/fixmap.h>
392#include <asm/apicdef.h>
393#include <linux/kernel.h>
394#include <linux/string.h>
395#include <linux/init.h>
396#include <linux/acpi.h>
397#include <linux/smp.h>
398#include <asm/ipi.h>
399
400#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
401#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
402#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
403
404#define APIC_DFR_VALUE (APIC_DFR_FLAT)
405
406extern void es7000_enable_apic_mode(void);
407extern int apic_version [MAX_APICS];
408extern u8 cpu_2_logical_apicid[];
409extern unsigned int boot_cpu_physical_apicid;
410
411extern int parse_unisys_oem (char *oemptr);
412extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
413extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
414extern void setup_unisys(void);
415
416#define apicid_cluster(apicid) (apicid & 0xF0)
417#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
418
419static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) 413static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
420{ 414{
421 /* Careful. Some cpus do not strictly honor the set of cpus 415 /* Careful. Some cpus do not strictly honor the set of cpus
@@ -432,11 +426,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
432 426
433static void es7000_wait_for_init_deassert(atomic_t *deassert) 427static void es7000_wait_for_init_deassert(atomic_t *deassert)
434{ 428{
435#ifndef CONFIG_ES7000_CLUSTERED_APIC
436 while (!atomic_read(deassert)) 429 while (!atomic_read(deassert))
437 cpu_relax(); 430 cpu_relax();
438#endif
439 return;
440} 431}
441 432
442static unsigned int es7000_get_apic_id(unsigned long x) 433static unsigned int es7000_get_apic_id(unsigned long x)
@@ -444,18 +435,6 @@ static unsigned int es7000_get_apic_id(unsigned long x)
444 return (x >> 24) & 0xFF; 435 return (x >> 24) & 0xFF;
445} 436}
446 437
447#ifdef CONFIG_ACPI
448static int es7000_check_dsdt(void)
449{
450 struct acpi_table_header header;
451
452 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
453 !strncmp(header.oem_id, "UNISYS", 6))
454 return 1;
455 return 0;
456}
457#endif
458
459static void es7000_send_IPI_mask(const struct cpumask *mask, int vector) 438static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
460{ 439{
461 default_send_IPI_mask_sequence_phys(mask, vector); 440 default_send_IPI_mask_sequence_phys(mask, vector);
@@ -473,7 +452,7 @@ static void es7000_send_IPI_all(int vector)
473 452
474static int es7000_apic_id_registered(void) 453static int es7000_apic_id_registered(void)
475{ 454{
476 return 1; 455 return 1;
477} 456}
478 457
479static const cpumask_t *target_cpus_cluster(void) 458static const cpumask_t *target_cpus_cluster(void)
@@ -498,9 +477,9 @@ static unsigned long es7000_check_apicid_present(int bit)
498 477
499static unsigned long calculate_ldr(int cpu) 478static unsigned long calculate_ldr(int cpu)
500{ 479{
501 unsigned long id = xapic_phys_to_log_apicid(cpu); 480 unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
502 481
503 return (SET_APIC_LOGICAL_ID(id)); 482 return SET_APIC_LOGICAL_ID(id);
504} 483}
505 484
506/* 485/*
@@ -515,7 +494,7 @@ static void es7000_init_apic_ldr_cluster(void)
515 unsigned long val; 494 unsigned long val;
516 int cpu = smp_processor_id(); 495 int cpu = smp_processor_id();
517 496
518 apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER); 497 apic_write(APIC_DFR, APIC_DFR_CLUSTER);
519 val = calculate_ldr(cpu); 498 val = calculate_ldr(cpu);
520 apic_write(APIC_LDR, val); 499 apic_write(APIC_LDR, val);
521} 500}
@@ -525,7 +504,7 @@ static void es7000_init_apic_ldr(void)
525 unsigned long val; 504 unsigned long val;
526 int cpu = smp_processor_id(); 505 int cpu = smp_processor_id();
527 506
528 apic_write(APIC_DFR, APIC_DFR_VALUE); 507 apic_write(APIC_DFR, APIC_DFR_FLAT);
529 val = calculate_ldr(cpu); 508 val = calculate_ldr(cpu);
530 apic_write(APIC_LDR, val); 509 apic_write(APIC_LDR, val);
531} 510}
@@ -533,10 +512,12 @@ static void es7000_init_apic_ldr(void)
533static void es7000_setup_apic_routing(void) 512static void es7000_setup_apic_routing(void)
534{ 513{
535 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); 514 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
536 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", 515
516 printk(KERN_INFO
517 "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
537 (apic_version[apic] == 0x14) ? 518 (apic_version[apic] == 0x14) ?
538 "Physical Cluster" : "Logical Cluster", 519 "Physical Cluster" : "Logical Cluster",
539 nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); 520 nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
540} 521}
541 522
542static int es7000_apicid_to_node(int logical_apicid) 523static int es7000_apicid_to_node(int logical_apicid)
@@ -550,18 +531,19 @@ static int es7000_cpu_present_to_apicid(int mps_cpu)
550 if (!mps_cpu) 531 if (!mps_cpu)
551 return boot_cpu_physical_apicid; 532 return boot_cpu_physical_apicid;
552 else if (mps_cpu < nr_cpu_ids) 533 else if (mps_cpu < nr_cpu_ids)
553 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); 534 return per_cpu(x86_bios_cpu_apicid, mps_cpu);
554 else 535 else
555 return BAD_APICID; 536 return BAD_APICID;
556} 537}
557 538
539static int cpu_id;
540
558static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) 541static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
559{ 542{
560 static int id = 0;
561 physid_mask_t mask; 543 physid_mask_t mask;
562 544
563 mask = physid_mask_of_physid(id); 545 mask = physid_mask_of_physid(cpu_id);
564 ++id; 546 ++cpu_id;
565 547
566 return mask; 548 return mask;
567} 549}
@@ -572,7 +554,7 @@ static int es7000_cpu_to_logical_apicid(int cpu)
572#ifdef CONFIG_SMP 554#ifdef CONFIG_SMP
573 if (cpu >= nr_cpu_ids) 555 if (cpu >= nr_cpu_ids)
574 return BAD_APICID; 556 return BAD_APICID;
575 return (int)cpu_2_logical_apicid[cpu]; 557 return cpu_2_logical_apicid[cpu];
576#else 558#else
577 return logical_smp_processor_id(); 559 return logical_smp_processor_id();
578#endif 560#endif
@@ -587,77 +569,27 @@ static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
587static int es7000_check_phys_apicid_present(int cpu_physical_apicid) 569static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
588{ 570{
589 boot_cpu_physical_apicid = read_apic_id(); 571 boot_cpu_physical_apicid = read_apic_id();
590 return (1); 572 return 1;
591}
592
593static unsigned int
594es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
595{
596 int cpus_found = 0;
597 int num_bits_set;
598 int apicid;
599 int cpu;
600
601 num_bits_set = cpumask_weight(cpumask);
602 /* Return id to all */
603 if (num_bits_set == nr_cpu_ids)
604 return 0xFF;
605 /*
606 * The cpus in the mask must all be on the apic cluster. If are not
607 * on the same apicid cluster return default value of target_cpus():
608 */
609 cpu = cpumask_first(cpumask);
610 apicid = es7000_cpu_to_logical_apicid(cpu);
611
612 while (cpus_found < num_bits_set) {
613 if (cpumask_test_cpu(cpu, cpumask)) {
614 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
615
616 if (apicid_cluster(apicid) !=
617 apicid_cluster(new_apicid)) {
618 printk ("%s: Not a valid mask!\n", __func__);
619
620 return 0xFF;
621 }
622 apicid = new_apicid;
623 cpus_found++;
624 }
625 cpu++;
626 }
627 return apicid;
628} 573}
629 574
630static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) 575static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
631{ 576{
632 int cpus_found = 0; 577 unsigned int round = 0;
633 int num_bits_set; 578 int cpu, uninitialized_var(apicid);
634 int apicid;
635 int cpu;
636 579
637 num_bits_set = cpus_weight(*cpumask);
638 /* Return id to all */
639 if (num_bits_set == nr_cpu_ids)
640 return es7000_cpu_to_logical_apicid(0);
641 /* 580 /*
642 * The cpus in the mask must all be on the apic cluster. If are not 581 * The cpus in the mask must all be on the apic cluster.
643 * on the same apicid cluster return default value of target_cpus():
644 */ 582 */
645 cpu = first_cpu(*cpumask); 583 for_each_cpu(cpu, cpumask) {
646 apicid = es7000_cpu_to_logical_apicid(cpu); 584 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
647 while (cpus_found < num_bits_set) {
648 if (cpu_isset(cpu, *cpumask)) {
649 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
650 585
651 if (apicid_cluster(apicid) != 586 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
652 apicid_cluster(new_apicid)) { 587 WARN(1, "Not a valid mask!");
653 printk ("%s: Not a valid mask!\n", __func__);
654 588
655 return es7000_cpu_to_logical_apicid(0); 589 return BAD_APICID;
656 }
657 apicid = new_apicid;
658 cpus_found++;
659 } 590 }
660 cpu++; 591 apicid = new_apicid;
592 round++;
661 } 593 }
662 return apicid; 594 return apicid;
663} 595}
@@ -686,70 +618,105 @@ static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
686 return cpuid_apic >> index_msb; 618 return cpuid_apic >> index_msb;
687} 619}
688 620
689void __init es7000_update_genapic_to_cluster(void)
690{
691 apic->target_cpus = target_cpus_cluster;
692 apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
693 apic->irq_dest_mode = INT_DEST_MODE_CLUSTER;
694
695 apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
696
697 apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
698}
699
700static int probe_es7000(void) 621static int probe_es7000(void)
701{ 622{
702 /* probed later in mptable/ACPI hooks */ 623 /* probed later in mptable/ACPI hooks */
703 return 0; 624 return 0;
704} 625}
705 626
706static __init int 627static int es7000_mps_ret;
707es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) 628static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem,
629 char *productid)
708{ 630{
631 int ret = 0;
632
709 if (mpc->oemptr) { 633 if (mpc->oemptr) {
710 struct mpc_oemtable *oem_table = 634 struct mpc_oemtable *oem_table =
711 (struct mpc_oemtable *)mpc->oemptr; 635 (struct mpc_oemtable *)mpc->oemptr;
712 636
713 if (!strncmp(oem, "UNISYS", 6)) 637 if (!strncmp(oem, "UNISYS", 6))
714 return parse_unisys_oem((char *)oem_table); 638 ret = parse_unisys_oem((char *)oem_table);
715 } 639 }
716 return 0;
717}
718 640
719#ifdef CONFIG_ACPI 641 es7000_mps_ret = ret;
720/* Hook from generic ACPI tables.c */
721static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
722{
723 unsigned long oem_addr = 0;
724 int check_dsdt;
725 int ret = 0;
726 642
727 /* check dsdt at first to avoid clear fix_map for oem_addr */ 643 return ret && !es7000_apic_is_cluster();
728 check_dsdt = es7000_check_dsdt();
729
730 if (!find_unisys_acpi_oem_table(&oem_addr)) {
731 if (check_dsdt)
732 ret = parse_unisys_oem((char *)oem_addr);
733 else {
734 setup_unisys();
735 ret = 1;
736 }
737 /*
738 * we need to unmap it
739 */
740 unmap_unisys_acpi_oem_table(oem_addr);
741 }
742 return ret;
743} 644}
744#else 645
745static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 646static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
647 char *productid)
746{ 648{
747 return 0; 649 int ret = es7000_mps_ret;
650
651 return ret && es7000_apic_is_cluster();
748} 652}
749#endif
750 653
654struct apic apic_es7000_cluster = {
655
656 .name = "es7000",
657 .probe = probe_es7000,
658 .acpi_madt_oem_check = es7000_acpi_madt_oem_check_cluster,
659 .apic_id_registered = es7000_apic_id_registered,
660
661 .irq_delivery_mode = dest_LowestPrio,
662 /* logical delivery broadcast to all procs: */
663 .irq_dest_mode = 1,
664
665 .target_cpus = target_cpus_cluster,
666 .disable_esr = 1,
667 .dest_logical = 0,
668 .check_apicid_used = es7000_check_apicid_used,
669 .check_apicid_present = es7000_check_apicid_present,
670
671 .vector_allocation_domain = es7000_vector_allocation_domain,
672 .init_apic_ldr = es7000_init_apic_ldr_cluster,
673
674 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
675 .setup_apic_routing = es7000_setup_apic_routing,
676 .multi_timer_check = NULL,
677 .apicid_to_node = es7000_apicid_to_node,
678 .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
679 .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
680 .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
681 .setup_portio_remap = NULL,
682 .check_phys_apicid_present = es7000_check_phys_apicid_present,
683 .enable_apic_mode = es7000_enable_apic_mode,
684 .phys_pkg_id = es7000_phys_pkg_id,
685 .mps_oem_check = es7000_mps_oem_check_cluster,
686
687 .get_apic_id = es7000_get_apic_id,
688 .set_apic_id = NULL,
689 .apic_id_mask = 0xFF << 24,
690
691 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
692 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
693
694 .send_IPI_mask = es7000_send_IPI_mask,
695 .send_IPI_mask_allbutself = NULL,
696 .send_IPI_allbutself = es7000_send_IPI_allbutself,
697 .send_IPI_all = es7000_send_IPI_all,
698 .send_IPI_self = default_send_IPI_self,
699
700 .wakeup_secondary_cpu = wakeup_secondary_cpu_via_mip,
701
702 .trampoline_phys_low = 0x467,
703 .trampoline_phys_high = 0x469,
751 704
752struct genapic apic_es7000 = { 705 .wait_for_init_deassert = NULL,
706
707 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
708 .smp_callin_clear_local_apic = NULL,
709 .inquire_remote_apic = default_inquire_remote_apic,
710
711 .read = native_apic_mem_read,
712 .write = native_apic_mem_write,
713 .icr_read = native_apic_icr_read,
714 .icr_write = native_apic_icr_write,
715 .wait_icr_idle = native_apic_wait_icr_idle,
716 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
717};
718
719struct apic apic_es7000 = {
753 720
754 .name = "es7000", 721 .name = "es7000",
755 .probe = probe_es7000, 722 .probe = probe_es7000,
@@ -795,8 +762,6 @@ struct genapic apic_es7000 = {
795 .send_IPI_all = es7000_send_IPI_all, 762 .send_IPI_all = es7000_send_IPI_all,
796 .send_IPI_self = default_send_IPI_self, 763 .send_IPI_self = default_send_IPI_self,
797 764
798 .wakeup_cpu = NULL,
799
800 .trampoline_phys_low = 0x467, 765 .trampoline_phys_low = 0x467,
801 .trampoline_phys_high = 0x469, 766 .trampoline_phys_high = 0x469,
802 767
@@ -804,6 +769,12 @@ struct genapic apic_es7000 = {
804 769
805 /* Nothing to do for most platforms, since cleared by the INIT cycle: */ 770 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
806 .smp_callin_clear_local_apic = NULL, 771 .smp_callin_clear_local_apic = NULL,
807 .store_NMI_vector = NULL,
808 .inquire_remote_apic = default_inquire_remote_apic, 772 .inquire_remote_apic = default_inquire_remote_apic,
773
774 .read = native_apic_mem_read,
775 .write = native_apic_mem_write,
776 .icr_read = native_apic_icr_read,
777 .icr_write = native_apic_icr_write,
778 .wait_icr_idle = native_apic_wait_icr_idle,
779 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
809}; 780};
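
Taken together, the es7000 changes in this file split one driver into two full struct apic instances: apic_es7000 keeps the flat/physical behaviour, while the new apic_es7000_cluster carries the lowest-priority logical-cluster variants of target_cpus, init_apic_ldr, the MPS/ACPI OEM checks and the MIP-based secondary wakeup, so the old es7000_update_genapic_to_cluster() patch-up of a live structure disappears. Below is a compilable toy version of that "two ops tables sharing their helpers" layout; the struct layout and all names are invented for the illustration.

#include <stdio.h>

/* Cut-down stand-in for the kernel's struct apic: only the fields the sketch uses. */
struct apic_ops {
        const char *name;
        int   (*probe)(void);
        void  (*init_apic_ldr)(void);
};

static int  probe_demo(void)       { return 0; }  /* the real driver is probed via OEM tables */
static void init_ldr_flat(void)    { puts("program LDR for flat delivery"); }
static void init_ldr_cluster(void) { puts("program LDR for clustered delivery"); }

/* Two instances of one driver; every callback not shown here would simply be shared. */
static const struct apic_ops apic_demo = {
        .name          = "demo",
        .probe         = probe_demo,
        .init_apic_ldr = init_ldr_flat,
};

static const struct apic_ops apic_demo_cluster = {
        .name          = "demo-cluster",
        .probe         = probe_demo,
        .init_apic_ldr = init_ldr_cluster,
};

int main(void)
{
        int oem_says_cluster = 1;       /* what the MPS/ACPI OEM check would report */
        const struct apic_ops *apic = oem_says_cluster ? &apic_demo_cluster
                                                       : &apic_demo;

        printf("using %s\n", apic->name);
        apic->init_apic_ldr();
        return 0;
}

Selecting a different constant structure at probe time avoids mutating a shared ops table after other code may already have sampled its function pointers, which appears to be the design point of dropping es7000_update_genapic_to_cluster().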
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a89878e08a42..00e6071cefc4 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -62,7 +62,7 @@
62#include <asm/uv/uv_hub.h> 62#include <asm/uv/uv_hub.h>
63#include <asm/uv/uv_irq.h> 63#include <asm/uv/uv_irq.h>
64 64
65#include <asm/genapic.h> 65#include <asm/apic.h>
66 66
67#define __apicdebuginit(type) static type __init 67#define __apicdebuginit(type) static type __init
68 68
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/apic/ipi.c
index dbf5445727a9..dbf5445727a9 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/apic/nmi.c
index bdfad80c3cf1..bdfad80c3cf1 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index 0cc41a1d2550..ba2fc6465534 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -2,6 +2,7 @@
2 * Written by: Patricia Gaughen, IBM Corporation 2 * Written by: Patricia Gaughen, IBM Corporation
3 * 3 *
4 * Copyright (C) 2002, IBM Corp. 4 * Copyright (C) 2002, IBM Corp.
5 * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
5 * 6 *
6 * All rights reserved. 7 * All rights reserved.
7 * 8 *
@@ -22,21 +23,81 @@
22 * 23 *
23 * Send feedback to <gone@us.ibm.com> 24 * Send feedback to <gone@us.ibm.com>
24 */ 25 */
25
26#include <linux/nodemask.h> 26#include <linux/nodemask.h>
27#include <linux/topology.h>
27#include <linux/bootmem.h> 28#include <linux/bootmem.h>
29#include <linux/threads.h>
30#include <linux/cpumask.h>
31#include <linux/kernel.h>
28#include <linux/mmzone.h> 32#include <linux/mmzone.h>
29#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/string.h>
35#include <linux/init.h>
36#include <linux/numa.h>
37#include <linux/smp.h>
38#include <linux/io.h>
30#include <linux/mm.h> 39#include <linux/mm.h>
31 40
32#include <asm/processor.h> 41#include <asm/processor.h>
33#include <asm/topology.h> 42#include <asm/fixmap.h>
34#include <asm/genapic.h> 43#include <asm/mpspec.h>
35#include <asm/numaq.h> 44#include <asm/numaq.h>
36#include <asm/setup.h> 45#include <asm/setup.h>
46#include <asm/apic.h>
37#include <asm/e820.h> 47#include <asm/e820.h>
48#include <asm/ipi.h>
49
50#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
51
52int found_numaq;
53
54/*
55 * Have to match translation table entries to main table entries by counter
56 * hence the mpc_record variable .... can't see a less disgusting way of
57 * doing this ....
58 */
59struct mpc_trans {
60 unsigned char mpc_type;
61 unsigned char trans_len;
62 unsigned char trans_type;
63 unsigned char trans_quad;
64 unsigned char trans_global;
65 unsigned char trans_local;
66 unsigned short trans_reserved;
67};
68
69/* x86_quirks member */
70static int mpc_record;
71
72static struct mpc_trans *translation_table[MAX_MPC_ENTRY];
73
74int mp_bus_id_to_node[MAX_MP_BUSSES];
75int mp_bus_id_to_local[MAX_MP_BUSSES];
76int quad_local_to_mp_bus_id[NR_CPUS/4][4];
77
78
79static inline void numaq_register_node(int node, struct sys_cfg_data *scd)
80{
81 struct eachquadmem *eq = scd->eq + node;
82
83 node_set_online(node);
84
85 /* Convert to pages */
86 node_start_pfn[node] =
87 MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size);
88
89 node_end_pfn[node] =
90 MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
38 91
39#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) 92 e820_register_active_regions(node, node_start_pfn[node],
93 node_end_pfn[node]);
94
95 memory_present(node, node_start_pfn[node], node_end_pfn[node]);
96
97 node_remap_size[node] = node_memmap_size_bytes(node,
98 node_start_pfn[node],
99 node_end_pfn[node]);
100}
40 101
41/* 102/*
42 * Function: smp_dump_qct() 103 * Function: smp_dump_qct()
@@ -46,34 +107,18 @@
46 */ 107 */
47static void __init smp_dump_qct(void) 108static void __init smp_dump_qct(void)
48{ 109{
110 struct sys_cfg_data *scd;
49 int node; 111 int node;
50 struct eachquadmem *eq; 112
51 struct sys_cfg_data *scd = 113 scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR);
52 (struct sys_cfg_data *)__va(SYS_CFG_DATA_PRIV_ADDR);
53 114
54 nodes_clear(node_online_map); 115 nodes_clear(node_online_map);
55 for_each_node(node) { 116 for_each_node(node) {
56 if (scd->quads_present31_0 & (1 << node)) { 117 if (scd->quads_present31_0 & (1 << node))
57 node_set_online(node); 118 numaq_register_node(node, scd);
58 eq = &scd->eq[node];
59 /* Convert to pages */
60 node_start_pfn[node] = MB_TO_PAGES(
61 eq->hi_shrd_mem_start - eq->priv_mem_size);
62 node_end_pfn[node] = MB_TO_PAGES(
63 eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
64
65 e820_register_active_regions(node, node_start_pfn[node],
66 node_end_pfn[node]);
67 memory_present(node,
68 node_start_pfn[node], node_end_pfn[node]);
69 node_remap_size[node] = node_memmap_size_bytes(node,
70 node_start_pfn[node],
71 node_end_pfn[node]);
72 }
73 } 119 }
74} 120}
75 121
76
77void __cpuinit numaq_tsc_disable(void) 122void __cpuinit numaq_tsc_disable(void)
78{ 123{
79 if (!found_numaq) 124 if (!found_numaq)
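
The smp_dump_qct() rewrite above moves the per-quad bookkeeping into the new numaq_register_node() helper, which marks the node online and converts the quad's shared-memory window from megabytes into page frame numbers with MB_TO_PAGES() before registering it with e820. A tiny standalone illustration of that unit conversion follows; PAGE_SHIFT and the field values are assumptions for the example, not taken from a real system configuration table.

#include <stdio.h>

#define PAGE_SHIFT      12      /* 4 KiB pages, as on x86 */
#define MB_TO_PAGES(mb) ((unsigned long)(mb) << (20 - PAGE_SHIFT))

/* Simplified stand-in for the per-quad memory descriptor (struct eachquadmem). */
struct quad_mem {
        unsigned long hi_shrd_mem_start;        /* MB */
        unsigned long hi_shrd_mem_size;         /* MB */
        unsigned long priv_mem_size;            /* MB */
};

int main(void)
{
        struct quad_mem eq = {
                .hi_shrd_mem_start = 1024,
                .hi_shrd_mem_size  = 2048,
                .priv_mem_size     = 256,
        };

        /* Same arithmetic as numaq_register_node(): the node's span in page frames. */
        unsigned long start_pfn = MB_TO_PAGES(eq.hi_shrd_mem_start - eq.priv_mem_size);
        unsigned long end_pfn   = MB_TO_PAGES(eq.hi_shrd_mem_start + eq.hi_shrd_mem_size);

        printf("node PFN range: %lu .. %lu\n", start_pfn, end_pfn);
        return 0;
}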
@@ -91,28 +136,6 @@ static int __init numaq_pre_time_init(void)
91 return 0; 136 return 0;
92} 137}
93 138
94int found_numaq;
95
96/*
97 * Have to match translation table entries to main table entries by counter
98 * hence the mpc_record variable .... can't see a less disgusting way of
99 * doing this ....
100 */
101struct mpc_config_translation {
102 unsigned char mpc_type;
103 unsigned char trans_len;
104 unsigned char trans_type;
105 unsigned char trans_quad;
106 unsigned char trans_global;
107 unsigned char trans_local;
108 unsigned short trans_reserved;
109};
110
111/* x86_quirks member */
112static int mpc_record;
113static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY]
114 __cpuinitdata;
115
116static inline int generate_logical_apicid(int quad, int phys_apicid) 139static inline int generate_logical_apicid(int quad, int phys_apicid)
117{ 140{
118 return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1); 141 return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
@@ -124,17 +147,15 @@ static int mpc_apic_id(struct mpc_cpu *m)
124 int quad = translation_table[mpc_record]->trans_quad; 147 int quad = translation_table[mpc_record]->trans_quad;
125 int logical_apicid = generate_logical_apicid(quad, m->apicid); 148 int logical_apicid = generate_logical_apicid(quad, m->apicid);
126 149
127 printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", 150 printk(KERN_DEBUG
128 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8, 151 "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
129 (m->cpufeature & CPU_MODEL_MASK) >> 4, 152 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
130 m->apicver, quad, logical_apicid); 153 (m->cpufeature & CPU_MODEL_MASK) >> 4,
154 m->apicver, quad, logical_apicid);
155
131 return logical_apicid; 156 return logical_apicid;
132} 157}
133 158
134int mp_bus_id_to_node[MAX_MP_BUSSES];
135
136int mp_bus_id_to_local[MAX_MP_BUSSES];
137
138/* x86_quirks member */ 159/* x86_quirks member */
139static void mpc_oem_bus_info(struct mpc_bus *m, char *name) 160static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
140{ 161{
@@ -143,11 +164,9 @@ static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
143 164
144 mp_bus_id_to_node[m->busid] = quad; 165 mp_bus_id_to_node[m->busid] = quad;
145 mp_bus_id_to_local[m->busid] = local; 166 mp_bus_id_to_local[m->busid] = local;
146 printk(KERN_INFO "Bus #%d is %s (node %d)\n",
147 m->busid, name, quad);
148}
149 167
150int quad_local_to_mp_bus_id [NR_CPUS/4][4]; 168 printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad);
169}
151 170
152/* x86_quirks member */ 171/* x86_quirks member */
153static void mpc_oem_pci_bus(struct mpc_bus *m) 172static void mpc_oem_pci_bus(struct mpc_bus *m)
@@ -158,17 +177,18 @@ static void mpc_oem_pci_bus(struct mpc_bus *m)
158 quad_local_to_mp_bus_id[quad][local] = m->busid; 177 quad_local_to_mp_bus_id[quad][local] = m->busid;
159} 178}
160 179
161static void __init MP_translation_info(struct mpc_config_translation *m) 180static void __init MP_translation_info(struct mpc_trans *m)
162{ 181{
163 printk(KERN_INFO 182 printk(KERN_INFO
164 "Translation: record %d, type %d, quad %d, global %d, local %d\n", 183 "Translation: record %d, type %d, quad %d, global %d, local %d\n",
165 mpc_record, m->trans_type, m->trans_quad, m->trans_global, 184 mpc_record, m->trans_type, m->trans_quad, m->trans_global,
166 m->trans_local); 185 m->trans_local);
167 186
168 if (mpc_record >= MAX_MPC_ENTRY) 187 if (mpc_record >= MAX_MPC_ENTRY)
169 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); 188 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
170 else 189 else
171 translation_table[mpc_record] = m; /* stash this for later */ 190 translation_table[mpc_record] = m; /* stash this for later */
191
172 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) 192 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
173 node_set_online(m->trans_quad); 193 node_set_online(m->trans_quad);
174} 194}
@@ -186,16 +206,16 @@ static int __init mpf_checksum(unsigned char *mp, int len)
186/* 206/*
187 * Read/parse the MPC oem tables 207 * Read/parse the MPC oem tables
188 */ 208 */
189 209static void __init
190static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable, 210 smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize)
191 unsigned short oemsize)
192{ 211{
193 int count = sizeof(*oemtable); /* the header size */ 212 int count = sizeof(*oemtable); /* the header size */
194 unsigned char *oemptr = ((unsigned char *)oemtable) + count; 213 unsigned char *oemptr = ((unsigned char *)oemtable) + count;
195 214
196 mpc_record = 0; 215 mpc_record = 0;
197 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", 216 printk(KERN_INFO
198 oemtable); 217 "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
218
199 if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) { 219 if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
200 printk(KERN_WARNING 220 printk(KERN_WARNING
201 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", 221 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
@@ -203,16 +223,18 @@ static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable,
203 oemtable->signature[2], oemtable->signature[3]); 223 oemtable->signature[2], oemtable->signature[3]);
204 return; 224 return;
205 } 225 }
226
206 if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) { 227 if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
207 printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); 228 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
208 return; 229 return;
209 } 230 }
231
210 while (count < oemtable->length) { 232 while (count < oemtable->length) {
211 switch (*oemptr) { 233 switch (*oemptr) {
212 case MP_TRANSLATION: 234 case MP_TRANSLATION:
213 { 235 {
214 struct mpc_config_translation *m = 236 struct mpc_trans *m = (void *)oemptr;
215 (struct mpc_config_translation *)oemptr; 237
216 MP_translation_info(m); 238 MP_translation_info(m);
217 oemptr += sizeof(*m); 239 oemptr += sizeof(*m);
218 count += sizeof(*m); 240 count += sizeof(*m);
@@ -220,12 +242,10 @@ static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable,
220 break; 242 break;
221 } 243 }
222 default: 244 default:
223 { 245 printk(KERN_WARNING
224 printk(KERN_WARNING 246 "Unrecognised OEM table entry type! - %d\n",
225 "Unrecognised OEM table entry type! - %d\n", 247 (int)*oemptr);
226 (int)*oemptr); 248 return;
227 return;
228 }
229 } 249 }
230 } 250 }
231} 251}
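
smp_read_mpc_oem() above is a classic variable-length table walk: check the four-byte signature, verify that the 8-bit checksum over the whole table is zero, then advance a byte pointer entry by entry, dispatching on the leading type byte and bailing out on anything unrecognised. The self-contained sketch below reproduces that shape on a made-up table; the structure names, the "_OEM" signature and the single entry type are all inventions for the example, not the real MPC OEM format.

#include <stdio.h>
#include <string.h>

#define ENTRY_TRANSLATION 0             /* made-up entry type for the sketch */

struct demo_oemtable {                  /* stand-in header, not the real mpc_oemtable */
        char          signature[4];
        unsigned char length;           /* bytes, including this header */
        unsigned char csum;             /* chosen so the whole table sums to 0 */
};

struct demo_trans {                     /* one fixed-size table entry */
        unsigned char type;
        unsigned char quad;
};

static int checksum(const unsigned char *p, int len)
{
        unsigned char sum = 0;

        while (len--)
                sum += *p++;
        return sum;                     /* must be 0 for a valid table */
}

static void walk_oem_table(const struct demo_oemtable *t)
{
        const unsigned char *p = (const unsigned char *)t + sizeof(*t);
        int count = sizeof(*t);

        if (memcmp(t->signature, "_OEM", 4) || checksum((const void *)t, t->length)) {
                puts("bad signature or checksum, ignoring table");
                return;
        }

        while (count < t->length) {
                switch (*p) {
                case ENTRY_TRANSLATION: {
                        const struct demo_trans *m = (const void *)p;

                        printf("translation entry: quad %d\n", m->quad);
                        p += sizeof(*m);
                        count += sizeof(*m);
                        break;
                }
                default:
                        printf("unrecognised entry type %d, stopping\n", *p);
                        return;
                }
        }
}

int main(void)
{
        struct {
                struct demo_oemtable hdr;
                struct demo_trans    entries[2];
        } table = {
                .hdr     = { .signature = "_OEM", .length = sizeof(table), .csum = 0 },
                .entries = { { ENTRY_TRANSLATION, 1 }, { ENTRY_TRANSLATION, 2 } },
        };

        /* Fix up the checksum byte so the whole table sums to zero. */
        table.hdr.csum = (unsigned char)(0 - checksum((const void *)&table, sizeof(table)));

        walk_oem_table(&table.hdr);
        return 0;
}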
@@ -236,45 +256,30 @@ static int __init numaq_setup_ioapic_ids(void)
236 return 1; 256 return 1;
237} 257}
238 258
239static int __init numaq_update_genapic(void)
240{
241 apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
242
243 return 0;
244}
245
246static struct x86_quirks numaq_x86_quirks __initdata = { 259static struct x86_quirks numaq_x86_quirks __initdata = {
247 .arch_pre_time_init = numaq_pre_time_init, 260 .arch_pre_time_init = numaq_pre_time_init,
248 .arch_time_init = NULL, 261 .arch_time_init = NULL,
249 .arch_pre_intr_init = NULL, 262 .arch_pre_intr_init = NULL,
250 .arch_memory_setup = NULL, 263 .arch_memory_setup = NULL,
251 .arch_intr_init = NULL, 264 .arch_intr_init = NULL,
252 .arch_trap_init = NULL, 265 .arch_trap_init = NULL,
253 .mach_get_smp_config = NULL, 266 .mach_get_smp_config = NULL,
254 .mach_find_smp_config = NULL, 267 .mach_find_smp_config = NULL,
255 .mpc_record = &mpc_record, 268 .mpc_record = &mpc_record,
256 .mpc_apic_id = mpc_apic_id, 269 .mpc_apic_id = mpc_apic_id,
257 .mpc_oem_bus_info = mpc_oem_bus_info, 270 .mpc_oem_bus_info = mpc_oem_bus_info,
258 .mpc_oem_pci_bus = mpc_oem_pci_bus, 271 .mpc_oem_pci_bus = mpc_oem_pci_bus,
259 .smp_read_mpc_oem = smp_read_mpc_oem, 272 .smp_read_mpc_oem = smp_read_mpc_oem,
260 .setup_ioapic_ids = numaq_setup_ioapic_ids, 273 .setup_ioapic_ids = numaq_setup_ioapic_ids,
261 .update_genapic = numaq_update_genapic,
262}; 274};
263 275
264void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
265{
266 if (strncmp(oem, "IBM NUMA", 8))
267 printk("Warning! Not a NUMA-Q system!\n");
268 else
269 found_numaq = 1;
270}
271
272static __init void early_check_numaq(void) 276static __init void early_check_numaq(void)
273{ 277{
274 /* 278 /*
275 * Find possible boot-time SMP configuration: 279 * Find possible boot-time SMP configuration:
276 */ 280 */
277 early_find_smp_config(); 281 early_find_smp_config();
282
278 /* 283 /*
279 * get boot-time SMP configuration: 284 * get boot-time SMP configuration:
280 */ 285 */
@@ -291,30 +296,10 @@ int __init get_memcfg_numaq(void)
291 if (!found_numaq) 296 if (!found_numaq)
292 return 0; 297 return 0;
293 smp_dump_qct(); 298 smp_dump_qct();
299
294 return 1; 300 return 1;
295} 301}
296 302
297/*
298 * APIC driver for the IBM NUMAQ chipset.
299 */
300#define APIC_DEFINITION 1
301#include <linux/threads.h>
302#include <linux/cpumask.h>
303#include <asm/mpspec.h>
304#include <asm/genapic.h>
305#include <asm/fixmap.h>
306#include <asm/apicdef.h>
307#include <asm/ipi.h>
308#include <linux/kernel.h>
309#include <linux/string.h>
310#include <linux/init.h>
311#include <linux/numa.h>
312#include <linux/smp.h>
313#include <asm/numaq.h>
314#include <asm/io.h>
315#include <linux/mmzone.h>
316#include <linux/nodemask.h>
317
318#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) 303#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
319 304
320static inline unsigned int numaq_get_apic_id(unsigned long x) 305static inline unsigned int numaq_get_apic_id(unsigned long x)
@@ -337,10 +322,8 @@ static inline void numaq_send_IPI_all(int vector)
337 numaq_send_IPI_mask(cpu_online_mask, vector); 322 numaq_send_IPI_mask(cpu_online_mask, vector);
338} 323}
339 324
340extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); 325#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
341 326#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)
342#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
343#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)
344 327
345/* 328/*
346 * Because we use NMIs rather than the INIT-STARTUP sequence to 329 * Because we use NMIs rather than the INIT-STARTUP sequence to
@@ -351,16 +334,6 @@ static inline void numaq_smp_callin_clear_local_apic(void)
351 clear_local_APIC(); 334 clear_local_APIC();
352} 335}
353 336
354static inline void
355numaq_store_NMI_vector(unsigned short *high, unsigned short *low)
356{
357 printk("Storing NMI vector\n");
358 *high =
359 *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH));
360 *low =
361 *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW));
362}
363
364static inline const cpumask_t *numaq_target_cpus(void) 337static inline const cpumask_t *numaq_target_cpus(void)
365{ 338{
366 return &CPU_MASK_ALL; 339 return &CPU_MASK_ALL;
@@ -377,8 +350,6 @@ static inline unsigned long numaq_check_apicid_present(int bit)
377 return physid_isset(bit, phys_cpu_present_map); 350 return physid_isset(bit, phys_cpu_present_map);
378} 351}
379 352
380#define apicid_cluster(apicid) (apicid & 0xF0)
381
382static inline int numaq_apic_id_registered(void) 353static inline int numaq_apic_id_registered(void)
383{ 354{
384 return 1; 355 return 1;
@@ -391,8 +362,9 @@ static inline void numaq_init_apic_ldr(void)
391 362
392static inline void numaq_setup_apic_routing(void) 363static inline void numaq_setup_apic_routing(void)
393{ 364{
394 printk("Enabling APIC mode: %s. Using %d I/O APICs\n", 365 printk(KERN_INFO
395 "NUMA-Q", nr_ioapics); 366 "Enabling APIC mode: NUMA-Q. Using %d I/O APICs\n",
367 nr_ioapics);
396} 368}
397 369
398/* 370/*
@@ -410,14 +382,11 @@ static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
410 return physids_promote(0xFUL); 382 return physids_promote(0xFUL);
411} 383}
412 384
413/* Mapping from cpu number to logical apicid */
414extern u8 cpu_2_logical_apicid[];
415
416static inline int numaq_cpu_to_logical_apicid(int cpu) 385static inline int numaq_cpu_to_logical_apicid(int cpu)
417{ 386{
418 if (cpu >= nr_cpu_ids) 387 if (cpu >= nr_cpu_ids)
419 return BAD_APICID; 388 return BAD_APICID;
420 return (int)cpu_2_logical_apicid[cpu]; 389 return cpu_2_logical_apicid[cpu];
421} 390}
422 391
423/* 392/*
@@ -433,7 +402,7 @@ static inline int numaq_cpu_present_to_apicid(int mps_cpu)
433 return BAD_APICID; 402 return BAD_APICID;
434} 403}
435 404
436static inline int numaq_apicid_to_node(int logical_apicid) 405static inline int numaq_apicid_to_node(int logical_apicid)
437{ 406{
438 return logical_apicid >> 4; 407 return logical_apicid >> 4;
439} 408}
@@ -475,9 +444,15 @@ static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
475{ 444{
476 return cpuid_apic >> index_msb; 445 return cpuid_apic >> index_msb;
477} 446}
478static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) 447
448static int
449numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
479{ 450{
480 numaq_mps_oem_check(mpc, oem, productid); 451 if (strncmp(oem, "IBM NUMA", 8))
452 printk(KERN_ERR "Warning! Not a NUMA-Q system!\n");
453 else
454 found_numaq = 1;
455
481 return found_numaq; 456 return found_numaq;
482} 457}
483 458
@@ -507,13 +482,17 @@ static void numaq_setup_portio_remap(void)
507 if (num_quads <= 1) 482 if (num_quads <= 1)
508 return; 483 return;
509 484
510 printk("Remapping cross-quad port I/O for %d quads\n", num_quads); 485 printk(KERN_INFO
486 "Remapping cross-quad port I/O for %d quads\n", num_quads);
487
511 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); 488 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
512 printk("xquad_portio vaddr 0x%08lx, len %08lx\n", 489
490 printk(KERN_INFO
491 "xquad_portio vaddr 0x%08lx, len %08lx\n",
513 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); 492 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
514} 493}
515 494
516struct genapic apic_numaq = { 495struct apic apic_numaq = {
517 496
518 .name = "NUMAQ", 497 .name = "NUMAQ",
519 .probe = probe_numaq, 498 .probe = probe_numaq,
@@ -544,7 +523,7 @@ struct genapic apic_numaq = {
544 .check_phys_apicid_present = numaq_check_phys_apicid_present, 523 .check_phys_apicid_present = numaq_check_phys_apicid_present,
545 .enable_apic_mode = NULL, 524 .enable_apic_mode = NULL,
546 .phys_pkg_id = numaq_phys_pkg_id, 525 .phys_pkg_id = numaq_phys_pkg_id,
547 .mps_oem_check = __numaq_mps_oem_check, 526 .mps_oem_check = numaq_mps_oem_check,
548 527
549 .get_apic_id = numaq_get_apic_id, 528 .get_apic_id = numaq_get_apic_id,
550 .set_apic_id = NULL, 529 .set_apic_id = NULL,
@@ -559,7 +538,7 @@ struct genapic apic_numaq = {
559 .send_IPI_all = numaq_send_IPI_all, 538 .send_IPI_all = numaq_send_IPI_all,
560 .send_IPI_self = default_send_IPI_self, 539 .send_IPI_self = default_send_IPI_self,
561 540
562 .wakeup_cpu = NULL, 541 .wakeup_secondary_cpu = wakeup_secondary_cpu_via_nmi,
563 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, 542 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
564 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, 543 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
565 544
@@ -567,6 +546,12 @@ struct genapic apic_numaq = {
567 .wait_for_init_deassert = NULL, 546 .wait_for_init_deassert = NULL,
568 547
569 .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic, 548 .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic,
570 .store_NMI_vector = numaq_store_NMI_vector,
571 .inquire_remote_apic = NULL, 549 .inquire_remote_apic = NULL,
550
551 .read = native_apic_mem_read,
552 .write = native_apic_mem_write,
553 .icr_read = native_apic_icr_read,
554 .icr_write = native_apic_icr_write,
555 .wait_icr_idle = native_apic_wait_icr_idle,
556 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
572}; 557};
diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 22337b75de62..141c99a1c264 100644
--- a/arch/x86/kernel/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <linux/threads.h> 9#include <linux/threads.h>
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <linux/module.h>
11#include <linux/string.h> 12#include <linux/string.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/ctype.h> 14#include <linux/ctype.h>
@@ -16,32 +17,27 @@
16#include <asm/fixmap.h> 17#include <asm/fixmap.h>
17#include <asm/mpspec.h> 18#include <asm/mpspec.h>
18#include <asm/apicdef.h> 19#include <asm/apicdef.h>
19#include <asm/genapic.h> 20#include <asm/apic.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
21 22
22#include <linux/threads.h> 23#include <linux/threads.h>
23#include <linux/cpumask.h> 24#include <linux/cpumask.h>
24#include <asm/mpspec.h> 25#include <asm/mpspec.h>
25#include <asm/genapic.h>
26#include <asm/fixmap.h> 26#include <asm/fixmap.h>
27#include <asm/apicdef.h> 27#include <asm/apicdef.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/smp.h> 30#include <linux/smp.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <asm/genapic.h>
33#include <asm/ipi.h> 32#include <asm/ipi.h>
34 33
35#include <linux/smp.h> 34#include <linux/smp.h>
36#include <linux/init.h> 35#include <linux/init.h>
37#include <linux/interrupt.h> 36#include <linux/interrupt.h>
38#include <asm/acpi.h> 37#include <asm/acpi.h>
39#include <asm/arch_hooks.h>
40#include <asm/e820.h> 38#include <asm/e820.h>
41#include <asm/setup.h> 39#include <asm/setup.h>
42 40
43#include <asm/genapic.h>
44
45#ifdef CONFIG_HOTPLUG_CPU 41#ifdef CONFIG_HOTPLUG_CPU
46#define DEFAULT_SEND_IPI (1) 42#define DEFAULT_SEND_IPI (1)
47#else 43#else
@@ -50,7 +46,31 @@
50 46
51int no_broadcast = DEFAULT_SEND_IPI; 47int no_broadcast = DEFAULT_SEND_IPI;
52 48
53#ifdef CONFIG_X86_LOCAL_APIC 49static __init int no_ipi_broadcast(char *str)
50{
51 get_option(&str, &no_broadcast);
52 pr_info("Using %s mode\n",
53 no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
54 return 1;
55}
56__setup("no_ipi_broadcast=", no_ipi_broadcast);
57
58static int __init print_ipi_mode(void)
59{
60 pr_info("Using IPI %s mode\n",
61 no_broadcast ? "No-Shortcut" : "Shortcut");
62 return 0;
63}
64late_initcall(print_ipi_mode);
65
66void default_setup_apic_routing(void)
67{
68#ifdef CONFIG_X86_IO_APIC
69 printk(KERN_INFO
70 "Enabling APIC mode: Flat. Using %d I/O APICs\n",
71 nr_ioapics);
72#endif
73}
54 74
55static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) 75static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
56{ 76{
@@ -72,7 +92,7 @@ static int probe_default(void)
72 return 1; 92 return 1;
73} 93}
74 94
75struct genapic apic_default = { 95struct apic apic_default = {
76 96
77 .name = "default", 97 .name = "default",
78 .probe = probe_default, 98 .probe = probe_default,
@@ -118,26 +138,33 @@ struct genapic apic_default = {
118 .send_IPI_all = default_send_IPI_all, 138 .send_IPI_all = default_send_IPI_all,
119 .send_IPI_self = default_send_IPI_self, 139 .send_IPI_self = default_send_IPI_self,
120 140
121 .wakeup_cpu = NULL,
122 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 141 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
123 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 142 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
124 143
125 .wait_for_init_deassert = default_wait_for_init_deassert, 144 .wait_for_init_deassert = default_wait_for_init_deassert,
126 145
127 .smp_callin_clear_local_apic = NULL, 146 .smp_callin_clear_local_apic = NULL,
128 .store_NMI_vector = NULL,
129 .inquire_remote_apic = default_inquire_remote_apic, 147 .inquire_remote_apic = default_inquire_remote_apic,
148
149 .read = native_apic_mem_read,
150 .write = native_apic_mem_write,
151 .icr_read = native_apic_icr_read,
152 .icr_write = native_apic_icr_write,
153 .wait_icr_idle = native_apic_wait_icr_idle,
154 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
130}; 155};
131 156
132extern struct genapic apic_numaq; 157extern struct apic apic_numaq;
133extern struct genapic apic_summit; 158extern struct apic apic_summit;
134extern struct genapic apic_bigsmp; 159extern struct apic apic_bigsmp;
135extern struct genapic apic_es7000; 160extern struct apic apic_es7000;
136extern struct genapic apic_default; 161extern struct apic apic_es7000_cluster;
162extern struct apic apic_default;
137 163
138struct genapic *apic = &apic_default; 164struct apic *apic = &apic_default;
165EXPORT_SYMBOL_GPL(apic);
139 166
140static struct genapic *apic_probe[] __initdata = { 167static struct apic *apic_probe[] __initdata = {
141#ifdef CONFIG_X86_NUMAQ 168#ifdef CONFIG_X86_NUMAQ
142 &apic_numaq, 169 &apic_numaq,
143#endif 170#endif
@@ -149,6 +176,7 @@ static struct genapic *apic_probe[] __initdata = {
149#endif 176#endif
150#ifdef CONFIG_X86_ES7000 177#ifdef CONFIG_X86_ES7000
151 &apic_es7000, 178 &apic_es7000,
179 &apic_es7000_cluster,
152#endif 180#endif
153 &apic_default, /* must be last */ 181 &apic_default, /* must be last */
154 NULL, 182 NULL,
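
The probe_32.c hunks keep the 32-bit selection model intact: every subarchitecture driver is one entry in a NULL-terminated apic_probe[] array, the generic code walks it until a probe() or OEM/ACPI check accepts the machine, and apic_default sits last as the fallback, so apic_es7000_cluster only has to be appended to the list. A toy version of that selection loop follows, with invented driver names and trivial probe functions standing in for the real checks.

#include <stdio.h>
#include <stddef.h>

struct apic_drv {
        const char *name;
        int (*probe)(void);             /* non-zero: this driver matches the machine */
};

static int probe_never(void)  { return 0; }
static int probe_always(void) { return 1; }     /* stands in for apic_default */

static const struct apic_drv drv_numaq   = { "NUMAQ",   probe_never  };
static const struct apic_drv drv_es7000  = { "es7000",  probe_never  };
static const struct apic_drv drv_default = { "default", probe_always };

/* Candidate list: most specific drivers first, the default last, NULL-terminated. */
static const struct apic_drv *const probe_list[] = {
        &drv_numaq,
        &drv_es7000,
        &drv_default,   /* must be last */
        NULL,
};

int main(void)
{
        const struct apic_drv *apic = NULL;

        for (int i = 0; probe_list[i]; i++) {
                if (probe_list[i]->probe()) {
                        apic = probe_list[i];
                        break;
                }
        }
        if (!apic) {
                puts("Didn't find an APIC driver");
                return 1;
        }
        printf("Using APIC driver %s\n", apic->name);
        return 0;
}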
@@ -170,9 +198,6 @@ static int __init parse_apic(char *arg)
170 } 198 }
171 } 199 }
172 200
173 if (x86_quirks->update_genapic)
174 x86_quirks->update_genapic();
175
176 /* Parsed again by __setup for debug/verbose */ 201 /* Parsed again by __setup for debug/verbose */
177 return 0; 202 return 0;
178} 203}
@@ -191,8 +216,6 @@ void __init generic_bigsmp_probe(void)
191 if (!cmdline_apic && apic == &apic_default) { 216 if (!cmdline_apic && apic == &apic_default) {
192 if (apic_bigsmp.probe()) { 217 if (apic_bigsmp.probe()) {
193 apic = &apic_bigsmp; 218 apic = &apic_bigsmp;
194 if (x86_quirks->update_genapic)
195 x86_quirks->update_genapic();
196 printk(KERN_INFO "Overriding APIC driver with %s\n", 219 printk(KERN_INFO "Overriding APIC driver with %s\n",
197 apic->name); 220 apic->name);
198 } 221 }
@@ -213,9 +236,6 @@ void __init generic_apic_probe(void)
213 /* Not visible without early console */ 236 /* Not visible without early console */
214 if (!apic_probe[i]) 237 if (!apic_probe[i])
215 panic("Didn't find an APIC driver"); 238 panic("Didn't find an APIC driver");
216
217 if (x86_quirks->update_genapic)
218 x86_quirks->update_genapic();
219 } 239 }
220 printk(KERN_INFO "Using APIC driver %s\n", apic->name); 240 printk(KERN_INFO "Using APIC driver %s\n", apic->name);
221} 241}
@@ -235,8 +255,6 @@ generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
235 255
236 if (!cmdline_apic) { 256 if (!cmdline_apic) {
237 apic = apic_probe[i]; 257 apic = apic_probe[i];
238 if (x86_quirks->update_genapic)
239 x86_quirks->update_genapic();
240 printk(KERN_INFO "Switched to APIC driver `%s'.\n", 258 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
241 apic->name); 259 apic->name);
242 } 260 }
@@ -257,8 +275,6 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
257 275
258 if (!cmdline_apic) { 276 if (!cmdline_apic) {
259 apic = apic_probe[i]; 277 apic = apic_probe[i];
260 if (x86_quirks->update_genapic)
261 x86_quirks->update_genapic();
262 printk(KERN_INFO "Switched to APIC driver `%s'.\n", 278 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
263 apic->name); 279 apic->name);
264 } 280 }
@@ -266,146 +282,3 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
266 } 282 }
267 return 0; 283 return 0;
268} 284}
269
270#endif /* CONFIG_X86_LOCAL_APIC */
271
272/**
273 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
274 *
275 * Description:
276 * Perform any necessary interrupt initialisation prior to setting up
277 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
278 * interrupts should be initialised here if the machine emulates a PC
279 * in any way.
280 **/
281void __init pre_intr_init_hook(void)
282{
283 if (x86_quirks->arch_pre_intr_init) {
284 if (x86_quirks->arch_pre_intr_init())
285 return;
286 }
287 init_ISA_irqs();
288}
289
290/**
291 * intr_init_hook - post gate setup interrupt initialisation
292 *
293 * Description:
294 * Fill in any interrupts that may have been left out by the general
295 * init_IRQ() routine. interrupts having to do with the machine rather
296 * than the devices on the I/O bus (like APIC interrupts in intel MP
297 * systems) are started here.
298 **/
299void __init intr_init_hook(void)
300{
301 if (x86_quirks->arch_intr_init) {
302 if (x86_quirks->arch_intr_init())
303 return;
304 }
305}
306
307/**
308 * pre_setup_arch_hook - hook called prior to any setup_arch() execution
309 *
310 * Description:
311 * generally used to activate any machine specific identification
312 * routines that may be needed before setup_arch() runs. On Voyager
313 * this is used to get the board revision and type.
314 **/
315void __init pre_setup_arch_hook(void)
316{
317}
318
319/**
320 * trap_init_hook - initialise system specific traps
321 *
322 * Description:
323 * Called as the final act of trap_init(). Used in VISWS to initialise
324 * the various board specific APIC traps.
325 **/
326void __init trap_init_hook(void)
327{
328 if (x86_quirks->arch_trap_init) {
329 if (x86_quirks->arch_trap_init())
330 return;
331 }
332}
333
334static struct irqaction irq0 = {
335 .handler = timer_interrupt,
336 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
337 .mask = CPU_MASK_NONE,
338 .name = "timer"
339};
340
341/**
342 * pre_time_init_hook - do any specific initialisations before.
343 *
344 **/
345void __init pre_time_init_hook(void)
346{
347 if (x86_quirks->arch_pre_time_init)
348 x86_quirks->arch_pre_time_init();
349}
350
351/**
352 * time_init_hook - do any specific initialisations for the system timer.
353 *
354 * Description:
355 * Must plug the system timer interrupt source at HZ into the IRQ listed
356 * in irq_vectors.h:TIMER_IRQ
357 **/
358void __init time_init_hook(void)
359{
360 if (x86_quirks->arch_time_init) {
361 /*
362 * A nonzero return code does not mean failure, it means
363 * that the architecture quirk does not want any
364 * generic (timer) setup to be performed after this:
365 */
366 if (x86_quirks->arch_time_init())
367 return;
368 }
369
370 irq0.mask = cpumask_of_cpu(0);
371 setup_irq(0, &irq0);
372}
373
374#ifdef CONFIG_MCA
375/**
376 * mca_nmi_hook - hook into MCA specific NMI chain
377 *
378 * Description:
379 * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
380 * along the MCA bus. Use this to hook into that chain if you will need
381 * it.
382 **/
383void mca_nmi_hook(void)
384{
385 /*
386 * If I recall correctly, there's a whole bunch of other things that
387 * we can do to check for NMI problems, but that's all I know about
388 * at the moment.
389 */
390 pr_warning("NMI generated from unknown source!\n");
391}
392#endif
393
394static __init int no_ipi_broadcast(char *str)
395{
396 get_option(&str, &no_broadcast);
397 pr_info("Using %s mode\n",
398 no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
399 return 1;
400}
401__setup("no_ipi_broadcast=", no_ipi_broadcast);
402
403static int __init print_ipi_mode(void)
404{
405 pr_info("Using IPI %s mode\n",
406 no_broadcast ? "No-Shortcut" : "Shortcut");
407 return 0;
408}
409
410late_initcall(print_ipi_mode);
411
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/apic/probe_64.c
index 820dea5d0ebe..8d7748efe6a8 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -19,24 +19,27 @@
19#include <linux/dmar.h> 19#include <linux/dmar.h>
20 20
21#include <asm/smp.h> 21#include <asm/smp.h>
22#include <asm/apic.h>
22#include <asm/ipi.h> 23#include <asm/ipi.h>
23#include <asm/genapic.h>
24#include <asm/setup.h> 24#include <asm/setup.h>
25 25
26extern struct genapic apic_flat; 26extern struct apic apic_flat;
27extern struct genapic apic_physflat; 27extern struct apic apic_physflat;
28extern struct genapic apic_x2xpic_uv_x; 28extern struct apic apic_x2xpic_uv_x;
29extern struct genapic apic_x2apic_phys; 29extern struct apic apic_x2apic_phys;
30extern struct genapic apic_x2apic_cluster; 30extern struct apic apic_x2apic_cluster;
31 31
32struct genapic __read_mostly *apic = &apic_flat; 32struct apic __read_mostly *apic = &apic_flat;
33EXPORT_SYMBOL_GPL(apic);
33 34
34static struct genapic *apic_probe[] __initdata = { 35static struct apic *apic_probe[] __initdata = {
35#ifdef CONFIG_X86_UV 36#ifdef CONFIG_X86_UV
36 &apic_x2apic_uv_x, 37 &apic_x2apic_uv_x,
37#endif 38#endif
39#ifdef CONFIG_X86_X2APIC
38 &apic_x2apic_phys, 40 &apic_x2apic_phys,
39 &apic_x2apic_cluster, 41 &apic_x2apic_cluster,
42#endif
40 &apic_physflat, 43 &apic_physflat,
41 NULL, 44 NULL,
42}; 45};
@@ -46,19 +49,25 @@ static struct genapic *apic_probe[] __initdata = {
46 */ 49 */
47void __init default_setup_apic_routing(void) 50void __init default_setup_apic_routing(void)
48{ 51{
49 if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) { 52#ifdef CONFIG_X86_X2APIC
50 if (!intr_remapping_enabled) 53 if (x2apic && (apic != &apic_x2apic_phys &&
51 apic = &apic_flat; 54#ifdef CONFIG_X86_UV
55 apic != &apic_x2apic_uv_x &&
56#endif
57 apic != &apic_x2apic_cluster)) {
58 if (x2apic_phys)
59 apic = &apic_x2apic_phys;
60 else
61 apic = &apic_x2apic_cluster;
62 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
52 } 63 }
64#endif
53 65
54 if (apic == &apic_flat) { 66 if (apic == &apic_flat) {
55 if (max_physical_apicid >= 8) 67 if (max_physical_apicid >= 8)
56 apic = &apic_physflat; 68 apic = &apic_physflat;
57 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); 69 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
58 } 70 }
59
60 if (x86_quirks->update_genapic)
61 x86_quirks->update_genapic();
62} 71}
63 72
64/* Same for both flat and physical. */ 73/* Same for both flat and physical. */
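
On 64-bit, default_setup_apic_routing() above now owns the whole routing decision: when x2apic is enabled and no x2apic driver has been selected yet, it switches to the physical or clustered x2apic driver depending on x2apic_phys, and the flat driver is still demoted to physical-flat once the highest APIC ID reaches 8. The compact sketch below models that decision tree with plain strings standing in for the struct apic instances; the flag names mirror the kernel's, but the function itself is an illustration only.

#include <stdio.h>

/* Placeholder "drivers"; the real code selects between struct apic instances. */
static const char *apic_flat     = "flat";
static const char *apic_physflat = "physical flat";
static const char *apic_x2_phys  = "physical x2apic";
static const char *apic_x2_clu   = "cluster x2apic";

static const char *pick_routing(int x2apic_enabled, int x2apic_phys,
                                int max_physical_apicid)
{
        const char *apic = apic_flat;

        if (x2apic_enabled)
                apic = x2apic_phys ? apic_x2_phys : apic_x2_clu;

        /* Flat logical mode can only address 8 CPUs; fall back to physical. */
        if (apic == apic_flat && max_physical_apicid >= 8)
                apic = apic_physflat;

        return apic;
}

int main(void)
{
        printf("%s\n", pick_routing(0, 0, 4));          /* flat            */
        printf("%s\n", pick_routing(0, 0, 32));         /* physical flat   */
        printf("%s\n", pick_routing(1, 0, 32));         /* cluster x2apic  */
        printf("%s\n", pick_routing(1, 1, 32));         /* physical x2apic */
        return 0;
}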
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 1e733eff9b33..aac52fa873ff 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -34,13 +34,11 @@
34/* 34/*
35 * APIC driver for the IBM "Summit" chipset. 35 * APIC driver for the IBM "Summit" chipset.
36 */ 36 */
37#define APIC_DEFINITION 1
38#include <linux/threads.h> 37#include <linux/threads.h>
39#include <linux/cpumask.h> 38#include <linux/cpumask.h>
40#include <asm/mpspec.h> 39#include <asm/mpspec.h>
41#include <asm/apic.h> 40#include <asm/apic.h>
42#include <asm/smp.h> 41#include <asm/smp.h>
43#include <asm/genapic.h>
44#include <asm/fixmap.h> 42#include <asm/fixmap.h>
45#include <asm/apicdef.h> 43#include <asm/apicdef.h>
46#include <asm/ipi.h> 44#include <asm/ipi.h>
@@ -50,7 +48,7 @@
50#include <linux/gfp.h> 48#include <linux/gfp.h>
51#include <linux/smp.h> 49#include <linux/smp.h>
52 50
53static inline unsigned summit_get_apic_id(unsigned long x) 51static unsigned summit_get_apic_id(unsigned long x)
54{ 52{
55 return (x >> 24) & 0xFF; 53 return (x >> 24) & 0xFF;
56} 54}
@@ -60,7 +58,7 @@ static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
60 default_send_IPI_mask_sequence_logical(mask, vector); 58 default_send_IPI_mask_sequence_logical(mask, vector);
61} 59}
62 60
63static inline void summit_send_IPI_allbutself(int vector) 61static void summit_send_IPI_allbutself(int vector)
64{ 62{
65 cpumask_t mask = cpu_online_map; 63 cpumask_t mask = cpu_online_map;
66 cpu_clear(smp_processor_id(), mask); 64 cpu_clear(smp_processor_id(), mask);
@@ -69,7 +67,7 @@ static inline void summit_send_IPI_allbutself(int vector)
69 summit_send_IPI_mask(&mask, vector); 67 summit_send_IPI_mask(&mask, vector);
70} 68}
71 69
72static inline void summit_send_IPI_all(int vector) 70static void summit_send_IPI_all(int vector)
73{ 71{
74 summit_send_IPI_mask(&cpu_online_map, vector); 72 summit_send_IPI_mask(&cpu_online_map, vector);
75} 73}
@@ -79,13 +77,13 @@ static inline void summit_send_IPI_all(int vector)
79extern int use_cyclone; 77extern int use_cyclone;
80 78
81#ifdef CONFIG_X86_SUMMIT_NUMA 79#ifdef CONFIG_X86_SUMMIT_NUMA
82extern void setup_summit(void); 80static void setup_summit(void);
83#else 81#else
84#define setup_summit() {} 82static inline void setup_summit(void) {}
85#endif 83#endif
86 84
87static inline int 85static int summit_mps_oem_check(struct mpc_table *mpc, char *oem,
88summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) 86 char *productid)
89{ 87{
90 if (!strncmp(oem, "IBM ENSW", 8) && 88 if (!strncmp(oem, "IBM ENSW", 8) &&
91 (!strncmp(productid, "VIGIL SMP", 9) 89 (!strncmp(productid, "VIGIL SMP", 9)
@@ -100,7 +98,7 @@ summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
100} 98}
101 99
102/* Hook from generic ACPI tables.c */ 100/* Hook from generic ACPI tables.c */
103static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 101static int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
104{ 102{
105 if (!strncmp(oem_id, "IBM", 3) && 103 if (!strncmp(oem_id, "IBM", 3) &&
106 (!strncmp(oem_table_id, "SERVIGIL", 8) 104 (!strncmp(oem_table_id, "SERVIGIL", 8)
@@ -188,7 +186,7 @@ static inline int is_WPEG(struct rio_detail *rio){
188 186
189#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) 187#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
190 188
191static inline const cpumask_t *summit_target_cpus(void) 189static const cpumask_t *summit_target_cpus(void)
192{ 190{
193 /* CPU_MASK_ALL (0xff) has undefined behaviour with 191 /* CPU_MASK_ALL (0xff) has undefined behaviour with
194 * dest_LowestPrio mode logical clustered apic interrupt routing 192 * dest_LowestPrio mode logical clustered apic interrupt routing
@@ -197,28 +195,23 @@ static inline const cpumask_t *summit_target_cpus(void)
197 return &cpumask_of_cpu(0); 195 return &cpumask_of_cpu(0);
198} 196}
199 197
200static inline unsigned long 198static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
201summit_check_apicid_used(physid_mask_t bitmap, int apicid)
202{ 199{
203 return 0; 200 return 0;
204} 201}
205 202
206/* we don't use the phys_cpu_present_map to indicate apicid presence */ 203/* we don't use the phys_cpu_present_map to indicate apicid presence */
207static inline unsigned long summit_check_apicid_present(int bit) 204static unsigned long summit_check_apicid_present(int bit)
208{ 205{
209 return 1; 206 return 1;
210} 207}
211 208
212#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) 209static void summit_init_apic_ldr(void)
213
214extern u8 cpu_2_logical_apicid[];
215
216static inline void summit_init_apic_ldr(void)
217{ 210{
218 unsigned long val, id; 211 unsigned long val, id;
219 int count = 0; 212 int count = 0;
220 u8 my_id = (u8)hard_smp_processor_id(); 213 u8 my_id = (u8)hard_smp_processor_id();
221 u8 my_cluster = (u8)apicid_cluster(my_id); 214 u8 my_cluster = APIC_CLUSTER(my_id);
222#ifdef CONFIG_SMP 215#ifdef CONFIG_SMP
223 u8 lid; 216 u8 lid;
224 int i; 217 int i;
@@ -226,7 +219,7 @@ static inline void summit_init_apic_ldr(void)
226 /* Create logical APIC IDs by counting CPUs already in cluster. */ 219 /* Create logical APIC IDs by counting CPUs already in cluster. */
227 for (count = 0, i = nr_cpu_ids; --i >= 0; ) { 220 for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
228 lid = cpu_2_logical_apicid[i]; 221 lid = cpu_2_logical_apicid[i];
229 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) 222 if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
230 ++count; 223 ++count;
231 } 224 }
232#endif 225#endif
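
summit_init_apic_ldr() above derives a CPU's logical APIC ID at runtime: it takes the cluster bits of its hardware ID (via APIC_CLUSTER(), which this patch promotes to a shared definition in place of the local apicid_cluster() macro) and counts how many already-registered CPUs share that cluster; the count becomes the new CPU's bit position inside the cluster. The standalone sketch below shows just the counting step; the table contents, NR_CPUS and the macro value are assumptions for the example.

#include <stdio.h>

#define NR_CPUS          8
#define BAD_APICID       0xFFu
#define APIC_CLUSTER(id) ((id) & 0xF0u)   /* stand-in for the kernel macro */

/* Logical APIC IDs assigned so far; BAD_APICID means "not registered yet". */
static const unsigned int cpu_2_logical_apicid[NR_CPUS] = {
        0x11, 0x12, BAD_APICID, BAD_APICID, 0x21, BAD_APICID, BAD_APICID, BAD_APICID,
};

/* How many CPUs already sit in the same cluster as 'my_id'? */
static int cpus_in_my_cluster(unsigned int my_id)
{
        unsigned int my_cluster = APIC_CLUSTER(my_id);
        int count = 0;

        for (int i = NR_CPUS - 1; i >= 0; i--) {
                unsigned int lid = cpu_2_logical_apicid[i];

                if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
                        count++;
        }
        return count;
}

int main(void)
{
        /*
         * A CPU with hardware ID 0x13 joins cluster 0x10, which already holds
         * two CPUs, so its logical ID bit within the cluster would be 1 << 2.
         */
        printf("CPUs already in cluster: %d\n", cpus_in_my_cluster(0x13));
        return 0;
}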
@@ -240,18 +233,18 @@ static inline void summit_init_apic_ldr(void)
240 apic_write(APIC_LDR, val); 233 apic_write(APIC_LDR, val);
241} 234}
242 235
243static inline int summit_apic_id_registered(void) 236static int summit_apic_id_registered(void)
244{ 237{
245 return 1; 238 return 1;
246} 239}
247 240
248static inline void summit_setup_apic_routing(void) 241static void summit_setup_apic_routing(void)
249{ 242{
250 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", 243 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
251 nr_ioapics); 244 nr_ioapics);
252} 245}
253 246
254static inline int summit_apicid_to_node(int logical_apicid) 247static int summit_apicid_to_node(int logical_apicid)
255{ 248{
256#ifdef CONFIG_SMP 249#ifdef CONFIG_SMP
257 return apicid_2_node[hard_smp_processor_id()]; 250 return apicid_2_node[hard_smp_processor_id()];
@@ -266,13 +259,13 @@ static inline int summit_cpu_to_logical_apicid(int cpu)
266#ifdef CONFIG_SMP 259#ifdef CONFIG_SMP
267 if (cpu >= nr_cpu_ids) 260 if (cpu >= nr_cpu_ids)
268 return BAD_APICID; 261 return BAD_APICID;
269 return (int)cpu_2_logical_apicid[cpu]; 262 return cpu_2_logical_apicid[cpu];
270#else 263#else
271 return logical_smp_processor_id(); 264 return logical_smp_processor_id();
272#endif 265#endif
273} 266}
274 267
275static inline int summit_cpu_present_to_apicid(int mps_cpu) 268static int summit_cpu_present_to_apicid(int mps_cpu)
276{ 269{
277 if (mps_cpu < nr_cpu_ids) 270 if (mps_cpu < nr_cpu_ids)
278 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); 271 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
@@ -280,65 +273,44 @@ static inline int summit_cpu_present_to_apicid(int mps_cpu)
280 return BAD_APICID; 273 return BAD_APICID;
281} 274}
282 275
283static inline physid_mask_t 276static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
284summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
285{ 277{
286 /* For clustered we don't have a good way to do this yet - hack */ 278 /* For clustered we don't have a good way to do this yet - hack */
287 return physids_promote(0x0F); 279 return physids_promote(0x0F);
288} 280}
289 281
290static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) 282static physid_mask_t summit_apicid_to_cpu_present(int apicid)
291{ 283{
292 return physid_mask_of_physid(0); 284 return physid_mask_of_physid(0);
293} 285}
294 286
295static inline void summit_setup_portio_remap(void) 287static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
296{
297}
298
299static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
300{ 288{
301 return 1; 289 return 1;
302} 290}
303 291
304static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) 292static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
305{ 293{
306 int cpus_found = 0; 294 unsigned int round = 0;
307 int num_bits_set; 295 int cpu, apicid = 0;
308 int apicid; 296
309 int cpu;
310
311 num_bits_set = cpus_weight(*cpumask);
312 /* Return id to all */
313 if (num_bits_set >= nr_cpu_ids)
314 return 0xFF;
315 /* 297 /*
316 * The cpus in the mask must all be on the apic cluster. If are not 298 * The cpus in the mask must all be on the apic cluster.
317 * on the same apicid cluster return default value of target_cpus():
318 */ 299 */
319 cpu = first_cpu(*cpumask); 300 for_each_cpu(cpu, cpumask) {
320 apicid = summit_cpu_to_logical_apicid(cpu); 301 int new_apicid = summit_cpu_to_logical_apicid(cpu);
321
322 while (cpus_found < num_bits_set) {
323 if (cpu_isset(cpu, *cpumask)) {
324 int new_apicid = summit_cpu_to_logical_apicid(cpu);
325
326 if (apicid_cluster(apicid) !=
327 apicid_cluster(new_apicid)) {
328 printk ("%s: Not a valid mask!\n", __func__);
329 302
330 return 0xFF; 303 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
331 } 304 printk("%s: Not a valid mask!\n", __func__);
332 apicid = apicid | new_apicid; 305 return BAD_APICID;
333 cpus_found++;
334 } 306 }
335 cpu++; 307 apicid |= new_apicid;
308 round++;
336 } 309 }
337 return apicid; 310 return apicid;
338} 311}
339 312
340static inline unsigned int 313static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
341summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
342 const struct cpumask *andmask) 314 const struct cpumask *andmask)
343{ 315{
344 int apicid = summit_cpu_to_logical_apicid(0); 316 int apicid = summit_cpu_to_logical_apicid(0);
@@ -363,7 +335,7 @@ summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
363 * 335 *
364 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. 336 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
365 */ 337 */
366static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) 338static int summit_phys_pkg_id(int cpuid_apic, int index_msb)
367{ 339{
368 return hard_smp_processor_id() >> index_msb; 340 return hard_smp_processor_id() >> index_msb;
369} 341}
@@ -388,15 +360,15 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
388} 360}
389 361
390#ifdef CONFIG_X86_SUMMIT_NUMA 362#ifdef CONFIG_X86_SUMMIT_NUMA
391static struct rio_table_hdr *rio_table_hdr __initdata; 363static struct rio_table_hdr *rio_table_hdr;
392static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; 364static struct scal_detail *scal_devs[MAX_NUMNODES];
393static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; 365static struct rio_detail *rio_devs[MAX_NUMNODES*4];
394 366
395#ifndef CONFIG_X86_NUMAQ 367#ifndef CONFIG_X86_NUMAQ
396static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; 368static int mp_bus_id_to_node[MAX_MP_BUSSES];
397#endif 369#endif
398 370
399static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) 371static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
400{ 372{
401 int twister = 0, node = 0; 373 int twister = 0, node = 0;
402 int i, bus, num_buses; 374 int i, bus, num_buses;
@@ -458,7 +430,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
458 return bus; 430 return bus;
459} 431}
460 432
461static int __init build_detail_arrays(void) 433static int build_detail_arrays(void)
462{ 434{
463 unsigned long ptr; 435 unsigned long ptr;
464 int i, scal_detail_size, rio_detail_size; 436 int i, scal_detail_size, rio_detail_size;
@@ -492,7 +464,7 @@ static int __init build_detail_arrays(void)
492 return 1; 464 return 1;
493} 465}
494 466
495void __init setup_summit(void) 467void setup_summit(void)
496{ 468{
497 unsigned long ptr; 469 unsigned long ptr;
498 unsigned short offset; 470 unsigned short offset;
@@ -544,7 +516,7 @@ void __init setup_summit(void)
544} 516}
545#endif 517#endif
546 518
547struct genapic apic_summit = { 519struct apic apic_summit = {
548 520
549 .name = "summit", 521 .name = "summit",
550 .probe = probe_summit, 522 .probe = probe_summit,
@@ -590,13 +562,18 @@ struct genapic apic_summit = {
590 .send_IPI_all = summit_send_IPI_all, 562 .send_IPI_all = summit_send_IPI_all,
591 .send_IPI_self = default_send_IPI_self, 563 .send_IPI_self = default_send_IPI_self,
592 564
593 .wakeup_cpu = NULL,
594 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 565 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
595 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 566 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
596 567
597 .wait_for_init_deassert = default_wait_for_init_deassert, 568 .wait_for_init_deassert = default_wait_for_init_deassert,
598 569
599 .smp_callin_clear_local_apic = NULL, 570 .smp_callin_clear_local_apic = NULL,
600 .store_NMI_vector = NULL,
601 .inquire_remote_apic = default_inquire_remote_apic, 571 .inquire_remote_apic = default_inquire_remote_apic,
572
573 .read = native_apic_mem_read,
574 .write = native_apic_mem_write,
575 .icr_read = native_apic_icr_read,
576 .icr_write = native_apic_icr_write,
577 .wait_icr_idle = native_apic_wait_icr_idle,
578 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
602}; 579};
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 7c87156b6411..8fb87b6dd633 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -7,17 +7,14 @@
7#include <linux/dmar.h> 7#include <linux/dmar.h>
8 8
9#include <asm/smp.h> 9#include <asm/smp.h>
10#include <asm/apic.h>
10#include <asm/ipi.h> 11#include <asm/ipi.h>
11#include <asm/genapic.h>
12 12
13DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); 13DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
14 14
15static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 15static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
16{ 16{
17 if (cpu_has_x2apic) 17 return x2apic_enabled();
18 return 1;
19
20 return 0;
21} 18}
22 19
23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 20/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
@@ -46,7 +43,7 @@ static void
46 /* 43 /*
47 * send the IPI. 44 * send the IPI.
48 */ 45 */
49 x2apic_icr_write(cfg, apicid); 46 native_x2apic_icr_write(cfg, apicid);
50} 47}
51 48
52/* 49/*
@@ -182,7 +179,7 @@ static void init_x2apic_ldr(void)
182 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); 179 per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
183} 180}
184 181
185struct genapic apic_x2apic_cluster = { 182struct apic apic_x2apic_cluster = {
186 183
187 .name = "cluster x2apic", 184 .name = "cluster x2apic",
188 .probe = NULL, 185 .probe = NULL,
@@ -227,11 +224,16 @@ struct genapic apic_x2apic_cluster = {
227 .send_IPI_all = x2apic_send_IPI_all, 224 .send_IPI_all = x2apic_send_IPI_all,
228 .send_IPI_self = x2apic_send_IPI_self, 225 .send_IPI_self = x2apic_send_IPI_self,
229 226
230 .wakeup_cpu = NULL,
231 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 227 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
232 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 228 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
233 .wait_for_init_deassert = NULL, 229 .wait_for_init_deassert = NULL,
234 .smp_callin_clear_local_apic = NULL, 230 .smp_callin_clear_local_apic = NULL,
235 .store_NMI_vector = NULL,
236 .inquire_remote_apic = NULL, 231 .inquire_remote_apic = NULL,
232
233 .read = native_apic_msr_read,
234 .write = native_apic_msr_write,
235 .icr_read = native_x2apic_icr_read,
236 .icr_write = native_x2apic_icr_write,
237 .wait_icr_idle = native_x2apic_wait_icr_idle,
238 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
237}; 239};
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 5cbae8aa0408..23625b9f98b2 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -7,10 +7,10 @@
7#include <linux/dmar.h> 7#include <linux/dmar.h>
8 8
9#include <asm/smp.h> 9#include <asm/smp.h>
10#include <asm/apic.h>
10#include <asm/ipi.h> 11#include <asm/ipi.h>
11#include <asm/genapic.h>
12 12
13static int x2apic_phys; 13int x2apic_phys;
14 14
15static int set_x2apic_phys_mode(char *arg) 15static int set_x2apic_phys_mode(char *arg)
16{ 16{
@@ -21,10 +21,10 @@ early_param("x2apic_phys", set_x2apic_phys_mode);
21 21
22static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 22static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
23{ 23{
24 if (cpu_has_x2apic && x2apic_phys) 24 if (x2apic_phys)
25 return 1; 25 return x2apic_enabled();
26 26 else
27 return 0; 27 return 0;
28} 28}
29 29
30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
@@ -50,7 +50,7 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
50 /* 50 /*
51 * send the IPI. 51 * send the IPI.
52 */ 52 */
53 x2apic_icr_write(cfg, apicid); 53 native_x2apic_icr_write(cfg, apicid);
54} 54}
55 55
56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) 56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
@@ -168,7 +168,7 @@ static void init_x2apic_ldr(void)
168{ 168{
169} 169}
170 170
171struct genapic apic_x2apic_phys = { 171struct apic apic_x2apic_phys = {
172 172
173 .name = "physical x2apic", 173 .name = "physical x2apic",
174 .probe = NULL, 174 .probe = NULL,
@@ -213,11 +213,16 @@ struct genapic apic_x2apic_phys = {
213 .send_IPI_all = x2apic_send_IPI_all, 213 .send_IPI_all = x2apic_send_IPI_all,
214 .send_IPI_self = x2apic_send_IPI_self, 214 .send_IPI_self = x2apic_send_IPI_self,
215 215
216 .wakeup_cpu = NULL,
217 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 216 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
218 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 217 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
219 .wait_for_init_deassert = NULL, 218 .wait_for_init_deassert = NULL,
220 .smp_callin_clear_local_apic = NULL, 219 .smp_callin_clear_local_apic = NULL,
221 .store_NMI_vector = NULL,
222 .inquire_remote_apic = NULL, 220 .inquire_remote_apic = NULL,
221
222 .read = native_apic_msr_read,
223 .write = native_apic_msr_write,
224 .icr_read = native_x2apic_icr_read,
225 .icr_write = native_x2apic_icr_write,
226 .wait_icr_idle = native_x2apic_wait_icr_idle,
227 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
223}; 228};
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 89b84e004f04..1bd6da1f8fad 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -7,28 +7,28 @@
7 * 7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10
11#include <linux/kernel.h>
12#include <linux/threads.h>
13#include <linux/cpu.h>
14#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <linux/hardirq.h>
12#include <linux/proc_fs.h>
13#include <linux/threads.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
15#include <linux/string.h> 16#include <linux/string.h>
16#include <linux/ctype.h> 17#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/module.h>
20#include <linux/hardirq.h>
21#include <linux/timer.h> 19#include <linux/timer.h>
22#include <linux/proc_fs.h> 20#include <linux/cpu.h>
23#include <asm/current.h> 21#include <linux/init.h>
24#include <asm/smp.h> 22
25#include <asm/ipi.h>
26#include <asm/genapic.h>
27#include <asm/pgtable.h>
28#include <asm/uv/uv.h>
29#include <asm/uv/uv_mmrs.h> 23#include <asm/uv/uv_mmrs.h>
30#include <asm/uv/uv_hub.h> 24#include <asm/uv/uv_hub.h>
25#include <asm/current.h>
26#include <asm/pgtable.h>
31#include <asm/uv/bios.h> 27#include <asm/uv/bios.h>
28#include <asm/uv/uv.h>
29#include <asm/apic.h>
30#include <asm/ipi.h>
31#include <asm/smp.h>
32 32
33DEFINE_PER_CPU(int, x2apic_extra_bits); 33DEFINE_PER_CPU(int, x2apic_extra_bits);
34 34
@@ -91,24 +91,28 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
91 cpumask_set_cpu(cpu, retmask); 91 cpumask_set_cpu(cpu, retmask);
92} 92}
93 93
94int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) 94static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
95{ 95{
96#ifdef CONFIG_SMP
96 unsigned long val; 97 unsigned long val;
97 int pnode; 98 int pnode;
98 99
99 pnode = uv_apicid_to_pnode(phys_apicid); 100 pnode = uv_apicid_to_pnode(phys_apicid);
100 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 101 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
101 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 102 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
102 (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 103 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
103 APIC_DM_INIT; 104 APIC_DM_INIT;
104 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 105 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
105 mdelay(10); 106 mdelay(10);
106 107
107 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 108 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
108 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 109 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
109 (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 110 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
110 APIC_DM_STARTUP; 111 APIC_DM_STARTUP;
111 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 112 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
113
114 atomic_set(&init_deasserted, 1);
115#endif
112 return 0; 116 return 0;
113} 117}
114 118
@@ -240,7 +244,7 @@ static void uv_send_IPI_self(int vector)
240 apic_write(APIC_SELF_IPI, vector); 244 apic_write(APIC_SELF_IPI, vector);
241} 245}
242 246
243struct genapic apic_x2apic_uv_x = { 247struct apic apic_x2apic_uv_x = {
244 248
245 .name = "UV large system", 249 .name = "UV large system",
246 .probe = NULL, 250 .probe = NULL,
@@ -285,13 +289,19 @@ struct genapic apic_x2apic_uv_x = {
285 .send_IPI_all = uv_send_IPI_all, 289 .send_IPI_all = uv_send_IPI_all,
286 .send_IPI_self = uv_send_IPI_self, 290 .send_IPI_self = uv_send_IPI_self,
287 291
288 .wakeup_cpu = NULL, 292 .wakeup_secondary_cpu = uv_wakeup_secondary,
289 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 293 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
290 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 294 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
291 .wait_for_init_deassert = NULL, 295 .wait_for_init_deassert = NULL,
292 .smp_callin_clear_local_apic = NULL, 296 .smp_callin_clear_local_apic = NULL,
293 .store_NMI_vector = NULL,
294 .inquire_remote_apic = NULL, 297 .inquire_remote_apic = NULL,
298
299 .read = native_apic_msr_read,
300 .write = native_apic_msr_write,
301 .icr_read = native_x2apic_icr_read,
302 .icr_write = native_x2apic_icr_write,
303 .wait_icr_idle = native_x2apic_wait_icr_idle,
304 .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
295}; 305};
296 306
297static __cpuinit void set_x2apic_extra_bits(int pnode) 307static __cpuinit void set_x2apic_extra_bits(int pnode)
@@ -359,7 +369,7 @@ static __init void map_high(char *id, unsigned long base, int shift,
359 paddr = base << shift; 369 paddr = base << shift;
360 bytes = (1UL << shift) * (max_pnode + 1); 370 bytes = (1UL << shift) * (max_pnode + 1);
361 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 371 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
362 paddr + bytes); 372 paddr + bytes);
363 if (map_type == map_uc) 373 if (map_type == map_uc)
364 init_extra_mapping_uc(paddr, bytes); 374 init_extra_mapping_uc(paddr, bytes);
365 else 375 else
@@ -522,7 +532,7 @@ late_initcall(uv_init_heartbeat);
522 532
523/* 533/*
524 * Called on each cpu to initialize the per_cpu UV data area. 534 * Called on each cpu to initialize the per_cpu UV data area.
525 * ZZZ hotplug not supported yet 535 * FIXME: hotplug not supported yet
526 */ 536 */
527void __cpuinit uv_cpu_init(void) 537void __cpuinit uv_cpu_init(void)
528{ 538{
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 37ba5f85b718..10033fe718e0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1192,6 +1192,7 @@ static int suspend(int vetoable)
1192 device_suspend(PMSG_SUSPEND); 1192 device_suspend(PMSG_SUSPEND);
1193 local_irq_disable(); 1193 local_irq_disable();
1194 device_power_down(PMSG_SUSPEND); 1194 device_power_down(PMSG_SUSPEND);
1195 sysdev_suspend(PMSG_SUSPEND);
1195 1196
1196 local_irq_enable(); 1197 local_irq_enable();
1197 1198
@@ -1208,6 +1209,7 @@ static int suspend(int vetoable)
1208 if (err != APM_SUCCESS) 1209 if (err != APM_SUCCESS)
1209 apm_error("suspend", err); 1210 apm_error("suspend", err);
1210 err = (err == APM_SUCCESS) ? 0 : -EIO; 1211 err = (err == APM_SUCCESS) ? 0 : -EIO;
1212 sysdev_resume();
1211 device_power_up(PMSG_RESUME); 1213 device_power_up(PMSG_RESUME);
1212 local_irq_enable(); 1214 local_irq_enable();
1213 device_resume(PMSG_RESUME); 1215 device_resume(PMSG_RESUME);
@@ -1228,6 +1230,7 @@ static void standby(void)
1228 1230
1229 local_irq_disable(); 1231 local_irq_disable();
1230 device_power_down(PMSG_SUSPEND); 1232 device_power_down(PMSG_SUSPEND);
1233 sysdev_suspend(PMSG_SUSPEND);
1231 local_irq_enable(); 1234 local_irq_enable();
1232 1235
1233 err = set_system_power_state(APM_STATE_STANDBY); 1236 err = set_system_power_state(APM_STATE_STANDBY);
@@ -1235,6 +1238,7 @@ static void standby(void)
1235 apm_error("standby", err); 1238 apm_error("standby", err);
1236 1239
1237 local_irq_disable(); 1240 local_irq_disable();
1241 sysdev_resume();
1238 device_power_up(PMSG_RESUME); 1242 device_power_up(PMSG_RESUME);
1239 local_irq_enable(); 1243 local_irq_enable();
1240} 1244}
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index e48640cfac0c..6882a735d9c0 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,7 +7,7 @@
7#include <asm/pat.h> 7#include <asm/pat.h>
8#include <asm/processor.h> 8#include <asm/processor.h>
9 9
10#include <asm/genapic.h> 10#include <asm/apic.h>
11 11
12struct cpuid_bit { 12struct cpuid_bit {
13 u16 feature; 13 u16 feature;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ff4d7b9e32e4..25423a5b80ed 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -12,8 +12,6 @@
12# include <asm/cacheflush.h> 12# include <asm/cacheflush.h>
13#endif 13#endif
14 14
15#include <asm/genapic.h>
16
17#include "cpu.h" 15#include "cpu.h"
18 16
19#ifdef CONFIG_X86_32 17#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4db150ed446d..826d5c876278 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -23,11 +23,9 @@
23#include <asm/smp.h> 23#include <asm/smp.h>
24#include <asm/cpu.h> 24#include <asm/cpu.h>
25#include <asm/cpumask.h> 25#include <asm/cpumask.h>
26#ifdef CONFIG_X86_LOCAL_APIC
27#include <asm/mpspec.h>
28#include <asm/apic.h> 26#include <asm/apic.h>
29#include <asm/genapic.h> 27
30#include <asm/genapic.h> 28#ifdef CONFIG_X86_LOCAL_APIC
31#include <asm/uv/uv.h> 29#include <asm/uv/uv.h>
32#endif 30#endif
33 31
@@ -1051,7 +1049,7 @@ void __cpuinit cpu_init(void)
1051 barrier(); 1049 barrier();
1052 1050
1053 check_efer(); 1051 check_efer();
1054 if (cpu != 0 && x2apic) 1052 if (cpu != 0)
1055 enable_x2apic(); 1053 enable_x2apic();
1056 1054
1057 /* 1055 /*
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index c2f930d86640..41ab3f064cb1 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
204 } 204 }
205 /* Enable Enhanced PowerSaver */ 205 /* Enable Enhanced PowerSaver */
206 rdmsrl(MSR_IA32_MISC_ENABLE, val); 206 rdmsrl(MSR_IA32_MISC_ENABLE, val);
207 if (!(val & 1 << 16)) { 207 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
208 val |= 1 << 16; 208 val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
209 wrmsrl(MSR_IA32_MISC_ENABLE, val); 209 wrmsrl(MSR_IA32_MISC_ENABLE, val);
210 /* Can be locked at 0 */ 210 /* Can be locked at 0 */
211 rdmsrl(MSR_IA32_MISC_ENABLE, val); 211 rdmsrl(MSR_IA32_MISC_ENABLE, val);
212 if (!(val & 1 << 16)) { 212 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
213 printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); 213 printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
214 return -ENODEV; 214 return -ENODEV;
215 } 215 }
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index fb039cd345d8..6428aa17b40e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1157,8 +1157,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1157 data->cpu = pol->cpu; 1157 data->cpu = pol->cpu;
1158 data->currpstate = HW_PSTATE_INVALID; 1158 data->currpstate = HW_PSTATE_INVALID;
1159 1159
1160 rc = powernow_k8_cpu_init_acpi(data); 1160 if (powernow_k8_cpu_init_acpi(data)) {
1161 if (rc) {
1162 /* 1161 /*
1163 * Use the PSB BIOS structure. This is only available on 1162 * Use the PSB BIOS structure. This is only available on
1164 * an UP version, and is deprecated by AMD. 1163 * an UP version, and is deprecated by AMD.
@@ -1176,17 +1175,20 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1176 "ACPI maintainers and complain to your BIOS " 1175 "ACPI maintainers and complain to your BIOS "
1177 "vendor.\n"); 1176 "vendor.\n");
1178#endif 1177#endif
1179 goto err_out; 1178 kfree(data);
1179 return -ENODEV;
1180 } 1180 }
1181 if (pol->cpu != 0) { 1181 if (pol->cpu != 0) {
1182 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1182 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
1183 "CPU other than CPU0. Complain to your BIOS " 1183 "CPU other than CPU0. Complain to your BIOS "
1184 "vendor.\n"); 1184 "vendor.\n");
1185 goto err_out; 1185 kfree(data);
1186 return -ENODEV;
1186 } 1187 }
1187 rc = find_psb_table(data); 1188 rc = find_psb_table(data);
1188 if (rc) { 1189 if (rc) {
1189 goto err_out; 1190 kfree(data);
1191 return -ENODEV;
1190 } 1192 }
1191 /* Take a crude guess here. 1193 /* Take a crude guess here.
1192 * That guess was in microseconds, so multiply with 1000 */ 1194 * That guess was in microseconds, so multiply with 1000 */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index f08998278a3a..c9f1fdc02830 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
390 enable it if not. */ 390 enable it if not. */
391 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 391 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
392 392
393 if (!(l & (1<<16))) { 393 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
394 l |= (1<<16); 394 l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
395 dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); 395 dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
396 wrmsr(MSR_IA32_MISC_ENABLE, l, h); 396 wrmsr(MSR_IA32_MISC_ENABLE, l, h);
397 397
398 /* check to see if it stuck */ 398 /* check to see if it stuck */
399 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 399 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
400 if (!(l & (1<<16))) { 400 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
401 printk(KERN_INFO PFX 401 printk(KERN_INFO PFX
402 "couldn't enable Enhanced SpeedStep\n"); 402 "couldn't enable Enhanced SpeedStep\n");
403 return -ENODEV; 403 return -ENODEV;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1f137a87d4bd..25c559ba8d54 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -24,7 +24,6 @@
24#ifdef CONFIG_X86_LOCAL_APIC 24#ifdef CONFIG_X86_LOCAL_APIC
25#include <asm/mpspec.h> 25#include <asm/mpspec.h>
26#include <asm/apic.h> 26#include <asm/apic.h>
27#include <asm/genapic.h>
28#endif 27#endif
29 28
30static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) 29static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -147,10 +146,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
147 */ 146 */
148 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { 147 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
149 rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); 148 rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
150 if ((lo & (1<<9)) == 0) { 149 if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
151 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); 150 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
152 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); 151 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
153 lo |= (1<<9); /* Disable hw prefetching */ 152 lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
154 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); 153 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
155 } 154 }
156 } 155 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 1c838032fd37..fe79985ce0f2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -295,11 +295,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
295 * If we know that the error was in user space, send a 295 * If we know that the error was in user space, send a
296 * SIGBUS. Otherwise, panic if tolerance is low. 296 * SIGBUS. Otherwise, panic if tolerance is low.
297 * 297 *
298 * do_exit() takes an awful lot of locks and has a slight 298 * force_sig() takes an awful lot of locks and has a slight
299 * risk of deadlocking. 299 * risk of deadlocking.
300 */ 300 */
301 if (user_space) { 301 if (user_space) {
302 do_exit(SIGBUS); 302 force_sig(SIGBUS, current);
303 } else if (panic_on_oops || tolerant < 2) { 303 } else if (panic_on_oops || tolerant < 2) {
304 mce_panic("Uncorrected machine check", 304 mce_panic("Uncorrected machine check",
305 &panicm, mcestart); 305 &panicm, mcestart);
@@ -490,7 +490,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
490 490
491} 491}
492 492
493static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) 493static void mce_cpu_features(struct cpuinfo_x86 *c)
494{ 494{
495 switch (c->x86_vendor) { 495 switch (c->x86_vendor) {
496 case X86_VENDOR_INTEL: 496 case X86_VENDOR_INTEL:
@@ -734,6 +734,7 @@ __setup("mce=", mcheck_enable);
734static int mce_resume(struct sys_device *dev) 734static int mce_resume(struct sys_device *dev)
735{ 735{
736 mce_init(NULL); 736 mce_init(NULL);
737 mce_cpu_features(&current_cpu_data);
737 return 0; 738 return 0;
738} 739}
739 740
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 4772e91e8246..9817506dd469 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -121,7 +121,7 @@ static long threshold_restart_bank(void *_tr)
121} 121}
122 122
123/* cpu init entry point, called from mce.c with preempt off */ 123/* cpu init entry point, called from mce.c with preempt off */
124void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) 124void mce_amd_feature_init(struct cpuinfo_x86 *c)
125{ 125{
126 unsigned int bank, block; 126 unsigned int bank, block;
127 unsigned int cpu = smp_processor_id(); 127 unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 5e8c79e748a6..aa5e287c98e0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -31,7 +31,7 @@ asmlinkage void smp_thermal_interrupt(void)
31 irq_exit(); 31 irq_exit();
32} 32}
33 33
34static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) 34static void intel_init_thermal(struct cpuinfo_x86 *c)
35{ 35{
36 u32 l, h; 36 u32 l, h;
37 int tm2 = 0; 37 int tm2 = 0;
@@ -49,13 +49,13 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
49 */ 49 */
50 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 50 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
51 h = apic_read(APIC_LVTTHMR); 51 h = apic_read(APIC_LVTTHMR);
52 if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { 52 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
53 printk(KERN_DEBUG 53 printk(KERN_DEBUG
54 "CPU%d: Thermal monitoring handled by SMI\n", cpu); 54 "CPU%d: Thermal monitoring handled by SMI\n", cpu);
55 return; 55 return;
56 } 56 }
57 57
58 if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) 58 if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
59 tm2 = 1; 59 tm2 = 1;
60 60
61 if (h & APIC_VECTOR_MASK) { 61 if (h & APIC_VECTOR_MASK) {
@@ -73,7 +73,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
73 wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); 73 wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
74 74
75 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 75 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
76 wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); 76 wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
77 77
78 l = apic_read(APIC_LVTTHMR); 78 l = apic_read(APIC_LVTTHMR);
79 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); 79 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
@@ -85,7 +85,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
85 return; 85 return;
86} 86}
87 87
88void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c) 88void mce_intel_feature_init(struct cpuinfo_x86 *c)
89{ 89{
90 intel_init_thermal(c); 90 intel_init_thermal(c);
91} 91}
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index 9b60fce09f75..f53bdcbaf382 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
85 */ 85 */
86 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 86 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
87 h = apic_read(APIC_LVTTHMR); 87 h = apic_read(APIC_LVTTHMR);
88 if ((l & (1<<3)) && (h & APIC_DM_SMI)) { 88 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
89 printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", 89 printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
90 cpu); 90 cpu);
91 return; /* -EBUSY */ 91 return; /* -EBUSY */
@@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
111 vendor_thermal_interrupt = intel_thermal_interrupt; 111 vendor_thermal_interrupt = intel_thermal_interrupt;
112 112
113 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 113 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
114 wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h); 114 wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
115 115
116 l = apic_read(APIC_LVTTHMR); 116 l = apic_read(APIC_LVTTHMR);
117 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); 117 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 9abd48b22674..f6c70a164e32 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -19,7 +19,7 @@
19#include <linux/nmi.h> 19#include <linux/nmi.h>
20#include <linux/kprobes.h> 20#include <linux/kprobes.h>
21 21
22#include <asm/apic.h> 22#include <asm/genapic.h>
23#include <asm/intel_arch_perfmon.h> 23#include <asm/intel_arch_perfmon.h>
24 24
25struct nmi_watchdog_ctlblk { 25struct nmi_watchdog_ctlblk {
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index ad7f2a696f4a..ff958248e61d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -28,8 +28,6 @@
28#include <asm/reboot.h> 28#include <asm/reboot.h>
29#include <asm/virtext.h> 29#include <asm/virtext.h>
30 30
31#include <asm/genapic.h>
32
33 31
34#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 32#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
35 33
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e85826829cf2..508bec1cee27 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -858,6 +858,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
858 */ 858 */
859void __init reserve_early(u64 start, u64 end, char *name) 859void __init reserve_early(u64 start, u64 end, char *name)
860{ 860{
861 if (start >= end)
862 return;
863
861 drop_overlaps_that_are_ok(start, end); 864 drop_overlaps_that_are_ok(start, end);
862 __reserve_early(start, end, name, 0); 865 __reserve_early(start, end, name, 0);
863} 866}
diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
index ef00bb77d7e4..fbe66e626c09 100644
--- a/arch/x86/kernel/efi_stub_32.S
+++ b/arch/x86/kernel/efi_stub_32.S
@@ -6,7 +6,7 @@
6 */ 6 */
7 7
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/page.h> 9#include <asm/page_types.h>
10 10
11/* 11/*
12 * efi_call_phys(void *, ...) is a function with variable parameters. 12 * efi_call_phys(void *, ...) is a function with variable parameters.
@@ -113,6 +113,7 @@ ENTRY(efi_call_phys)
113 movl (%edx), %ecx 113 movl (%edx), %ecx
114 pushl %ecx 114 pushl %ecx
115 ret 115 ret
116ENDPROC(efi_call_phys)
116.previous 117.previous
117 118
118.data 119.data
diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
index 99b47d48c9f4..4c07ccab8146 100644
--- a/arch/x86/kernel/efi_stub_64.S
+++ b/arch/x86/kernel/efi_stub_64.S
@@ -41,6 +41,7 @@ ENTRY(efi_call0)
41 addq $32, %rsp 41 addq $32, %rsp
42 RESTORE_XMM 42 RESTORE_XMM
43 ret 43 ret
44ENDPROC(efi_call0)
44 45
45ENTRY(efi_call1) 46ENTRY(efi_call1)
46 SAVE_XMM 47 SAVE_XMM
@@ -50,6 +51,7 @@ ENTRY(efi_call1)
50 addq $32, %rsp 51 addq $32, %rsp
51 RESTORE_XMM 52 RESTORE_XMM
52 ret 53 ret
54ENDPROC(efi_call1)
53 55
54ENTRY(efi_call2) 56ENTRY(efi_call2)
55 SAVE_XMM 57 SAVE_XMM
@@ -59,6 +61,7 @@ ENTRY(efi_call2)
59 addq $32, %rsp 61 addq $32, %rsp
60 RESTORE_XMM 62 RESTORE_XMM
61 ret 63 ret
64ENDPROC(efi_call2)
62 65
63ENTRY(efi_call3) 66ENTRY(efi_call3)
64 SAVE_XMM 67 SAVE_XMM
@@ -69,6 +72,7 @@ ENTRY(efi_call3)
69 addq $32, %rsp 72 addq $32, %rsp
70 RESTORE_XMM 73 RESTORE_XMM
71 ret 74 ret
75ENDPROC(efi_call3)
72 76
73ENTRY(efi_call4) 77ENTRY(efi_call4)
74 SAVE_XMM 78 SAVE_XMM
@@ -80,6 +84,7 @@ ENTRY(efi_call4)
80 addq $32, %rsp 84 addq $32, %rsp
81 RESTORE_XMM 85 RESTORE_XMM
82 ret 86 ret
87ENDPROC(efi_call4)
83 88
84ENTRY(efi_call5) 89ENTRY(efi_call5)
85 SAVE_XMM 90 SAVE_XMM
@@ -92,6 +97,7 @@ ENTRY(efi_call5)
92 addq $48, %rsp 97 addq $48, %rsp
93 RESTORE_XMM 98 RESTORE_XMM
94 ret 99 ret
100ENDPROC(efi_call5)
95 101
96ENTRY(efi_call6) 102ENTRY(efi_call6)
97 SAVE_XMM 103 SAVE_XMM
@@ -107,3 +113,4 @@ ENTRY(efi_call6)
107 addq $48, %rsp 113 addq $48, %rsp
108 RESTORE_XMM 114 RESTORE_XMM
109 ret 115 ret
116ENDPROC(efi_call6)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index e99206831459..899e8938e79f 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -47,7 +47,7 @@
47#include <asm/errno.h> 47#include <asm/errno.h>
48#include <asm/segment.h> 48#include <asm/segment.h>
49#include <asm/smp.h> 49#include <asm/smp.h>
50#include <asm/page.h> 50#include <asm/page_types.h>
51#include <asm/desc.h> 51#include <asm/desc.h>
52#include <asm/percpu.h> 52#include <asm/percpu.h>
53#include <asm/dwarf2.h> 53#include <asm/dwarf2.h>
@@ -1359,7 +1359,7 @@ nmi_espfix_stack:
1359 CFI_ADJUST_CFA_OFFSET 4 1359 CFI_ADJUST_CFA_OFFSET 4
1360 pushl %esp 1360 pushl %esp
1361 CFI_ADJUST_CFA_OFFSET 4 1361 CFI_ADJUST_CFA_OFFSET 4
1362 addw $4, (%esp) 1362 addl $4, (%esp)
1363 /* copy the iret frame of 12 bytes */ 1363 /* copy the iret frame of 12 bytes */
1364 .rept 3 1364 .rept 3
1365 pushl 16(%esp) 1365 pushl 16(%esp)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index fbcf96b295ff..83d1836b9467 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -48,7 +48,7 @@
48#include <asm/unistd.h> 48#include <asm/unistd.h>
49#include <asm/thread_info.h> 49#include <asm/thread_info.h>
50#include <asm/hw_irq.h> 50#include <asm/hw_irq.h>
51#include <asm/page.h> 51#include <asm/page_types.h>
52#include <asm/irqflags.h> 52#include <asm/irqflags.h>
53#include <asm/paravirt.h> 53#include <asm/paravirt.h>
54#include <asm/ftrace.h> 54#include <asm/ftrace.h>
@@ -77,20 +77,17 @@ ENTRY(ftrace_caller)
77 movq 8(%rbp), %rsi 77 movq 8(%rbp), %rsi
78 subq $MCOUNT_INSN_SIZE, %rdi 78 subq $MCOUNT_INSN_SIZE, %rdi
79 79
80.globl ftrace_call 80GLOBAL(ftrace_call)
81ftrace_call:
82 call ftrace_stub 81 call ftrace_stub
83 82
84 MCOUNT_RESTORE_FRAME 83 MCOUNT_RESTORE_FRAME
85 84
86#ifdef CONFIG_FUNCTION_GRAPH_TRACER 85#ifdef CONFIG_FUNCTION_GRAPH_TRACER
87.globl ftrace_graph_call 86GLOBAL(ftrace_graph_call)
88ftrace_graph_call:
89 jmp ftrace_stub 87 jmp ftrace_stub
90#endif 88#endif
91 89
92.globl ftrace_stub 90GLOBAL(ftrace_stub)
93ftrace_stub:
94 retq 91 retq
95END(ftrace_caller) 92END(ftrace_caller)
96 93
@@ -110,8 +107,7 @@ ENTRY(mcount)
110 jnz ftrace_graph_caller 107 jnz ftrace_graph_caller
111#endif 108#endif
112 109
113.globl ftrace_stub 110GLOBAL(ftrace_stub)
114ftrace_stub:
115 retq 111 retq
116 112
117trace: 113trace:
@@ -148,9 +144,7 @@ ENTRY(ftrace_graph_caller)
148 retq 144 retq
149END(ftrace_graph_caller) 145END(ftrace_graph_caller)
150 146
151 147GLOBAL(return_to_handler)
152.globl return_to_handler
153return_to_handler:
154 subq $80, %rsp 148 subq $80, %rsp
155 149
156 movq %rax, (%rsp) 150 movq %rax, (%rsp)
@@ -188,6 +182,7 @@ return_to_handler:
188ENTRY(native_usergs_sysret64) 182ENTRY(native_usergs_sysret64)
189 swapgs 183 swapgs
190 sysretq 184 sysretq
185ENDPROC(native_usergs_sysret64)
191#endif /* CONFIG_PARAVIRT */ 186#endif /* CONFIG_PARAVIRT */
192 187
193 188
@@ -633,16 +628,14 @@ tracesys:
633 * Syscall return path ending with IRET. 628 * Syscall return path ending with IRET.
634 * Has correct top of stack, but partial stack frame. 629 * Has correct top of stack, but partial stack frame.
635 */ 630 */
636 .globl int_ret_from_sys_call 631GLOBAL(int_ret_from_sys_call)
637 .globl int_with_check
638int_ret_from_sys_call:
639 DISABLE_INTERRUPTS(CLBR_NONE) 632 DISABLE_INTERRUPTS(CLBR_NONE)
640 TRACE_IRQS_OFF 633 TRACE_IRQS_OFF
641 testl $3,CS-ARGOFFSET(%rsp) 634 testl $3,CS-ARGOFFSET(%rsp)
642 je retint_restore_args 635 je retint_restore_args
643 movl $_TIF_ALLWORK_MASK,%edi 636 movl $_TIF_ALLWORK_MASK,%edi
644 /* edi: mask to check */ 637 /* edi: mask to check */
645int_with_check: 638GLOBAL(int_with_check)
646 LOCKDEP_SYS_EXIT_IRQ 639 LOCKDEP_SYS_EXIT_IRQ
647 GET_THREAD_INFO(%rcx) 640 GET_THREAD_INFO(%rcx)
648 movl TI_flags(%rcx),%edx 641 movl TI_flags(%rcx),%edx
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 2a0aad7718d5..c32ca19d591a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -11,8 +11,8 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/segment.h> 13#include <asm/segment.h>
14#include <asm/page.h> 14#include <asm/page_types.h>
15#include <asm/pgtable.h> 15#include <asm/pgtable_types.h>
16#include <asm/desc.h> 16#include <asm/desc.h>
17#include <asm/cache.h> 17#include <asm/cache.h>
18#include <asm/thread_info.h> 18#include <asm/thread_info.h>
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 2e648e3a5ea4..54b29bb24e71 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -329,8 +329,6 @@ early_idt_ripmsg:
329#endif /* CONFIG_EARLY_PRINTK */ 329#endif /* CONFIG_EARLY_PRINTK */
330 .previous 330 .previous
331 331
332.balign PAGE_SIZE
333
334#define NEXT_PAGE(name) \ 332#define NEXT_PAGE(name) \
335 .balign PAGE_SIZE; \ 333 .balign PAGE_SIZE; \
336ENTRY(name) 334ENTRY(name)
@@ -419,7 +417,7 @@ ENTRY(phys_base)
419 .section .bss, "aw", @nobits 417 .section .bss, "aw", @nobits
420 .align L1_CACHE_BYTES 418 .align L1_CACHE_BYTES
421ENTRY(idt_table) 419ENTRY(idt_table)
422 .skip 256 * 16 420 .skip IDT_ENTRIES * 16
423 421
424 .section .bss.page_aligned, "aw", @nobits 422 .section .bss.page_aligned, "aw", @nobits
425 .align PAGE_SIZE 423 .align PAGE_SIZE
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 11d5093eb281..df89102bef80 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -22,7 +22,6 @@
22#include <asm/pgtable.h> 22#include <asm/pgtable.h>
23#include <asm/desc.h> 23#include <asm/desc.h>
24#include <asm/apic.h> 24#include <asm/apic.h>
25#include <asm/arch_hooks.h>
26#include <asm/i8259.h> 25#include <asm/i8259.h>
27 26
28/* 27/*
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index e41980a373ab..99c4d308f16b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -85,19 +85,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
85 85
86 t->io_bitmap_max = bytes; 86 t->io_bitmap_max = bytes;
87 87
88#ifdef CONFIG_X86_32
89 /*
90 * Sets the lazy trigger so that the next I/O operation will
91 * reload the correct bitmap.
92 * Reset the owner so that a process switch will not set
93 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
94 */
95 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
96 tss->io_bitmap_owner = NULL;
97#else
98 /* Update the TSS: */ 88 /* Update the TSS: */
99 memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated); 89 memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
100#endif
101 90
102 put_cpu(); 91 put_cpu();
103 92
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 4beb9a13873d..9dc6b2b24275 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -212,7 +212,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
212} 212}
213 213
214#ifdef CONFIG_HOTPLUG_CPU 214#ifdef CONFIG_HOTPLUG_CPU
215#include <asm/genapic.h>
216 215
217/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ 216/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
218void fixup_irqs(void) 217void fixup_irqs(void)
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index bf629cadec1a..50b8c3a3006c 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -18,7 +18,7 @@
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/desc.h> 19#include <asm/desc.h>
20#include <asm/apic.h> 20#include <asm/apic.h>
21#include <asm/arch_hooks.h> 21#include <asm/setup.h>
22#include <asm/i8259.h> 22#include <asm/i8259.h>
23#include <asm/traps.h> 23#include <asm/traps.h>
24 24
@@ -127,8 +127,8 @@ void __init native_init_IRQ(void)
127{ 127{
128 int i; 128 int i;
129 129
130 /* all the set up before the call gates are initialised */ 130 /* Execute any quirks before the call gates are initialised: */
131 pre_intr_init_hook(); 131 x86_quirk_pre_intr_init();
132 132
133 /* 133 /*
134 * Cover the whole vector space, no vector can escape 134 * Cover the whole vector space, no vector can escape
@@ -188,10 +188,11 @@ void __init native_init_IRQ(void)
188 if (!acpi_ioapic) 188 if (!acpi_ioapic)
189 setup_irq(2, &irq2); 189 setup_irq(2, &irq2);
190 190
191 /* setup after call gates are initialised (usually add in 191 /*
192 * the architecture specific gates) 192 * Call quirks after call gates are initialised (usually add in
193 * the architecture specific gates):
193 */ 194 */
194 intr_init_hook(); 195 x86_quirk_intr_init();
195 196
196 /* 197 /*
197 * External FPU? Set up irq13 if so, for 198 * External FPU? Set up irq13 if so, for
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 5c4f55483849..eedfaebe1063 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -46,7 +46,7 @@
46#include <asm/apicdef.h> 46#include <asm/apicdef.h>
47#include <asm/system.h> 47#include <asm/system.h>
48 48
49#include <asm/genapic.h> 49#include <asm/apic.h>
50 50
51/* 51/*
52 * Put the error code here just in case the user cares: 52 * Put the error code here just in case the user cares:
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 652fce6d2cce..137f2e8132df 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -19,7 +19,6 @@
19#include <linux/clocksource.h> 19#include <linux/clocksource.h>
20#include <linux/kvm_para.h> 20#include <linux/kvm_para.h>
21#include <asm/pvclock.h> 21#include <asm/pvclock.h>
22#include <asm/arch_hooks.h>
23#include <asm/msr.h> 22#include <asm/msr.h>
24#include <asm/apic.h> 23#include <asm/apic.h>
25#include <linux/percpu.h> 24#include <linux/percpu.h>
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 37f420018a41..f5fc8c781a62 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -121,7 +121,7 @@ static void machine_kexec_page_table_set_one(
121static void machine_kexec_prepare_page_tables(struct kimage *image) 121static void machine_kexec_prepare_page_tables(struct kimage *image)
122{ 122{
123 void *control_page; 123 void *control_page;
124 pmd_t *pmd = 0; 124 pmd_t *pmd = NULL;
125 125
126 control_page = page_address(image->control_code_page); 126 control_page = page_address(image->control_code_page);
127#ifdef CONFIG_X86_PAE 127#ifdef CONFIG_X86_PAE
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c
index 2dc183758be3..845d80ce1ef1 100644
--- a/arch/x86/kernel/mca_32.c
+++ b/arch/x86/kernel/mca_32.c
@@ -51,7 +51,6 @@
51#include <linux/ioport.h> 51#include <linux/ioport.h>
52#include <asm/uaccess.h> 52#include <asm/uaccess.h>
53#include <linux/init.h> 53#include <linux/init.h>
54#include <asm/arch_hooks.h>
55 54
56static unsigned char which_scsi; 55static unsigned char which_scsi;
57 56
@@ -474,6 +473,4 @@ void __kprobes mca_handle_nmi(void)
474 * adapter was responsible for the error. 473 * adapter was responsible for the error.
475 */ 474 */
476 bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback); 475 bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback);
477 476}
478 mca_nmi_hook();
479} /* mca_handle_nmi */
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 200764453195..37cb1bda1baf 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -29,7 +29,7 @@
29#include <asm/setup.h> 29#include <asm/setup.h>
30#include <asm/smp.h> 30#include <asm/smp.h>
31 31
32#include <asm/genapic.h> 32#include <asm/apic.h>
33/* 33/*
34 * Checksum an MP configuration block. 34 * Checksum an MP configuration block.
35 */ 35 */
@@ -710,13 +710,22 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
710 * of physical memory; so that simply reserving 710 * of physical memory; so that simply reserving
711 * PAGE_SIZE from mpf->physptr yields BUG() 711 * PAGE_SIZE from mpf->physptr yields BUG()
712 * in reserve_bootmem. 712 * in reserve_bootmem.
713 * also need to make sure physptr is below
714 * max_low_pfn;
715 * we don't need to reserve the area above max_low_pfn
713 */ 716 */
714 unsigned long end = max_low_pfn * PAGE_SIZE; 717 unsigned long end = max_low_pfn * PAGE_SIZE;
715 if (mpf->physptr + size > end) 718
716 size = end - mpf->physptr; 719 if (mpf->physptr < end) {
717#endif 720 if (mpf->physptr + size > end)
721 size = end - mpf->physptr;
722 reserve_bootmem_generic(mpf->physptr, size,
723 BOOTMEM_DEFAULT);
724 }
725#else
718 reserve_bootmem_generic(mpf->physptr, size, 726 reserve_bootmem_generic(mpf->physptr, size,
719 BOOTMEM_DEFAULT); 727 BOOTMEM_DEFAULT);
728#endif
720 } 729 }
721 730
722 return 1; 731 return 1;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 6dc4dca255e4..63dd358d8ee1 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -28,7 +28,6 @@
28#include <asm/paravirt.h> 28#include <asm/paravirt.h>
29#include <asm/desc.h> 29#include <asm/desc.h>
30#include <asm/setup.h> 30#include <asm/setup.h>
31#include <asm/arch_hooks.h>
32#include <asm/pgtable.h> 31#include <asm/pgtable.h>
33#include <asm/time.h> 32#include <asm/time.h>
34#include <asm/pgalloc.h> 33#include <asm/pgalloc.h>
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 87b69d4fac16..6afa5232dbb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,8 +1,8 @@
1#include <linux/errno.h> 1#include <linux/errno.h>
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/mm.h> 3#include <linux/mm.h>
4#include <asm/idle.h>
5#include <linux/smp.h> 4#include <linux/smp.h>
5#include <linux/prctl.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/module.h> 8#include <linux/module.h>
@@ -11,6 +11,9 @@
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <asm/system.h> 12#include <asm/system.h>
13#include <asm/apic.h> 13#include <asm/apic.h>
14#include <asm/idle.h>
15#include <asm/uaccess.h>
16#include <asm/i387.h>
14 17
15unsigned long idle_halt; 18unsigned long idle_halt;
16EXPORT_SYMBOL(idle_halt); 19EXPORT_SYMBOL(idle_halt);
@@ -56,6 +59,192 @@ void arch_task_cache_init(void)
56} 59}
57 60
58/* 61/*
62 * Free current thread data structures etc..
63 */
64void exit_thread(void)
65{
66 struct task_struct *me = current;
67 struct thread_struct *t = &me->thread;
68
69 if (me->thread.io_bitmap_ptr) {
70 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
71
72 kfree(t->io_bitmap_ptr);
73 t->io_bitmap_ptr = NULL;
74 clear_thread_flag(TIF_IO_BITMAP);
75 /*
76 * Careful, clear this in the TSS too:
77 */
78 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
79 t->io_bitmap_max = 0;
80 put_cpu();
81 }
82
83 ds_exit_thread(current);
84}
85
86void flush_thread(void)
87{
88 struct task_struct *tsk = current;
89
90#ifdef CONFIG_X86_64
91 if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
92 clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
93 if (test_tsk_thread_flag(tsk, TIF_IA32)) {
94 clear_tsk_thread_flag(tsk, TIF_IA32);
95 } else {
96 set_tsk_thread_flag(tsk, TIF_IA32);
97 current_thread_info()->status |= TS_COMPAT;
98 }
99 }
100#endif
101
102 clear_tsk_thread_flag(tsk, TIF_DEBUG);
103
104 tsk->thread.debugreg0 = 0;
105 tsk->thread.debugreg1 = 0;
106 tsk->thread.debugreg2 = 0;
107 tsk->thread.debugreg3 = 0;
108 tsk->thread.debugreg6 = 0;
109 tsk->thread.debugreg7 = 0;
110 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
111 /*
112 * Forget coprocessor state..
113 */
114 tsk->fpu_counter = 0;
115 clear_fpu(tsk);
116 clear_used_math();
117}
118
119static void hard_disable_TSC(void)
120{
121 write_cr4(read_cr4() | X86_CR4_TSD);
122}
123
124void disable_TSC(void)
125{
126 preempt_disable();
127 if (!test_and_set_thread_flag(TIF_NOTSC))
128 /*
129 * Must flip the CPU state synchronously with
130 * TIF_NOTSC in the current running context.
131 */
132 hard_disable_TSC();
133 preempt_enable();
134}
135
136static void hard_enable_TSC(void)
137{
138 write_cr4(read_cr4() & ~X86_CR4_TSD);
139}
140
141static void enable_TSC(void)
142{
143 preempt_disable();
144 if (test_and_clear_thread_flag(TIF_NOTSC))
145 /*
146 * Must flip the CPU state synchronously with
147 * TIF_NOTSC in the current running context.
148 */
149 hard_enable_TSC();
150 preempt_enable();
151}
152
153int get_tsc_mode(unsigned long adr)
154{
155 unsigned int val;
156
157 if (test_thread_flag(TIF_NOTSC))
158 val = PR_TSC_SIGSEGV;
159 else
160 val = PR_TSC_ENABLE;
161
162 return put_user(val, (unsigned int __user *)adr);
163}
164
165int set_tsc_mode(unsigned int val)
166{
167 if (val == PR_TSC_SIGSEGV)
168 disable_TSC();
169 else if (val == PR_TSC_ENABLE)
170 enable_TSC();
171 else
172 return -EINVAL;
173
174 return 0;
175}
176
177void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
178 struct tss_struct *tss)
179{
180 struct thread_struct *prev, *next;
181
182 prev = &prev_p->thread;
183 next = &next_p->thread;
184
185 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
186 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
187 ds_switch_to(prev_p, next_p);
188 else if (next->debugctlmsr != prev->debugctlmsr)
189 update_debugctlmsr(next->debugctlmsr);
190
191 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
192 set_debugreg(next->debugreg0, 0);
193 set_debugreg(next->debugreg1, 1);
194 set_debugreg(next->debugreg2, 2);
195 set_debugreg(next->debugreg3, 3);
196 /* no 4 and 5 */
197 set_debugreg(next->debugreg6, 6);
198 set_debugreg(next->debugreg7, 7);
199 }
200
201 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
202 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
203 /* prev and next are different */
204 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
205 hard_disable_TSC();
206 else
207 hard_enable_TSC();
208 }
209
210 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
211 /*
212 * Copy the relevant range of the IO bitmap.
213 * Normally this is 128 bytes or less:
214 */
215 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
216 max(prev->io_bitmap_max, next->io_bitmap_max));
217 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
218 /*
219 * Clear any possible leftover bits:
220 */
221 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
222 }
223}
224
225int sys_fork(struct pt_regs *regs)
226{
227 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
228}
229
230/*
231 * This is trivial, and on the face of it looks like it
232 * could equally well be done in user mode.
233 *
234 * Not so, for quite unobvious reasons - register pressure.
235 * In user mode vfork() cannot have a stack frame, and if
236 * done by calling the "clone()" system call directly, you
237 * do not have enough call-clobbered registers to hold all
238 * the information you need.
239 */
240int sys_vfork(struct pt_regs *regs)
241{
242 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
243 NULL, NULL);
244}
245
246
247/*
59 * Idle related variables and functions 248 * Idle related variables and functions
60 */ 249 */
61unsigned long boot_option_idle_override = 0; 250unsigned long boot_option_idle_override = 0;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index fec79ad85dc6..14014d766cad 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -111,9 +111,6 @@ void cpu_idle(void)
111 check_pgt_cache(); 111 check_pgt_cache();
112 rmb(); 112 rmb();
113 113
114 if (rcu_pending(cpu))
115 rcu_check_callbacks(cpu, 0);
116
117 if (cpu_is_offline(cpu)) 114 if (cpu_is_offline(cpu))
118 play_dead(); 115 play_dead();
119 116
@@ -233,55 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
233} 230}
234EXPORT_SYMBOL(kernel_thread); 231EXPORT_SYMBOL(kernel_thread);
235 232
236/*
237 * Free current thread data structures etc..
238 */
239void exit_thread(void)
240{
241 /* The process may have allocated an io port bitmap... nuke it. */
242 if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
243 struct task_struct *tsk = current;
244 struct thread_struct *t = &tsk->thread;
245 int cpu = get_cpu();
246 struct tss_struct *tss = &per_cpu(init_tss, cpu);
247
248 kfree(t->io_bitmap_ptr);
249 t->io_bitmap_ptr = NULL;
250 clear_thread_flag(TIF_IO_BITMAP);
251 /*
252 * Careful, clear this in the TSS too:
253 */
254 memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
255 t->io_bitmap_max = 0;
256 tss->io_bitmap_owner = NULL;
257 tss->io_bitmap_max = 0;
258 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
259 put_cpu();
260 }
261
262 ds_exit_thread(current);
263}
264
265void flush_thread(void)
266{
267 struct task_struct *tsk = current;
268
269 tsk->thread.debugreg0 = 0;
270 tsk->thread.debugreg1 = 0;
271 tsk->thread.debugreg2 = 0;
272 tsk->thread.debugreg3 = 0;
273 tsk->thread.debugreg6 = 0;
274 tsk->thread.debugreg7 = 0;
275 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
276 clear_tsk_thread_flag(tsk, TIF_DEBUG);
277 /*
278 * Forget coprocessor state..
279 */
280 tsk->fpu_counter = 0;
281 clear_fpu(tsk);
282 clear_used_math();
283}
284
285void release_thread(struct task_struct *dead_task) 233void release_thread(struct task_struct *dead_task)
286{ 234{
287 BUG_ON(dead_task->mm); 235 BUG_ON(dead_task->mm);
@@ -369,127 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
369} 317}
370EXPORT_SYMBOL_GPL(start_thread); 318EXPORT_SYMBOL_GPL(start_thread);
371 319
372static void hard_disable_TSC(void)
373{
374 write_cr4(read_cr4() | X86_CR4_TSD);
375}
376
377void disable_TSC(void)
378{
379 preempt_disable();
380 if (!test_and_set_thread_flag(TIF_NOTSC))
381 /*
382 * Must flip the CPU state synchronously with
383 * TIF_NOTSC in the current running context.
384 */
385 hard_disable_TSC();
386 preempt_enable();
387}
388
389static void hard_enable_TSC(void)
390{
391 write_cr4(read_cr4() & ~X86_CR4_TSD);
392}
393
394static void enable_TSC(void)
395{
396 preempt_disable();
397 if (test_and_clear_thread_flag(TIF_NOTSC))
398 /*
399 * Must flip the CPU state synchronously with
400 * TIF_NOTSC in the current running context.
401 */
402 hard_enable_TSC();
403 preempt_enable();
404}
405
406int get_tsc_mode(unsigned long adr)
407{
408 unsigned int val;
409
410 if (test_thread_flag(TIF_NOTSC))
411 val = PR_TSC_SIGSEGV;
412 else
413 val = PR_TSC_ENABLE;
414
415 return put_user(val, (unsigned int __user *)adr);
416}
417
418int set_tsc_mode(unsigned int val)
419{
420 if (val == PR_TSC_SIGSEGV)
421 disable_TSC();
422 else if (val == PR_TSC_ENABLE)
423 enable_TSC();
424 else
425 return -EINVAL;
426
427 return 0;
428}
429
430static noinline void
431__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
432 struct tss_struct *tss)
433{
434 struct thread_struct *prev, *next;
435
436 prev = &prev_p->thread;
437 next = &next_p->thread;
438
439 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
440 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
441 ds_switch_to(prev_p, next_p);
442 else if (next->debugctlmsr != prev->debugctlmsr)
443 update_debugctlmsr(next->debugctlmsr);
444
445 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
446 set_debugreg(next->debugreg0, 0);
447 set_debugreg(next->debugreg1, 1);
448 set_debugreg(next->debugreg2, 2);
449 set_debugreg(next->debugreg3, 3);
450 /* no 4 and 5 */
451 set_debugreg(next->debugreg6, 6);
452 set_debugreg(next->debugreg7, 7);
453 }
454
455 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
456 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
457 /* prev and next are different */
458 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
459 hard_disable_TSC();
460 else
461 hard_enable_TSC();
462 }
463
464 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
465 /*
466 * Disable the bitmap via an invalid offset. We still cache
467 * the previous bitmap owner and the IO bitmap contents:
468 */
469 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
470 return;
471 }
472
473 if (likely(next == tss->io_bitmap_owner)) {
474 /*
475 * Previous owner of the bitmap (hence the bitmap content)
476 * matches the next task, we dont have to do anything but
477 * to set a valid offset in the TSS:
478 */
479 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
480 return;
481 }
482 /*
483 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
484 * and we let the task to get a GPF in case an I/O instruction
485 * is performed. The handler of the GPF will verify that the
486 * faulting task has a valid I/O bitmap and, it true, does the
487 * real copy and restart the instruction. This will save us
488 * redundant copies when the currently switched task does not
489 * perform any I/O during its timeslice.
490 */
491 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
492}
493 320
494/* 321/*
495 * switch_to(x,yn) should switch tasks from x to y. 322 * switch_to(x,yn) should switch tasks from x to y.
@@ -603,11 +430,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
603 return prev_p; 430 return prev_p;
604} 431}
605 432
606int sys_fork(struct pt_regs *regs)
607{
608 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
609}
610
611int sys_clone(struct pt_regs *regs) 433int sys_clone(struct pt_regs *regs)
612{ 434{
613 unsigned long clone_flags; 435 unsigned long clone_flags;
@@ -624,21 +446,6 @@ int sys_clone(struct pt_regs *regs)
624} 446}
625 447
626/* 448/*
627 * This is trivial, and on the face of it looks like it
628 * could equally well be done in user mode.
629 *
630 * Not so, for quite unobvious reasons - register pressure.
631 * In user mode vfork() cannot have a stack frame, and if
632 * done by calling the "clone()" system call directly, you
633 * do not have enough call-clobbered registers to hold all
634 * the information you need.
635 */
636int sys_vfork(struct pt_regs *regs)
637{
638 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL);
639}
640
641/*
642 * sys_execve() executes a new program. 449 * sys_execve() executes a new program.
643 */ 450 */
644int sys_execve(struct pt_regs *regs) 451int sys_execve(struct pt_regs *regs)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 836ef6575f01..abb7e6a7f0c6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -237,61 +237,6 @@ void show_regs(struct pt_regs *regs)
237 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 237 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
238} 238}
239 239
240/*
241 * Free current thread data structures etc..
242 */
243void exit_thread(void)
244{
245 struct task_struct *me = current;
246 struct thread_struct *t = &me->thread;
247
248 if (me->thread.io_bitmap_ptr) {
249 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
250
251 kfree(t->io_bitmap_ptr);
252 t->io_bitmap_ptr = NULL;
253 clear_thread_flag(TIF_IO_BITMAP);
254 /*
255 * Careful, clear this in the TSS too:
256 */
257 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
258 t->io_bitmap_max = 0;
259 put_cpu();
260 }
261
262 ds_exit_thread(current);
263}
264
265void flush_thread(void)
266{
267 struct task_struct *tsk = current;
268
269 if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
270 clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
271 if (test_tsk_thread_flag(tsk, TIF_IA32)) {
272 clear_tsk_thread_flag(tsk, TIF_IA32);
273 } else {
274 set_tsk_thread_flag(tsk, TIF_IA32);
275 current_thread_info()->status |= TS_COMPAT;
276 }
277 }
278 clear_tsk_thread_flag(tsk, TIF_DEBUG);
279
280 tsk->thread.debugreg0 = 0;
281 tsk->thread.debugreg1 = 0;
282 tsk->thread.debugreg2 = 0;
283 tsk->thread.debugreg3 = 0;
284 tsk->thread.debugreg6 = 0;
285 tsk->thread.debugreg7 = 0;
286 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
287 /*
288 * Forget coprocessor state..
289 */
290 tsk->fpu_counter = 0;
291 clear_fpu(tsk);
292 clear_used_math();
293}
294
295void release_thread(struct task_struct *dead_task) 240void release_thread(struct task_struct *dead_task)
296{ 241{
297 if (dead_task->mm) { 242 if (dead_task->mm) {
@@ -425,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
425} 370}
426EXPORT_SYMBOL_GPL(start_thread); 371EXPORT_SYMBOL_GPL(start_thread);
427 372
428static void hard_disable_TSC(void)
429{
430 write_cr4(read_cr4() | X86_CR4_TSD);
431}
432
433void disable_TSC(void)
434{
435 preempt_disable();
436 if (!test_and_set_thread_flag(TIF_NOTSC))
437 /*
438 * Must flip the CPU state synchronously with
439 * TIF_NOTSC in the current running context.
440 */
441 hard_disable_TSC();
442 preempt_enable();
443}
444
445static void hard_enable_TSC(void)
446{
447 write_cr4(read_cr4() & ~X86_CR4_TSD);
448}
449
450static void enable_TSC(void)
451{
452 preempt_disable();
453 if (test_and_clear_thread_flag(TIF_NOTSC))
454 /*
455 * Must flip the CPU state synchronously with
456 * TIF_NOTSC in the current running context.
457 */
458 hard_enable_TSC();
459 preempt_enable();
460}
461
462int get_tsc_mode(unsigned long adr)
463{
464 unsigned int val;
465
466 if (test_thread_flag(TIF_NOTSC))
467 val = PR_TSC_SIGSEGV;
468 else
469 val = PR_TSC_ENABLE;
470
471 return put_user(val, (unsigned int __user *)adr);
472}
473
474int set_tsc_mode(unsigned int val)
475{
476 if (val == PR_TSC_SIGSEGV)
477 disable_TSC();
478 else if (val == PR_TSC_ENABLE)
479 enable_TSC();
480 else
481 return -EINVAL;
482
483 return 0;
484}
485
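
get_tsc_mode()/set_tsc_mode() are the backend for the PR_GET_TSC/PR_SET_TSC prctl(2) commands: the PR_TSC_ENABLE and PR_TSC_SIGSEGV values used above are the ones user space passes in, and the CR4.TSD toggling is what makes a forbidden RDTSC fault. A short user-space sketch of the interface (error handling omitted):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
        int mode = 0;

        prctl(PR_GET_TSC, &mode);                 /* read the current mode  */
        printf("tsc: %s\n", mode == PR_TSC_ENABLE ? "readable"
                                                  : "traps with SIGSEGV");

        prctl(PR_SET_TSC, PR_TSC_SIGSEGV);        /* later rdtsc now faults */
        return 0;
    }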
486/*
487 * This special macro can be used to load a debugging register
488 */
489#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
490
491static inline void __switch_to_xtra(struct task_struct *prev_p,
492 struct task_struct *next_p,
493 struct tss_struct *tss)
494{
495 struct thread_struct *prev, *next;
496
497 prev = &prev_p->thread,
498 next = &next_p->thread;
499
500 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
501 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
502 ds_switch_to(prev_p, next_p);
503 else if (next->debugctlmsr != prev->debugctlmsr)
504 update_debugctlmsr(next->debugctlmsr);
505
506 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
507 loaddebug(next, 0);
508 loaddebug(next, 1);
509 loaddebug(next, 2);
510 loaddebug(next, 3);
511 /* no 4 and 5 */
512 loaddebug(next, 6);
513 loaddebug(next, 7);
514 }
515
516 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
517 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
518 /* prev and next are different */
519 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
520 hard_disable_TSC();
521 else
522 hard_enable_TSC();
523 }
524
525 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
526 /*
527 * Copy the relevant range of the IO bitmap.
528 * Normally this is 128 bytes or less:
529 */
530 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
531 max(prev->io_bitmap_max, next->io_bitmap_max));
532 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
533 /*
534 * Clear any possible leftover bits:
535 */
536 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
537 }
538}
539
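
This 64-bit variant copies the bitmap eagerly at every switch instead of lazily; copying max(prev->io_bitmap_max, next->io_bitmap_max) bytes matters because the task's bitmap buffer is kept 0xff-padded ("no access") beyond its active range, so the longer copy also wipes any ports the previous task had opened past the next task's range. A toy demonstration of why the max() is needed (sizes made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char tss_bm[8], next_bm[8];

        memset(tss_bm, 0x00, 8);     /* prev left 8 bytes of "allowed" ports    */
        memset(next_bm, 0xff, 8);    /* next's buffer: 0xff-padded past its max */
        next_bm[0] = 0xfe;           /* next only allows port 0                 */

        /* prev_max = 8, next_max = 1: copying max(8, 1) = 8 bytes from
         * next's buffer overwrites the stale grants prev left in the TSS. */
        memcpy(tss_bm, next_bm, 8);
        printf("%02x ... %02x\n", tss_bm[0], tss_bm[7]);   /* fe ... ff */
        return 0;
    }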
540/* 373/*
541 * switch_to(x,y) should switch tasks from x to y. 374 * switch_to(x,y) should switch tasks from x to y.
542 * 375 *
@@ -694,11 +527,6 @@ void set_personality_64bit(void)
694 current->personality &= ~READ_IMPLIES_EXEC; 527 current->personality &= ~READ_IMPLIES_EXEC;
695} 528}
696 529
697asmlinkage long sys_fork(struct pt_regs *regs)
698{
699 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
700}
701
702asmlinkage long 530asmlinkage long
703sys_clone(unsigned long clone_flags, unsigned long newsp, 531sys_clone(unsigned long clone_flags, unsigned long newsp,
704 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) 532 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
@@ -708,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
708 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); 536 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
709} 537}
710 538
711/*
712 * This is trivial, and on the face of it looks like it
713 * could equally well be done in user mode.
714 *
715 * Not so, for quite unobvious reasons - register pressure.
716 * In user mode vfork() cannot have a stack frame, and if
717 * done by calling the "clone()" system call directly, you
718 * do not have enough call-clobbered registers to hold all
719 * the information you need.
720 */
721asmlinkage long sys_vfork(struct pt_regs *regs)
722{
723 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
724 NULL, NULL);
725}
726
727unsigned long get_wchan(struct task_struct *p) 539unsigned long get_wchan(struct task_struct *p)
728{ 540{
729 unsigned long stack; 541 unsigned long stack;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index d2f7cd5b2c83..3d9672e59c16 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -268,7 +268,7 @@ static unsigned long debugreg_addr_limit(struct task_struct *task)
268 if (test_tsk_thread_flag(task, TIF_IA32)) 268 if (test_tsk_thread_flag(task, TIF_IA32))
269 return IA32_PAGE_OFFSET - 3; 269 return IA32_PAGE_OFFSET - 3;
270#endif 270#endif
271 return TASK_SIZE64 - 7; 271 return TASK_SIZE_MAX - 7;
272} 272}
273 273
274#endif /* CONFIG_X86_32 */ 274#endif /* CONFIG_X86_32 */
@@ -1383,7 +1383,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1383#ifdef CONFIG_X86_32 1383#ifdef CONFIG_X86_32
1384# define IS_IA32 1 1384# define IS_IA32 1
1385#elif defined CONFIG_IA32_EMULATION 1385#elif defined CONFIG_IA32_EMULATION
1386# define IS_IA32 test_thread_flag(TIF_IA32) 1386# define IS_IA32 is_compat_task()
1387#else 1387#else
1388# define IS_IA32 0 1388# define IS_IA32 0
1389#endif 1389#endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 32e8f0af292c..1cc18d439bbb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -24,8 +24,6 @@
24# include <asm/iommu.h> 24# include <asm/iommu.h>
25#endif 25#endif
26 26
27#include <asm/genapic.h>
28
29/* 27/*
30 * Power off function, if any 28 * Power off function, if any
31 */ 29 */
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index a160f3119725..2064d0aa8d28 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/page.h> 10#include <asm/page_types.h>
11#include <asm/kexec.h> 11#include <asm/kexec.h>
12#include <asm/processor-flags.h> 12#include <asm/processor-flags.h>
13 13
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index b0bbdd4829c9..d32cfb27a479 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -7,10 +7,10 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/page.h> 10#include <asm/page_types.h>
11#include <asm/kexec.h> 11#include <asm/kexec.h>
12#include <asm/processor-flags.h> 12#include <asm/processor-flags.h>
13#include <asm/pgtable.h> 13#include <asm/pgtable_types.h>
14 14
15/* 15/*
16 * Must be relocatable PIC code callable as a C function 16 * Must be relocatable PIC code callable as a C function
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 8fce6c714514..4c54bc0d8ff3 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -74,8 +74,9 @@
74#include <asm/e820.h> 74#include <asm/e820.h>
75#include <asm/mpspec.h> 75#include <asm/mpspec.h>
76#include <asm/setup.h> 76#include <asm/setup.h>
77#include <asm/arch_hooks.h>
78#include <asm/efi.h> 77#include <asm/efi.h>
78#include <asm/timer.h>
79#include <asm/i8259.h>
79#include <asm/sections.h> 80#include <asm/sections.h>
80#include <asm/dmi.h> 81#include <asm/dmi.h>
81#include <asm/io_apic.h> 82#include <asm/io_apic.h>
@@ -97,7 +98,6 @@
97#include <asm/mmu_context.h> 98#include <asm/mmu_context.h>
98#include <asm/proto.h> 99#include <asm/proto.h>
99 100
100#include <asm/genapic.h>
101#include <asm/paravirt.h> 101#include <asm/paravirt.h>
102#include <asm/hypervisor.h> 102#include <asm/hypervisor.h>
103 103
@@ -600,19 +600,7 @@ static int __init setup_elfcorehdr(char *arg)
600early_param("elfcorehdr", setup_elfcorehdr); 600early_param("elfcorehdr", setup_elfcorehdr);
601#endif 601#endif
602 602
603static int __init default_update_genapic(void) 603static struct x86_quirks default_x86_quirks __initdata;
604{
605#ifdef CONFIG_SMP
606 if (!apic->wakeup_cpu)
607 apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
608#endif
609
610 return 0;
611}
612
613static struct x86_quirks default_x86_quirks __initdata = {
614 .update_genapic = default_update_genapic,
615};
616 604
617struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; 605struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
618 606
@@ -669,7 +657,6 @@ void __init setup_arch(char **cmdline_p)
669#ifdef CONFIG_X86_32 657#ifdef CONFIG_X86_32
670 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); 658 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
671 visws_early_detect(); 659 visws_early_detect();
672 pre_setup_arch_hook();
673#else 660#else
674 printk(KERN_INFO "Command line: %s\n", boot_command_line); 661 printk(KERN_INFO "Command line: %s\n", boot_command_line);
675#endif 662#endif
@@ -836,8 +823,7 @@ void __init setup_arch(char **cmdline_p)
836#else 823#else
837 num_physpages = max_pfn; 824 num_physpages = max_pfn;
838 825
839 if (cpu_has_x2apic) 826 check_x2apic();
840 check_x2apic();
841 827
842 /* How many end-of-memory variables you have, grandma! */ 828 /* How many end-of-memory variables you have, grandma! */
843 /* need this before calling reserve_initrd */ 829 /* need this before calling reserve_initrd */
@@ -877,9 +863,7 @@ void __init setup_arch(char **cmdline_p)
877 863
878 reserve_initrd(); 864 reserve_initrd();
879 865
880#ifdef CONFIG_X86_64
881 vsmp_init(); 866 vsmp_init();
882#endif
883 867
884 io_delay_init(); 868 io_delay_init();
885 869
@@ -987,4 +971,95 @@ void __init setup_arch(char **cmdline_p)
987#endif 971#endif
988} 972}
989 973
974#ifdef CONFIG_X86_32
975
976/**
977 * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
978 *
979 * Description:
980 * Perform any necessary interrupt initialisation prior to setting up
981 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
982 * interrupts should be initialised here if the machine emulates a PC
983 * in any way.
984 **/
985void __init x86_quirk_pre_intr_init(void)
986{
987 if (x86_quirks->arch_pre_intr_init) {
988 if (x86_quirks->arch_pre_intr_init())
989 return;
990 }
991 init_ISA_irqs();
992}
993
994/**
995 * x86_quirk_intr_init - post gate setup interrupt initialisation
996 *
997 * Description:
998 * Fill in any interrupts that may have been left out by the general
 999 * init_IRQ() routine. Interrupts having to do with the machine rather
 1000 * than the devices on the I/O bus (like APIC interrupts in Intel MP
1001 * systems) are started here.
1002 **/
1003void __init x86_quirk_intr_init(void)
1004{
1005 if (x86_quirks->arch_intr_init) {
1006 if (x86_quirks->arch_intr_init())
1007 return;
1008 }
1009}
990 1010
1011/**
1012 * x86_quirk_trap_init - initialise system specific traps
1013 *
1014 * Description:
1015 * Called as the final act of trap_init(). Used in VISWS to initialise
1016 * the various board specific APIC traps.
1017 **/
1018void __init x86_quirk_trap_init(void)
1019{
1020 if (x86_quirks->arch_trap_init) {
1021 if (x86_quirks->arch_trap_init())
1022 return;
1023 }
1024}
1025
1026static struct irqaction irq0 = {
1027 .handler = timer_interrupt,
1028 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
1029 .mask = CPU_MASK_NONE,
1030 .name = "timer"
1031};
1032
1033/**
 1034 * x86_quirk_pre_time_init - do any specific initialisations before time setup.
1035 *
1036 **/
1037void __init x86_quirk_pre_time_init(void)
1038{
1039 if (x86_quirks->arch_pre_time_init)
1040 x86_quirks->arch_pre_time_init();
1041}
1042
1043/**
1044 * x86_quirk_time_init - do any specific initialisations for the system timer.
1045 *
1046 * Description:
1047 * Must plug the system timer interrupt source at HZ into the IRQ listed
1048 * in irq_vectors.h:TIMER_IRQ
1049 **/
1050void __init x86_quirk_time_init(void)
1051{
1052 if (x86_quirks->arch_time_init) {
1053 /*
1054 * A nonzero return code does not mean failure, it means
1055 * that the architecture quirk does not want any
1056 * generic (timer) setup to be performed after this:
1057 */
1058 if (x86_quirks->arch_time_init())
1059 return;
1060 }
1061
1062 irq0.mask = cpumask_of_cpu(0);
1063 setup_irq(0, &irq0);
1064}
1065#endif /* CONFIG_X86_32 */
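
Taken together, these x86_quirk_* helpers replace the per-subarch hook functions (pre_intr_init_hook(), time_init_hook(), and friends): a platform fills in the callbacks of struct x86_quirks and the generic code consults them, running its default behaviour when a callback is absent or returns 0 and skipping it when the callback returns non-zero. A rough, hypothetical registration, using only the callback names referenced above (the myboard_* functions are invented for this sketch):

    /* Kernel-context sketch -- not standalone code. */
    static int myboard_pre_intr_init(void)
    {
        /* board-specific PIC setup ... */
        return 0;        /* 0: still run the generic init_ISA_irqs()    */
    }

    static int myboard_time_init(void)
    {
        /* program a board-specific timer instead of the PIT ... */
        return 1;        /* non-zero: skip the generic IRQ0 timer setup */
    }

    static struct x86_quirks myboard_quirks __initdata = {
        .arch_pre_intr_init = myboard_pre_intr_init,
        .arch_time_init     = myboard_time_init,
    };

    /* assigned very early in the platform's setup code: */
    /* x86_quirks = &myboard_quirks; */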
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 7cdcd16885ed..d2cc6428c587 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -187,40 +187,35 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
187/* 187/*
188 * Set up a signal frame. 188 * Set up a signal frame.
189 */ 189 */
190#ifdef CONFIG_X86_32
191static const struct {
192 u16 poplmovl;
193 u32 val;
194 u16 int80;
195} __attribute__((packed)) retcode = {
196 0xb858, /* popl %eax; movl $..., %eax */
197 __NR_sigreturn,
198 0x80cd, /* int $0x80 */
199};
200
201static const struct {
202 u8 movl;
203 u32 val;
204 u16 int80;
205 u8 pad;
206} __attribute__((packed)) rt_retcode = {
207 0xb8, /* movl $..., %eax */
208 __NR_rt_sigreturn,
209 0x80cd, /* int $0x80 */
210 0
211};
212 190
213/* 191/*
214 * Determine which stack to use.. 192 * Determine which stack to use..
215 */ 193 */
194static unsigned long align_sigframe(unsigned long sp)
195{
196#ifdef CONFIG_X86_32
197 /*
198 * Align the stack pointer according to the i386 ABI,
199 * i.e. so that on function entry ((sp + 4) & 15) == 0.
200 */
201 sp = ((sp + 4) & -16ul) - 4;
202#else /* !CONFIG_X86_32 */
203 sp = round_down(sp, 16) - 8;
204#endif
205 return sp;
206}
207
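
The two branches of align_sigframe() encode the respective ABI rules: i386 wants ((sp + 4) & 15) == 0 at function entry (16-byte alignment at the point of the call), and x86-64 wants sp % 16 == 8 at entry (16-byte aligned before the return address is pushed). The arithmetic is easy to verify with a throwaway host-side program:

    #include <stdio.h>

    /* Mirrors the two expressions used in align_sigframe(). */
    static unsigned long align32(unsigned long sp) { return ((sp + 4) & -16ul) - 4; }
    static unsigned long align64(unsigned long sp) { return (sp & ~15ul) - 8; }

    int main(void)
    {
        unsigned long sp = 0xbfffe123ul;

        printf("i386:   %#lx -> %#lx, entry check %lu\n",
               sp, align32(sp), (align32(sp) + 4) & 15);   /* prints 0 */
        printf("x86-64: %#lx -> %#lx, mod 16 = %lu\n",
               sp, align64(sp), align64(sp) & 15);         /* prints 8 */
        return 0;
    }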
216static inline void __user * 208static inline void __user *
217get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, 209get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
218 void **fpstate) 210 void __user **fpstate)
219{ 211{
220 unsigned long sp;
221
222 /* Default to using normal stack */ 212 /* Default to using normal stack */
223 sp = regs->sp; 213 unsigned long sp = regs->sp;
214
215#ifdef CONFIG_X86_64
216 /* redzone */
217 sp -= 128;
218#endif /* CONFIG_X86_64 */
224 219
225 /* 220 /*
226 * If we are on the alternate signal stack and would overflow it, don't. 221 * If we are on the alternate signal stack and would overflow it, don't.
@@ -234,30 +229,52 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
234 if (sas_ss_flags(sp) == 0) 229 if (sas_ss_flags(sp) == 0)
235 sp = current->sas_ss_sp + current->sas_ss_size; 230 sp = current->sas_ss_sp + current->sas_ss_size;
236 } else { 231 } else {
232#ifdef CONFIG_X86_32
237 /* This is the legacy signal stack switching. */ 233 /* This is the legacy signal stack switching. */
238 if ((regs->ss & 0xffff) != __USER_DS && 234 if ((regs->ss & 0xffff) != __USER_DS &&
239 !(ka->sa.sa_flags & SA_RESTORER) && 235 !(ka->sa.sa_flags & SA_RESTORER) &&
240 ka->sa.sa_restorer) 236 ka->sa.sa_restorer)
241 sp = (unsigned long) ka->sa.sa_restorer; 237 sp = (unsigned long) ka->sa.sa_restorer;
238#endif /* CONFIG_X86_32 */
242 } 239 }
243 240
244 if (used_math()) { 241 if (used_math()) {
245 sp = sp - sig_xstate_size; 242 sp -= sig_xstate_size;
246 *fpstate = (struct _fpstate *) sp; 243#ifdef CONFIG_X86_64
244 sp = round_down(sp, 64);
245#endif /* CONFIG_X86_64 */
246 *fpstate = (void __user *)sp;
247
247 if (save_i387_xstate(*fpstate) < 0) 248 if (save_i387_xstate(*fpstate) < 0)
248 return (void __user *)-1L; 249 return (void __user *)-1L;
249 } 250 }
250 251
251 sp -= frame_size; 252 return (void __user *)align_sigframe(sp - frame_size);
252 /*
253 * Align the stack pointer according to the i386 ABI,
254 * i.e. so that on function entry ((sp + 4) & 15) == 0.
255 */
256 sp = ((sp + 4) & -16ul) - 4;
257
258 return (void __user *) sp;
259} 253}
260 254
255#ifdef CONFIG_X86_32
256static const struct {
257 u16 poplmovl;
258 u32 val;
259 u16 int80;
260} __attribute__((packed)) retcode = {
261 0xb858, /* popl %eax; movl $..., %eax */
262 __NR_sigreturn,
263 0x80cd, /* int $0x80 */
264};
265
266static const struct {
267 u8 movl;
268 u32 val;
269 u16 int80;
270 u8 pad;
271} __attribute__((packed)) rt_retcode = {
272 0xb8, /* movl $..., %eax */
273 __NR_rt_sigreturn,
274 0x80cd, /* int $0x80 */
275 0
276};
277
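
These packed structures are the raw bytes of the classic int $0x80 sigreturn trampolines, written in little-endian pieces: the u16 0xb858 lands in memory as 58 b8 (popl %eax, then the movl $imm32,%eax opcode), the u32 carries the syscall number, and 0x80cd becomes cd 80 (int $0x80). A host-side dump of the same layout on a little-endian machine (the syscall number below is only an assumed example value):

    #include <stdio.h>

    static const struct {
        unsigned short poplmovl;    /* 58 b8: popl %eax; movl $..., %eax */
        unsigned int   val;         /* immediate: the sigreturn number   */
        unsigned short int80;       /* cd 80: int $0x80                  */
    } __attribute__((packed)) retcode = { 0xb858, 119, 0x80cd };

    int main(void)
    {
        const unsigned char *p = (const unsigned char *)&retcode;

        for (unsigned int i = 0; i < sizeof(retcode); i++)
            printf("%02x ", p[i]);      /* 58 b8 77 00 00 00 cd 80 */
        printf("\n");
        return 0;
    }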
261static int 278static int
262__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, 279__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
263 struct pt_regs *regs) 280 struct pt_regs *regs)
@@ -388,24 +405,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
388 return 0; 405 return 0;
389} 406}
390#else /* !CONFIG_X86_32 */ 407#else /* !CONFIG_X86_32 */
391/*
392 * Determine which stack to use..
393 */
394static void __user *
395get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size)
396{
397 /* Default to using normal stack - redzone*/
398 sp -= 128;
399
400 /* This is the X/Open sanctioned signal stack switching. */
401 if (ka->sa.sa_flags & SA_ONSTACK) {
402 if (sas_ss_flags(sp) == 0)
403 sp = current->sas_ss_sp + current->sas_ss_size;
404 }
405
406 return (void __user *)round_down(sp - size, 64);
407}
408
409static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 408static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
410 sigset_t *set, struct pt_regs *regs) 409 sigset_t *set, struct pt_regs *regs)
411{ 410{
@@ -414,15 +413,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
414 int err = 0; 413 int err = 0;
415 struct task_struct *me = current; 414 struct task_struct *me = current;
416 415
417 if (used_math()) { 416 frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp);
418 fp = get_stack(ka, regs->sp, sig_xstate_size);
419 frame = (void __user *)round_down(
420 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
421
422 if (save_i387_xstate(fp) < 0)
423 return -EFAULT;
424 } else
425 frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8;
426 417
427 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 418 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
428 return -EFAULT; 419 return -EFAULT;
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index eaaffae31cc0..13f33ea8ccaa 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -26,7 +26,7 @@
26#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
27#include <asm/mmu_context.h> 27#include <asm/mmu_context.h>
28#include <asm/proto.h> 28#include <asm/proto.h>
29#include <asm/genapic.h> 29#include <asm/apic.h>
30/* 30/*
31 * Some notes on x86 processor bugs affecting SMP operation: 31 * Some notes on x86 processor bugs affecting SMP operation:
32 * 32 *
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 10834954e301..249334f5080a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -60,12 +60,11 @@
60#include <asm/tlbflush.h> 60#include <asm/tlbflush.h>
61#include <asm/mtrr.h> 61#include <asm/mtrr.h>
62#include <asm/vmi.h> 62#include <asm/vmi.h>
63#include <asm/genapic.h> 63#include <asm/apic.h>
64#include <asm/setup.h> 64#include <asm/setup.h>
65#include <asm/uv/uv.h> 65#include <asm/uv/uv.h>
66#include <linux/mc146818rtc.h> 66#include <linux/mc146818rtc.h>
67 67
68#include <asm/genapic.h>
69#include <asm/smpboot_hooks.h> 68#include <asm/smpboot_hooks.h>
70 69
71#ifdef CONFIG_X86_32 70#ifdef CONFIG_X86_32
@@ -113,7 +112,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
113DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); 112DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
114EXPORT_PER_CPU_SYMBOL(cpu_info); 113EXPORT_PER_CPU_SYMBOL(cpu_info);
115 114
116static atomic_t init_deasserted; 115atomic_t init_deasserted;
117 116
118 117
119/* Set if we find a B stepping CPU */ 118/* Set if we find a B stepping CPU */
@@ -615,12 +614,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
615 unsigned long send_status, accept_status = 0; 614 unsigned long send_status, accept_status = 0;
616 int maxlvt, num_starts, j; 615 int maxlvt, num_starts, j;
617 616
618 if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
619 send_status = uv_wakeup_secondary(phys_apicid, start_eip);
620 atomic_set(&init_deasserted, 1);
621 return send_status;
622 }
623
624 maxlvt = lapic_get_maxlvt(); 617 maxlvt = lapic_get_maxlvt();
625 618
626 /* 619 /*
@@ -746,21 +739,22 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
746 complete(&c_idle->done); 739 complete(&c_idle->done);
747} 740}
748 741
749static int __cpuinit do_boot_cpu(int apicid, int cpu)
750/* 742/*
751 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 743 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
752 * (ie clustered apic addressing mode), this is a LOGICAL apic ID. 744 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
753 * Returns zero if CPU booted OK, else error code from ->wakeup_cpu. 745 * Returns zero if CPU booted OK, else error code from
746 * ->wakeup_secondary_cpu.
754 */ 747 */
748static int __cpuinit do_boot_cpu(int apicid, int cpu)
755{ 749{
756 unsigned long boot_error = 0; 750 unsigned long boot_error = 0;
757 int timeout;
758 unsigned long start_ip; 751 unsigned long start_ip;
759 unsigned short nmi_high = 0, nmi_low = 0; 752 int timeout;
760 struct create_idle c_idle = { 753 struct create_idle c_idle = {
761 .cpu = cpu, 754 .cpu = cpu,
762 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 755 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
763 }; 756 };
757
764 INIT_WORK(&c_idle.work, do_fork_idle); 758 INIT_WORK(&c_idle.work, do_fork_idle);
765 759
766 alternatives_smp_switch(1); 760 alternatives_smp_switch(1);
@@ -825,9 +819,6 @@ do_rest:
825 819
826 pr_debug("Setting warm reset code and vector.\n"); 820 pr_debug("Setting warm reset code and vector.\n");
827 821
828 if (apic->store_NMI_vector)
829 apic->store_NMI_vector(&nmi_high, &nmi_low);
830
831 smpboot_setup_warm_reset_vector(start_ip); 822 smpboot_setup_warm_reset_vector(start_ip);
832 /* 823 /*
833 * Be paranoid about clearing APIC errors. 824 * Be paranoid about clearing APIC errors.
@@ -839,9 +830,13 @@ do_rest:
839 } 830 }
840 831
841 /* 832 /*
842 * Starting actual IPI sequence... 833 * Kick the secondary CPU. Use the method in the APIC driver
834 * if it's defined - or use an INIT boot APIC message otherwise:
843 */ 835 */
844 boot_error = apic->wakeup_cpu(apicid, start_ip); 836 if (apic->wakeup_secondary_cpu)
837 boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
838 else
839 boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
845 840
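
This is the logic that the removed UV special-case in wakeup_secondary_cpu_via_init() used to hard-code, now turned into an APIC-driver hook: do_boot_cpu() asks the driver for a wakeup_secondary_cpu method first and only falls back to the generic INIT/STARTUP IPI sequence when none is provided. Roughly, a driver that needs the non-IPI path would install its own method like this (structure name, layout and prototype are abbreviated assumptions):

    /* Kernel-context sketch: a driver supplying its own wakeup method,
     * e.g. pointing at uv_wakeup_secondary() as the removed code did. */
    extern int uv_wakeup_secondary(int phys_apicid, unsigned long start_eip);

    struct apic_driver_sketch {
        int (*wakeup_secondary_cpu)(int phys_apicid, unsigned long start_eip);
        /* ... the rest of the driver's operations ... */
    };

    static struct apic_driver_sketch uv_like_driver = {
        .wakeup_secondary_cpu = uv_wakeup_secondary,
    };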
846 if (!boot_error) { 841 if (!boot_error) {
847 /* 842 /*
@@ -1128,8 +1123,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1128 current_thread_info()->cpu = 0; /* needed? */ 1123 current_thread_info()->cpu = 0; /* needed? */
1129 set_cpu_sibling_map(0); 1124 set_cpu_sibling_map(0);
1130 1125
1131#ifdef CONFIG_X86_64
1132 enable_IR_x2apic(); 1126 enable_IR_x2apic();
1127#ifdef CONFIG_X86_64
1133 default_setup_apic_routing(); 1128 default_setup_apic_routing();
1134#endif 1129#endif
1135 1130
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 764c74e871f2..5c5d87f0b2e1 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -33,7 +33,7 @@
33#include <linux/time.h> 33#include <linux/time.h>
34#include <linux/mca.h> 34#include <linux/mca.h>
35 35
36#include <asm/arch_hooks.h> 36#include <asm/setup.h>
37#include <asm/hpet.h> 37#include <asm/hpet.h>
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
@@ -118,7 +118,7 @@ void __init hpet_time_init(void)
118{ 118{
119 if (!hpet_enable()) 119 if (!hpet_enable())
120 setup_pit_timer(); 120 setup_pit_timer();
121 time_init_hook(); 121 x86_quirk_time_init();
122} 122}
123 123
124/* 124/*
@@ -131,7 +131,7 @@ void __init hpet_time_init(void)
131 */ 131 */
132void __init time_init(void) 132void __init time_init(void)
133{ 133{
134 pre_time_init_hook(); 134 x86_quirk_pre_time_init();
135 tsc_init(); 135 tsc_init();
136 late_time_init = choose_time_init(); 136 late_time_init = choose_time_init();
137} 137}
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index e6e695acd725..241ec3923f61 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -115,7 +115,7 @@ unsigned long __init calibrate_cpu(void)
115 115
116static struct irqaction irq0 = { 116static struct irqaction irq0 = {
117 .handler = timer_interrupt, 117 .handler = timer_interrupt,
118 .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING, 118 .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
119 .mask = CPU_MASK_NONE, 119 .mask = CPU_MASK_NONE,
120 .name = "timer" 120 .name = "timer"
121}; 121};
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f396e61bcb34..f04549afcfe9 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -15,13 +15,11 @@
15#include <asm/uv/uv_mmrs.h> 15#include <asm/uv/uv_mmrs.h>
16#include <asm/uv/uv_hub.h> 16#include <asm/uv/uv_hub.h>
17#include <asm/uv/uv_bau.h> 17#include <asm/uv/uv_bau.h>
18#include <asm/genapic.h> 18#include <asm/apic.h>
19#include <asm/idle.h> 19#include <asm/idle.h>
20#include <asm/tsc.h> 20#include <asm/tsc.h>
21#include <asm/irq_vectors.h> 21#include <asm/irq_vectors.h>
22 22
23#include <asm/genapic.h>
24
25static struct bau_control **uv_bau_table_bases __read_mostly; 23static struct bau_control **uv_bau_table_bases __read_mostly;
26static int uv_bau_retry_limit __read_mostly; 24static int uv_bau_retry_limit __read_mostly;
27 25
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index d8ccc3c6552f..66d874e5404c 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -29,7 +29,7 @@
29 29
30#include <linux/linkage.h> 30#include <linux/linkage.h>
31#include <asm/segment.h> 31#include <asm/segment.h>
32#include <asm/page.h> 32#include <asm/page_types.h>
33 33
34/* We can free up trampoline after bootup if cpu hotplug is not supported. */ 34/* We can free up trampoline after bootup if cpu hotplug is not supported. */
35#ifndef CONFIG_HOTPLUG_CPU 35#ifndef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 95a012a4664e..cddfb8d386b9 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -25,8 +25,8 @@
25 */ 25 */
26 26
27#include <linux/linkage.h> 27#include <linux/linkage.h>
28#include <asm/pgtable.h> 28#include <asm/pgtable_types.h>
29#include <asm/page.h> 29#include <asm/page_types.h>
30#include <asm/msr.h> 30#include <asm/msr.h>
31#include <asm/segment.h> 31#include <asm/segment.h>
32#include <asm/processor-flags.h> 32#include <asm/processor-flags.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index acb8c0585ab9..a1d288327ff0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -61,7 +61,7 @@
61#include <asm/proto.h> 61#include <asm/proto.h>
62#else 62#else
63#include <asm/processor-flags.h> 63#include <asm/processor-flags.h>
64#include <asm/arch_hooks.h> 64#include <asm/setup.h>
65#include <asm/traps.h> 65#include <asm/traps.h>
66 66
67#include "cpu/mcheck/mce.h" 67#include "cpu/mcheck/mce.h"
@@ -118,47 +118,6 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
118 if (!user_mode_vm(regs)) 118 if (!user_mode_vm(regs))
119 die(str, regs, err); 119 die(str, regs, err);
120} 120}
121
122/*
 123 * Perform the TSS's lazy I/O bitmap copy. If the TSS has the
 124 * invalid (LAZY) offset set and the faulting thread has
 125 * a valid I/O bitmap pointer, we copy the I/O bitmap into the TSS,
 126 * set the offset field correctly and return 1.
127 */
128static int lazy_iobitmap_copy(void)
129{
130 struct thread_struct *thread;
131 struct tss_struct *tss;
132 int cpu;
133
134 cpu = get_cpu();
135 tss = &per_cpu(init_tss, cpu);
136 thread = &current->thread;
137
138 if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
139 thread->io_bitmap_ptr) {
140 memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
141 thread->io_bitmap_max);
142 /*
143 * If the previously set map was extending to higher ports
144 * than the current one, pad extra space with 0xff (no access).
145 */
146 if (thread->io_bitmap_max < tss->io_bitmap_max) {
147 memset((char *) tss->io_bitmap +
148 thread->io_bitmap_max, 0xff,
149 tss->io_bitmap_max - thread->io_bitmap_max);
150 }
151 tss->io_bitmap_max = thread->io_bitmap_max;
152 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
153 tss->io_bitmap_owner = thread;
154 put_cpu();
155
156 return 1;
157 }
158 put_cpu();
159
160 return 0;
161}
162#endif 121#endif
163 122
164static void __kprobes 123static void __kprobes
@@ -309,11 +268,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
309 conditional_sti(regs); 268 conditional_sti(regs);
310 269
311#ifdef CONFIG_X86_32 270#ifdef CONFIG_X86_32
312 if (lazy_iobitmap_copy()) {
313 /* restart the faulting instruction */
314 return;
315 }
316
317 if (regs->flags & X86_VM_MASK) 271 if (regs->flags & X86_VM_MASK)
318 goto gp_in_vm86; 272 goto gp_in_vm86;
319#endif 273#endif
@@ -942,7 +896,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
942 info.si_signo = SIGILL; 896 info.si_signo = SIGILL;
943 info.si_errno = 0; 897 info.si_errno = 0;
944 info.si_code = ILL_BADSTK; 898 info.si_code = ILL_BADSTK;
945 info.si_addr = 0; 899 info.si_addr = NULL;
946 if (notify_die(DIE_TRAP, "iret exception", 900 if (notify_die(DIE_TRAP, "iret exception",
947 regs, error_code, 32, SIGILL) == NOTIFY_STOP) 901 regs, error_code, 32, SIGILL) == NOTIFY_STOP)
948 return; 902 return;
@@ -1026,6 +980,6 @@ void __init trap_init(void)
1026 cpu_init(); 980 cpu_init();
1027 981
1028#ifdef CONFIG_X86_32 982#ifdef CONFIG_X86_32
1029 trap_init_hook(); 983 x86_quirk_trap_init();
1030#endif 984#endif
1031} 985}
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 4fd646e6dd43..191a876e9e87 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -24,18 +24,14 @@
24 24
25#include <asm/visws/cobalt.h> 25#include <asm/visws/cobalt.h>
26#include <asm/visws/piix4.h> 26#include <asm/visws/piix4.h>
27#include <asm/arch_hooks.h>
28#include <asm/io_apic.h> 27#include <asm/io_apic.h>
29#include <asm/fixmap.h> 28#include <asm/fixmap.h>
30#include <asm/reboot.h> 29#include <asm/reboot.h>
31#include <asm/setup.h> 30#include <asm/setup.h>
31#include <asm/apic.h>
32#include <asm/e820.h> 32#include <asm/e820.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <asm/genapic.h>
36
37#include <asm/genapic.h>
38
39#include <linux/kernel_stat.h> 35#include <linux/kernel_stat.h>
40 36
41#include <asm/i8259.h> 37#include <asm/i8259.h>
@@ -49,8 +45,6 @@
49 45
50extern int no_broadcast; 46extern int no_broadcast;
51 47
52#include <asm/apic.h>
53
54char visws_board_type = -1; 48char visws_board_type = -1;
55char visws_board_rev = -1; 49char visws_board_rev = -1;
56 50
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index f052c84ecbe4..2cc4a90e2cb3 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -798,8 +798,8 @@ static inline int __init activate_vmi(void)
798#endif 798#endif
799 799
800#ifdef CONFIG_X86_LOCAL_APIC 800#ifdef CONFIG_X86_LOCAL_APIC
801 para_fill(apic_ops->read, APICRead); 801 para_fill(apic->read, APICRead);
802 para_fill(apic_ops->write, APICWrite); 802 para_fill(apic->write, APICWrite);
803#endif 803#endif
804 804
805 /* 805 /*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index a4791ef412d1..33a788d5879c 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -28,7 +28,6 @@
28 28
29#include <asm/vmi.h> 29#include <asm/vmi.h>
30#include <asm/vmi_time.h> 30#include <asm/vmi_time.h>
31#include <asm/arch_hooks.h>
32#include <asm/apicdef.h> 31#include <asm/apicdef.h>
33#include <asm/apic.h> 32#include <asm/apic.h>
34#include <asm/timer.h> 33#include <asm/timer.h>
@@ -202,7 +201,7 @@ static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id)
202static struct irqaction vmi_clock_action = { 201static struct irqaction vmi_clock_action = {
203 .name = "vmi-timer", 202 .name = "vmi-timer",
204 .handler = vmi_timer_interrupt, 203 .handler = vmi_timer_interrupt,
205 .flags = IRQF_DISABLED | IRQF_NOBALANCING, 204 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
206 .mask = CPU_MASK_ALL, 205 .mask = CPU_MASK_ALL,
207}; 206};
208 207
@@ -283,10 +282,12 @@ void __devinit vmi_time_ap_init(void)
283#endif 282#endif
284 283
285/** vmi clocksource */ 284/** vmi clocksource */
285static struct clocksource clocksource_vmi;
286 286
287static cycle_t read_real_cycles(void) 287static cycle_t read_real_cycles(void)
288{ 288{
289 return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); 289 cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
290 return max(ret, clocksource_vmi.cycle_last);
290} 291}
291 292
292static struct clocksource clocksource_vmi = { 293static struct clocksource clocksource_vmi = {
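
The new read_real_cycles() clamps the hypervisor-provided counter against cycle_last, the last value the timekeeping core accepted from this clocksource; if the backend ever appears to step backwards, returning max(ret, cycle_last) avoids a negative delta that would wrap to a huge unsigned interval. The guard in isolation, with stand-in types (in the real code cycle_last is maintained by the timekeeping core, not by the reader):

    #include <stdint.h>

    typedef uint64_t cycle_t;

    static cycle_t last_handed_out;            /* stands in for cycle_last */

    static cycle_t read_raw_counter(void)      /* stand-in backend read    */
    {
        static cycle_t c;
        return c += 7;
    }

    static cycle_t read_clamped(void)
    {
        cycle_t ret = read_raw_counter();
        /* never report a value older than one already handed out */
        return ret > last_handed_out ? ret : last_handed_out;
    }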
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index 3eba7f7bac05..0d860963f268 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -12,7 +12,7 @@
12 12
13#include <asm-generic/vmlinux.lds.h> 13#include <asm-generic/vmlinux.lds.h>
14#include <asm/thread_info.h> 14#include <asm/thread_info.h>
15#include <asm/page.h> 15#include <asm/page_types.h>
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <asm/boot.h> 17#include <asm/boot.h>
18 18
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 087a7f2c639b..fbfced6f6800 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -6,7 +6,7 @@
6 6
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9#include <asm/page.h> 9#include <asm/page_types.h>
10 10
11#undef i386 /* in case the preprocessor is a 32bit one */ 11#undef i386 /* in case the preprocessor is a 32bit one */
12 12
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index c609205df594..74de562812cc 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -22,7 +22,7 @@
22#include <asm/paravirt.h> 22#include <asm/paravirt.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24 24
25#if defined CONFIG_PCI && defined CONFIG_PARAVIRT 25#ifdef CONFIG_PARAVIRT
26/* 26/*
27 * Interrupt control on vSMPowered systems: 27 * Interrupt control on vSMPowered systems:
28 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' 28 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
@@ -114,7 +114,6 @@ static void __init set_vsmp_pv_ops(void)
114} 114}
115#endif 115#endif
116 116
117#ifdef CONFIG_PCI
118static int is_vsmp = -1; 117static int is_vsmp = -1;
119 118
120static void __init detect_vsmp_box(void) 119static void __init detect_vsmp_box(void)
@@ -139,15 +138,6 @@ int is_vsmp_box(void)
139 return 0; 138 return 0;
140 } 139 }
141} 140}
142#else
143static void __init detect_vsmp_box(void)
144{
145}
146int is_vsmp_box(void)
147{
148 return 0;
149}
150#endif
151 141
152void __init vsmp_init(void) 142void __init vsmp_init(void)
153{ 143{
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index e665d1c623ca..72bd275a9b5c 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -207,7 +207,7 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
207 hrtimer_add_expires_ns(&pt->timer, pt->period); 207 hrtimer_add_expires_ns(&pt->timer, pt->period);
208 pt->scheduled = hrtimer_get_expires_ns(&pt->timer); 208 pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
209 if (pt->period) 209 if (pt->period)
210 ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer); 210 ps->channels[0].count_load_time = ktime_get();
211 211
212 return (pt->period == 0 ? 0 : 1); 212 return (pt->period == 0 ? 0 : 1);
213} 213}
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index c019b8edcdb7..cf17ed52f6fb 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -87,13 +87,6 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
87} 87}
88EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs); 88EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
89 89
90void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
91{
92 kvm_apic_timer_intr_post(vcpu, vec);
93 /* TODO: PIT, RTC etc. */
94}
95EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
96
97void __kvm_migrate_timers(struct kvm_vcpu *vcpu) 90void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
98{ 91{
99 __kvm_migrate_apic_timer(vcpu); 92 __kvm_migrate_apic_timer(vcpu);
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 2bf32a03ceec..82579ee538d0 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -89,7 +89,6 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
89 89
90void kvm_pic_reset(struct kvm_kpic_state *s); 90void kvm_pic_reset(struct kvm_kpic_state *s);
91 91
92void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
93void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); 92void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
94void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); 93void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
95void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu); 94void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index afac68c0815c..f0b67f2cdd69 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -35,6 +35,12 @@
35#include "kvm_cache_regs.h" 35#include "kvm_cache_regs.h"
36#include "irq.h" 36#include "irq.h"
37 37
38#ifndef CONFIG_X86_64
39#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
40#else
41#define mod_64(x, y) ((x) % (y))
42#endif
43
38#define PRId64 "d" 44#define PRId64 "d"
39#define PRIx64 "llx" 45#define PRIx64 "llx"
40#define PRIu64 "u" 46#define PRIu64 "u"
@@ -511,52 +517,22 @@ static void apic_send_ipi(struct kvm_lapic *apic)
511 517
512static u32 apic_get_tmcct(struct kvm_lapic *apic) 518static u32 apic_get_tmcct(struct kvm_lapic *apic)
513{ 519{
514 u64 counter_passed; 520 ktime_t remaining;
515 ktime_t passed, now; 521 s64 ns;
516 u32 tmcct; 522 u32 tmcct;
517 523
518 ASSERT(apic != NULL); 524 ASSERT(apic != NULL);
519 525
520 now = apic->timer.dev.base->get_time();
521 tmcct = apic_get_reg(apic, APIC_TMICT);
522
523 /* if initial count is 0, current count should also be 0 */ 526 /* if initial count is 0, current count should also be 0 */
524 if (tmcct == 0) 527 if (apic_get_reg(apic, APIC_TMICT) == 0)
525 return 0; 528 return 0;
526 529
527 if (unlikely(ktime_to_ns(now) <= 530 remaining = hrtimer_expires_remaining(&apic->timer.dev);
528 ktime_to_ns(apic->timer.last_update))) { 531 if (ktime_to_ns(remaining) < 0)
529 /* Wrap around */ 532 remaining = ktime_set(0, 0);
530 passed = ktime_add(( { 533
531 (ktime_t) { 534 ns = mod_64(ktime_to_ns(remaining), apic->timer.period);
532 .tv64 = KTIME_MAX - 535 tmcct = div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
533 (apic->timer.last_update).tv64}; }
534 ), now);
535 apic_debug("time elapsed\n");
536 } else
537 passed = ktime_sub(now, apic->timer.last_update);
538
539 counter_passed = div64_u64(ktime_to_ns(passed),
540 (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
541
542 if (counter_passed > tmcct) {
543 if (unlikely(!apic_lvtt_period(apic))) {
544 /* one-shot timers stick at 0 until reset */
545 tmcct = 0;
546 } else {
547 /*
548 * periodic timers reset to APIC_TMICT when they
549 * hit 0. The while loop simulates this happening N
550 * times. (counter_passed %= tmcct) would also work,
551 * but might be slower or not work on 32-bit??
552 */
553 while (counter_passed > tmcct)
554 counter_passed -= tmcct;
555 tmcct -= counter_passed;
556 }
557 } else {
558 tmcct -= counter_passed;
559 }
560 536
561 return tmcct; 537 return tmcct;
562} 538}
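
The rewritten apic_get_tmcct() derives the current-count register purely from the hrtimer: take the nanoseconds remaining until expiry, reduce them modulo the programmed period (mod_64 exists because 32-bit builds have no native 64-bit %), and divide by the bus-cycle length times the divide configuration. The same arithmetic on made-up sample values:

    #include <stdio.h>
    #include <stdint.h>

    /* Portable stand-in for the kernel's mod_64(): built from a divide,
     * exactly as the 32-bit definition above does with div64_u64(). */
    static uint64_t mod_64(uint64_t x, uint64_t y) { return x - y * (x / y); }

    int main(void)
    {
        /* Illustrative values only. */
        uint64_t bus_cycle_ns = 1;           /* APIC_BUS_CYCLE_NS stand-in */
        uint64_t divide_count = 16;          /* timer divide configuration */
        uint64_t period_ns    = 1000000;     /* programmed period          */
        uint64_t remaining_ns = 2300000;     /* expiry may be laps ahead   */

        uint64_t ns    = mod_64(remaining_ns, period_ns);        /* 300000 */
        uint64_t tmcct = ns / (bus_cycle_ns * divide_count);     /* 18750  */

        printf("TMCCT = %llu\n", (unsigned long long)tmcct);
        return 0;
    }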
@@ -653,8 +629,6 @@ static void start_apic_timer(struct kvm_lapic *apic)
653{ 629{
654 ktime_t now = apic->timer.dev.base->get_time(); 630 ktime_t now = apic->timer.dev.base->get_time();
655 631
656 apic->timer.last_update = now;
657
658 apic->timer.period = apic_get_reg(apic, APIC_TMICT) * 632 apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
659 APIC_BUS_CYCLE_NS * apic->timer.divide_count; 633 APIC_BUS_CYCLE_NS * apic->timer.divide_count;
660 atomic_set(&apic->timer.pending, 0); 634 atomic_set(&apic->timer.pending, 0);
@@ -1110,16 +1084,6 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
1110 } 1084 }
1111} 1085}
1112 1086
1113void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
1114{
1115 struct kvm_lapic *apic = vcpu->arch.apic;
1116
1117 if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
1118 apic->timer.last_update = ktime_add_ns(
1119 apic->timer.last_update,
1120 apic->timer.period);
1121}
1122
1123int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) 1087int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
1124{ 1088{
1125 int vector = kvm_apic_has_interrupt(vcpu); 1089 int vector = kvm_apic_has_interrupt(vcpu);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 81858881287e..45ab6ee71209 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -12,7 +12,6 @@ struct kvm_lapic {
12 atomic_t pending; 12 atomic_t pending;
13 s64 period; /* unit: ns */ 13 s64 period; /* unit: ns */
14 u32 divide_count; 14 u32 divide_count;
15 ktime_t last_update;
16 struct hrtimer dev; 15 struct hrtimer dev;
17 } timer; 16 } timer;
18 struct kvm_vcpu *vcpu; 17 struct kvm_vcpu *vcpu;
@@ -42,7 +41,6 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
42void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu); 41void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
43int kvm_lapic_enabled(struct kvm_vcpu *vcpu); 42int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
44int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); 43int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
45void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
46 44
47void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); 45void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
48void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); 46void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 83f11c7474a1..2d4477c71473 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1698,8 +1698,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1698 if (largepage) 1698 if (largepage)
1699 spte |= PT_PAGE_SIZE_MASK; 1699 spte |= PT_PAGE_SIZE_MASK;
1700 if (mt_mask) { 1700 if (mt_mask) {
1701 mt_mask = get_memory_type(vcpu, gfn) << 1701 if (!kvm_is_mmio_pfn(pfn)) {
1702 kvm_x86_ops->get_mt_mask_shift(); 1702 mt_mask = get_memory_type(vcpu, gfn) <<
1703 kvm_x86_ops->get_mt_mask_shift();
1704 mt_mask |= VMX_EPT_IGMT_BIT;
1705 } else
1706 mt_mask = MTRR_TYPE_UNCACHABLE <<
1707 kvm_x86_ops->get_mt_mask_shift();
1703 spte |= mt_mask; 1708 spte |= mt_mask;
1704 } 1709 }
1705 1710
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1452851ae258..a9e769e4e251 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1600,7 +1600,6 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
1600 /* Okay, we can deliver the interrupt: grab it and update PIC state. */ 1600 /* Okay, we can deliver the interrupt: grab it and update PIC state. */
1601 intr_vector = kvm_cpu_get_interrupt(vcpu); 1601 intr_vector = kvm_cpu_get_interrupt(vcpu);
1602 svm_inject_irq(svm, intr_vector); 1602 svm_inject_irq(svm, intr_vector);
1603 kvm_timer_intr_post(vcpu, intr_vector);
1604out: 1603out:
1605 update_cr8_intercept(vcpu); 1604 update_cr8_intercept(vcpu);
1606} 1605}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6259d7467648..7611af576829 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -903,6 +903,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
903 data = vmcs_readl(GUEST_SYSENTER_ESP); 903 data = vmcs_readl(GUEST_SYSENTER_ESP);
904 break; 904 break;
905 default: 905 default:
906 vmx_load_host_state(to_vmx(vcpu));
906 msr = find_msr_entry(to_vmx(vcpu), msr_index); 907 msr = find_msr_entry(to_vmx(vcpu), msr_index);
907 if (msr) { 908 if (msr) {
908 data = msr->data; 909 data = msr->data;
@@ -3285,7 +3286,6 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
3285 } 3286 }
3286 if (vcpu->arch.interrupt.pending) { 3287 if (vcpu->arch.interrupt.pending) {
3287 vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); 3288 vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
3288 kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr);
3289 if (kvm_cpu_has_interrupt(vcpu)) 3289 if (kvm_cpu_has_interrupt(vcpu))
3290 enable_irq_window(vcpu); 3290 enable_irq_window(vcpu);
3291 } 3291 }
@@ -3687,8 +3687,7 @@ static int __init vmx_init(void)
3687 if (vm_need_ept()) { 3687 if (vm_need_ept()) {
3688 bypass_guest_pf = 0; 3688 bypass_guest_pf = 0;
3689 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | 3689 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
3690 VMX_EPT_WRITABLE_MASK | 3690 VMX_EPT_WRITABLE_MASK);
3691 VMX_EPT_IGMT_BIT);
3692 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, 3691 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
3693 VMX_EPT_EXECUTABLE_MASK, 3692 VMX_EPT_EXECUTABLE_MASK,
3694 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); 3693 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cc17546a2406..758b7a155ae9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -967,7 +967,6 @@ int kvm_dev_ioctl_check_extension(long ext)
967 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 967 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
968 case KVM_CAP_SET_TSS_ADDR: 968 case KVM_CAP_SET_TSS_ADDR:
969 case KVM_CAP_EXT_CPUID: 969 case KVM_CAP_EXT_CPUID:
970 case KVM_CAP_CLOCKSOURCE:
971 case KVM_CAP_PIT: 970 case KVM_CAP_PIT:
972 case KVM_CAP_NOP_IO_DELAY: 971 case KVM_CAP_NOP_IO_DELAY:
973 case KVM_CAP_MP_STATE: 972 case KVM_CAP_MP_STATE:
@@ -992,6 +991,9 @@ int kvm_dev_ioctl_check_extension(long ext)
992 case KVM_CAP_IOMMU: 991 case KVM_CAP_IOMMU:
993 r = iommu_found(); 992 r = iommu_found();
994 break; 993 break;
994 case KVM_CAP_CLOCKSOURCE:
995 r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
996 break;
995 default: 997 default:
996 r = 0; 998 r = 0;
997 break; 999 break;
@@ -4127,9 +4129,13 @@ static void kvm_free_vcpus(struct kvm *kvm)
4127 4129
4128} 4130}
4129 4131
4130void kvm_arch_destroy_vm(struct kvm *kvm) 4132void kvm_arch_sync_events(struct kvm *kvm)
4131{ 4133{
4132 kvm_free_all_assigned_devices(kvm); 4134 kvm_free_all_assigned_devices(kvm);
4135}
4136
4137void kvm_arch_destroy_vm(struct kvm *kvm)
4138{
4133 kvm_iommu_unmap_guest(kvm); 4139 kvm_iommu_unmap_guest(kvm);
4134 kvm_free_pit(kvm); 4140 kvm_free_pit(kvm);
4135 kfree(kvm->arch.vpic); 4141 kfree(kvm->arch.vpic);
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index c70e12b1a637..8dab8f7844d3 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -3,7 +3,6 @@ config LGUEST_GUEST
3 select PARAVIRT 3 select PARAVIRT
4 depends on X86_32 4 depends on X86_32
5 depends on !X86_PAE 5 depends on !X86_PAE
6 depends on !X86_VOYAGER
7 select VIRTIO 6 select VIRTIO
8 select VIRTIO_RING 7 select VIRTIO_RING
9 select VIRTIO_CONSOLE 8 select VIRTIO_CONSOLE
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index da2e314f61b5..f3a5305b8adf 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -828,13 +828,14 @@ static u32 lguest_apic_safe_wait_icr_idle(void)
828 return 0; 828 return 0;
829} 829}
830 830
831static struct apic_ops lguest_basic_apic_ops = { 831static void set_lguest_basic_apic_ops(void)
832 .read = lguest_apic_read, 832{
833 .write = lguest_apic_write, 833 apic->read = lguest_apic_read;
834 .icr_read = lguest_apic_icr_read, 834 apic->write = lguest_apic_write;
835 .icr_write = lguest_apic_icr_write, 835 apic->icr_read = lguest_apic_icr_read;
836 .wait_icr_idle = lguest_apic_wait_icr_idle, 836 apic->icr_write = lguest_apic_icr_write;
837 .safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle, 837 apic->wait_icr_idle = lguest_apic_wait_icr_idle;
838 apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
838}; 839};
839#endif 840#endif
840 841
@@ -1035,7 +1036,7 @@ __init void lguest_init(void)
1035 1036
1036#ifdef CONFIG_X86_LOCAL_APIC 1037#ifdef CONFIG_X86_LOCAL_APIC
1037 /* apic read/write intercepts */ 1038 /* apic read/write intercepts */
1038 apic_ops = &lguest_basic_apic_ops; 1039 set_lguest_basic_apic_ops();
1039#endif 1040#endif
1040 1041
1041 /* time operations */ 1042 /* time operations */
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index ad374003742f..51f1504cddd9 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -28,7 +28,7 @@
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/dwarf2.h> 30#include <asm/dwarf2.h>
31#include <asm/page.h> 31#include <asm/page_types.h>
32#include <asm/errno.h> 32#include <asm/errno.h>
33#include <asm/asm-offsets.h> 33#include <asm/asm-offsets.h>
34#include <asm/thread_info.h> 34#include <asm/thread_info.h>
diff --git a/arch/x86/mach-voyager/Makefile b/arch/x86/mach-voyager/Makefile
deleted file mode 100644
index 15c250b371d3..000000000000
--- a/arch/x86/mach-voyager/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Makefile for the linux kernel.
3#
4
5EXTRA_CFLAGS := -Iarch/x86/kernel
6obj-y := setup.o voyager_basic.o voyager_thread.o
7
8obj-$(CONFIG_SMP) += voyager_smp.o voyager_cat.o
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
deleted file mode 100644
index 66b7eb57d8e4..000000000000
--- a/arch/x86/mach-voyager/setup.c
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * Machine specific setup for generic
3 */
4
5#include <linux/init.h>
6#include <linux/interrupt.h>
7#include <asm/arch_hooks.h>
8#include <asm/voyager.h>
9#include <asm/e820.h>
10#include <asm/io.h>
11#include <asm/setup.h>
12#include <asm/cpu.h>
13
14void __init pre_intr_init_hook(void)
15{
16 init_ISA_irqs();
17}
18
19/*
20 * IRQ2 is cascade interrupt to second interrupt controller
21 */
22static struct irqaction irq2 = {
23 .handler = no_action,
24 .mask = CPU_MASK_NONE,
25 .name = "cascade",
26};
27
28void __init intr_init_hook(void)
29{
30#ifdef CONFIG_SMP
31 voyager_smp_intr_init();
32#endif
33
34 setup_irq(2, &irq2);
35}
36
37static void voyager_disable_tsc(void)
38{
39 /* Voyagers run their CPUs from independent clocks, so disable
40 * the TSC code because we can't sync them */
41 setup_clear_cpu_cap(X86_FEATURE_TSC);
42}
43
44void __init pre_setup_arch_hook(void)
45{
46 voyager_disable_tsc();
47}
48
49void __init pre_time_init_hook(void)
50{
51 voyager_disable_tsc();
52}
53
54void __init trap_init_hook(void)
55{
56}
57
58static struct irqaction irq0 = {
59 .handler = timer_interrupt,
60 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
61 .mask = CPU_MASK_NONE,
62 .name = "timer"
63};
64
65void __init time_init_hook(void)
66{
67 irq0.mask = cpumask_of_cpu(safe_smp_processor_id());
68 setup_irq(0, &irq0);
69}
70
71/* Hook for machine specific memory setup. */
72
73char *__init machine_specific_memory_setup(void)
74{
75 char *who;
76 int new_nr;
77
78 who = "NOT VOYAGER";
79
80 if (voyager_level == 5) {
81 __u32 addr, length;
82 int i;
83
84 who = "Voyager-SUS";
85
86 e820.nr_map = 0;
87 for (i = 0; voyager_memory_detect(i, &addr, &length); i++) {
88 e820_add_region(addr, length, E820_RAM);
89 }
90 return who;
91 } else if (voyager_level == 4) {
92 __u32 tom;
93 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
94 /* select the DINO config space */
95 outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
96 /* Read DINO top of memory register */
97 tom = ((inb(catbase + 0x4) & 0xf0) << 16)
98 + ((inb(catbase + 0x5) & 0x7f) << 24);
99
100 if (inb(catbase) != VOYAGER_DINO) {
101 printk(KERN_ERR
102 "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
103 tom = (boot_params.screen_info.ext_mem_k) << 10;
104 }
105 who = "Voyager-TOM";
106 e820_add_region(0, 0x9f000, E820_RAM);
107 /* map from 1M to top of memory */
108 e820_add_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024,
109 E820_RAM);
110 /* FIXME: Should check the ASICs to see if I need to
111 * take out the 8M window. Just do it at the moment
112 * */
113 e820_add_region(8 * 1024 * 1024, 8 * 1024 * 1024,
114 E820_RESERVED);
115 return who;
116 }
117
118 return default_machine_specific_memory_setup();
119}
diff --git a/arch/x86/mach-voyager/voyager_basic.c b/arch/x86/mach-voyager/voyager_basic.c
deleted file mode 100644
index 46d6f8067690..000000000000
--- a/arch/x86/mach-voyager/voyager_basic.c
+++ /dev/null
@@ -1,317 +0,0 @@
1/* Copyright (C) 1999,2001
2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 *
5 * This file contains all the voyager specific routines for getting
6 * initialisation of the architecture to function. For additional
7 * features see:
8 *
9 * voyager_cat.c - Voyager CAT bus interface
10 * voyager_smp.c - Voyager SMP hal (emulates linux smp.c)
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/sched.h>
16#include <linux/ptrace.h>
17#include <linux/ioport.h>
18#include <linux/interrupt.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/reboot.h>
22#include <linux/sysrq.h>
23#include <linux/smp.h>
24#include <linux/nodemask.h>
25#include <asm/io.h>
26#include <asm/voyager.h>
27#include <asm/vic.h>
28#include <linux/pm.h>
29#include <asm/tlbflush.h>
30#include <asm/arch_hooks.h>
31#include <asm/i8253.h>
32
33/*
34 * Power off function, if any
35 */
36void (*pm_power_off) (void);
37EXPORT_SYMBOL(pm_power_off);
38
39int voyager_level = 0;
40
41struct voyager_SUS *voyager_SUS = NULL;
42
43#ifdef CONFIG_SMP
44static void voyager_dump(int dummy1, struct tty_struct *dummy3)
45{
46 /* get here via a sysrq */
47 voyager_smp_dump();
48}
49
50static struct sysrq_key_op sysrq_voyager_dump_op = {
51 .handler = voyager_dump,
52 .help_msg = "Voyager",
53 .action_msg = "Dump Voyager Status",
54};
55#endif
56
57void voyager_detect(struct voyager_bios_info *bios)
58{
59 if (bios->len != 0xff) {
60 int class = (bios->class_1 << 8)
61 | (bios->class_2 & 0xff);
62
63 printk("Voyager System detected.\n"
64 " Class %x, Revision %d.%d\n",
65 class, bios->major, bios->minor);
66 if (class == VOYAGER_LEVEL4)
67 voyager_level = 4;
68 else if (class < VOYAGER_LEVEL5_AND_ABOVE)
69 voyager_level = 3;
70 else
71 voyager_level = 5;
72 printk(" Architecture Level %d\n", voyager_level);
73 if (voyager_level < 4)
74 printk
75 ("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n");
76 /* install the power off handler */
77 pm_power_off = voyager_power_off;
78#ifdef CONFIG_SMP
79 register_sysrq_key('v', &sysrq_voyager_dump_op);
80#endif
81 } else {
82 printk("\n\n**WARNING**: No Voyager Subsystem Found\n");
83 }
84}
85
86void voyager_system_interrupt(int cpl, void *dev_id)
87{
88 printk("Voyager: detected system interrupt\n");
89}
90
91/* Routine to read information from the extended CMOS area */
92__u8 voyager_extended_cmos_read(__u16 addr)
93{
94 outb(addr & 0xff, 0x74);
95 outb((addr >> 8) & 0xff, 0x75);
96 return inb(0x76);
97}
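The byte-wide accessor above is typically combined into multi-byte reads from consecutive extended-CMOS locations, as voyager_memory_detect() does below with a 4-byte copy. A minimal sketch (the helper name is illustrative, not part of the driver; the explicit shifts are equivalent to the driver's byte copy on little-endian x86):

	/* Sketch: read a little-endian 32-bit value from the extended CMOS
	 * area by combining four consecutive byte reads. */
	static __u32 voyager_extended_cmos_read_long(__u16 addr)
	{
		__u32 val = 0;
		int i;

		for (i = 0; i < 4; i++)
			val |= (__u32)voyager_extended_cmos_read(addr + i) << (8 * i);
		return val;
	}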
98
99/* internal definitions for the SUS Click Map of memory */
100
101#define CLICK_ENTRIES 16
102#define CLICK_SIZE 4096 /* click to byte conversion for Length */
103
104typedef struct ClickMap {
105 struct Entry {
106 __u32 Address;
107 __u32 Length;
108 } Entry[CLICK_ENTRIES];
109} ClickMap_t;
110
111/* This routine is pretty much an awful hack to read the bios clickmap by
112 * mapping it into page 0. There are usually three regions in the map:
113 * Base Memory
114 * Extended Memory
115 * zero length marker for end of map
116 *
117 * Returns 0 on failure and 1 on success in extracting a region.
118 */
119int __init voyager_memory_detect(int region, __u32 * start, __u32 * length)
120{
121 int i;
122 int retval = 0;
123 __u8 cmos[4];
124 ClickMap_t *map;
125 unsigned long map_addr;
126 unsigned long old;
127
128 if (region >= CLICK_ENTRIES) {
129 printk("Voyager: Illegal ClickMap region %d\n", region);
130 return 0;
131 }
132
133 for (i = 0; i < sizeof(cmos); i++)
134 cmos[i] =
135 voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i);
136
137 map_addr = *(unsigned long *)cmos;
138
139 /* steal page 0 for this */
140 old = pg0[0];
141 pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
142 local_flush_tlb();
143	/* keep only the offset bits: the map is now addressed through page 0 */
144 map = (ClickMap_t *) (map_addr & (~PAGE_MASK));
145
146 /* zero length is the end of the clickmap */
147 if (map->Entry[region].Length != 0) {
148 *length = map->Entry[region].Length * CLICK_SIZE;
149 *start = map->Entry[region].Address;
150 retval = 1;
151 }
152
153 /* replace the mapping */
154 pg0[0] = old;
155 local_flush_tlb();
156 return retval;
157}
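A worked example of the click-to-byte conversion above (the entry contents are invented for illustration):

	/* A ClickMap entry of
	 *	Address = 0x00100000, Length = 0x3c00 clicks
	 * reports 0x3c00 * 4096 = 0x03c00000 bytes (60 MB) of extended
	 * memory starting at the 1 MB boundary. */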
158
159/* voyager specific handling code for timer interrupts. Used to hand
160 * off the timer tick to the SMP code, since the VIC doesn't have an
161 * internal timer (The QIC does, but that's another story). */
162void voyager_timer_interrupt(void)
163{
164 if ((jiffies & 0x3ff) == 0) {
165
166 /* There seems to be something flaky in either
167 * hardware or software that is resetting the timer 0
168 * count to something much higher than it should be.
169 * This seems to occur in the boot sequence, just
170 * before root is mounted. Therefore, every 10
171 * seconds or so, we sanity check the timer zero count
172 * and kick it back to where it should be.
173 *
174 * FIXME: This is the most awful hack yet seen. I
175 * should work out exactly what is interfering with
176 * the timer count settings early in the boot sequence
177 * and swiftly introduce it to something sharp and
178 * pointy. */
179 __u16 val;
180
181 spin_lock(&i8253_lock);
182
183 outb_p(0x00, 0x43);
184 val = inb_p(0x40);
185 val |= inb(0x40) << 8;
186 spin_unlock(&i8253_lock);
187
188 if (val > LATCH) {
189 printk
190 ("\nVOYAGER: countdown timer value too high (%d), resetting\n\n",
191 val);
192 spin_lock(&i8253_lock);
193 outb(0x34, 0x43);
194 outb_p(LATCH & 0xff, 0x40); /* LSB */
195 outb(LATCH >> 8, 0x40); /* MSB */
196 spin_unlock(&i8253_lock);
197 }
198 }
199#ifdef CONFIG_SMP
200 smp_vic_timer_interrupt();
201#endif
202}
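For reference, LATCH is the PIT reload value programmed for the timer tick, so any count read back above it indicates the corruption described in the comment. The constants below are assumed (standard PC values), not taken from this file:

	/* Assumed values, for illustration:
	 *   CLOCK_TICK_RATE = 1193182 Hz (standard PC PIT input clock)
	 *   HZ              = 100
	 *   LATCH           = (CLOCK_TICK_RATE + HZ / 2) / HZ = 11932
	 * A correctly programmed counter 0 therefore never reads back more
	 * than ~11932; the sanity check above reprograms it if it does. */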
203
204void voyager_power_off(void)
205{
206 printk("VOYAGER Power Off\n");
207
208 if (voyager_level == 5) {
209 voyager_cat_power_off();
210 } else if (voyager_level == 4) {
211 /* This doesn't apparently work on most L4 machines,
212 * but the specs say to do this to get automatic power
213 * off. Unfortunately, if it doesn't power off the
214 * machine, it ends up doing a cold restart, which
215 * isn't really intended, so comment out the code */
216#if 0
217 int port;
218
219 /* enable the voyager Configuration Space */
220 outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, VOYAGER_MC_SETUP);
221 /* the port for the power off flag is an offset from the
222 floating base */
223 port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21;
224 /* set the power off flag */
225 outb(inb(port) | 0x1, port);
226#endif
227 }
228 /* and wait for it to happen */
229 local_irq_disable();
230 for (;;)
231 halt();
232}
233
234/* copied from process.c */
235static inline void kb_wait(void)
236{
237 int i;
238
239 for (i = 0; i < 0x10000; i++)
240 if ((inb_p(0x64) & 0x02) == 0)
241 break;
242}
243
244void machine_shutdown(void)
245{
246 /* Architecture specific shutdown needed before a kexec */
247}
248
249void machine_restart(char *cmd)
250{
251 printk("Voyager Warm Restart\n");
252 kb_wait();
253
254 if (voyager_level == 5) {
255 /* write magic values to the RTC to inform system that
256 * shutdown is beginning */
257 outb(0x8f, 0x70);
258 outb(0x5, 0x71);
259
260 udelay(50);
261 outb(0xfe, 0x64); /* pull reset low */
262 } else if (voyager_level == 4) {
263 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
264 __u8 basebd = inb(VOYAGER_MC_SETUP);
265
266 outb(basebd | 0x08, VOYAGER_MC_SETUP);
267 outb(0x02, catbase + 0x21);
268 }
269 local_irq_disable();
270 for (;;)
271 halt();
272}
273
274void machine_emergency_restart(void)
275{
276	/* for now, just hook this to a warm restart */
277 machine_restart(NULL);
278}
279
280void mca_nmi_hook(void)
281{
282 __u8 dumpval __maybe_unused = inb(0xf823);
283 __u8 swnmi __maybe_unused = inb(0xf813);
284
285 /* FIXME: assume dump switch pressed */
286 /* check to see if the dump switch was pressed */
287 VDEBUG(("VOYAGER: dumpval = 0x%x, swnmi = 0x%x\n", dumpval, swnmi));
288 /* clear swnmi */
289 outb(0xff, 0xf813);
290 /* tell SUS to ignore dump */
291 if (voyager_level == 5 && voyager_SUS != NULL) {
292 if (voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) {
293 voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND;
294 voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS;
295 udelay(1000);
296 voyager_SUS->kernel_mbox = VOYAGER_IGNORE_DUMP;
297 voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS;
298 }
299 }
300 printk(KERN_ERR
301 "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n",
302 smp_processor_id());
303 show_stack(NULL, NULL);
304 show_state();
305}
306
307void machine_halt(void)
308{
309 /* treat a halt like a power off */
310 machine_power_off();
311}
312
313void machine_power_off(void)
314{
315 if (pm_power_off)
316 pm_power_off();
317}
diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c
deleted file mode 100644
index 2ad598c104af..000000000000
--- a/arch/x86/mach-voyager/voyager_cat.c
+++ /dev/null
@@ -1,1197 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 1999,2001
4 *
5 * Author: J.E.J.Bottomley@HansenPartnership.com
6 *
7 * This file contains all the logic for manipulating the CAT bus
8 * in a level 5 machine.
9 *
10 * The CAT bus is a serial configuration and test bus. Its primary
11 * uses are to probe the initial configuration of the system and to
12 * diagnose error conditions when a system interrupt occurs. The low
13 * level interface is fairly primitive, so most of this file consists
14 * of bit shift manipulations to send and receive packets on the
15 * serial bus */
16
17#include <linux/types.h>
18#include <linux/completion.h>
19#include <linux/sched.h>
20#include <asm/voyager.h>
21#include <asm/vic.h>
22#include <linux/ioport.h>
23#include <linux/init.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
26#include <asm/io.h>
27
28#ifdef VOYAGER_CAT_DEBUG
29#define CDEBUG(x) printk x
30#else
31#define CDEBUG(x)
32#endif
33
34/* the CAT command port */
35#define CAT_CMD (sspb + 0xe)
36/* the CAT data port */
37#define CAT_DATA (sspb + 0xd)
38
39/* the internal cat functions */
40static void cat_pack(__u8 * msg, __u16 start_bit, __u8 * data, __u16 num_bits);
41static void cat_unpack(__u8 * msg, __u16 start_bit, __u8 * data,
42 __u16 num_bits);
43static void cat_build_header(__u8 * header, const __u16 len,
44 const __u16 smallest_reg_bits,
45 const __u16 longest_reg_bits);
46static int cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp,
47 __u8 reg, __u8 op);
48static int cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp,
49 __u8 reg, __u8 * value);
50static int cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes,
51 __u8 pad_bits);
52static int cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
53 __u8 value);
54static int cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
55 __u8 * value);
56static int cat_subread(voyager_module_t * modp, voyager_asic_t * asicp,
57 __u16 offset, __u16 len, void *buf);
58static int cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
59 __u8 reg, __u8 value);
60static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp);
61static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp);
62
63static inline const char *cat_module_name(int module_id)
64{
65 switch (module_id) {
66 case 0x10:
67 return "Processor Slot 0";
68 case 0x11:
69 return "Processor Slot 1";
70 case 0x12:
71 return "Processor Slot 2";
72 case 0x13:
73 return "Processor Slot 4";
74 case 0x14:
75 return "Memory Slot 0";
76 case 0x15:
77 return "Memory Slot 1";
78 case 0x18:
79 return "Primary Microchannel";
80 case 0x19:
81 return "Secondary Microchannel";
82 case 0x1a:
83 return "Power Supply Interface";
84 case 0x1c:
85 return "Processor Slot 5";
86 case 0x1d:
87 return "Processor Slot 6";
88 case 0x1e:
89 return "Processor Slot 7";
90 case 0x1f:
91 return "Processor Slot 8";
92 default:
93 return "Unknown Module";
94 }
95}
96
97static int sspb = 0; /* stores the super port location */
98int voyager_8slot = 0; /* set to true if a 51xx monster */
99
100voyager_module_t *voyager_cat_list;
101
102/* the I/O port assignments for the VIC and QIC */
103static struct resource vic_res = {
104 .name = "Voyager Interrupt Controller",
105 .start = 0xFC00,
106 .end = 0xFC6F
107};
108static struct resource qic_res = {
109 .name = "Quad Interrupt Controller",
110 .start = 0xFC70,
111 .end = 0xFCFF
112};
113
114/* This function is used to pack a data bit stream inside a message.
115 * It writes num_bits of the data buffer in msg starting at start_bit.
116 * Note: This function assumes that any unused bit in the data stream
117 * is set to zero so that the ors will work correctly */
118static void
119cat_pack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
120{
121 /* compute initial shift needed */
122 const __u16 offset = start_bit % BITS_PER_BYTE;
123 __u16 len = num_bits / BITS_PER_BYTE;
124 __u16 byte = start_bit / BITS_PER_BYTE;
125 __u16 residue = (num_bits % BITS_PER_BYTE) + offset;
126 int i;
127
128 /* adjust if we have more than a byte of residue */
129 if (residue >= BITS_PER_BYTE) {
130 residue -= BITS_PER_BYTE;
131 len++;
132 }
133
134 /* clear out the bits. We assume here that if len==0 then
135 * residue >= offset. This is always true for the catbus
136 * operations */
137 msg[byte] &= 0xff << (BITS_PER_BYTE - offset);
138 msg[byte++] |= data[0] >> offset;
139 if (len == 0)
140 return;
141 for (i = 1; i < len; i++)
142 msg[byte++] = (data[i - 1] << (BITS_PER_BYTE - offset))
143 | (data[i] >> offset);
144 if (residue != 0) {
145 __u8 mask = 0xff >> residue;
146 __u8 last_byte = data[i - 1] << (BITS_PER_BYTE - offset)
147 | (data[i] >> offset);
148
149 last_byte &= ~mask;
150 msg[byte] &= mask;
151 msg[byte] |= last_byte;
152 }
153 return;
154}
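A small worked example of cat_pack(), with made-up buffer contents, may make the shifts easier to follow (the example function is illustrative only):

	static void __maybe_unused cat_pack_example(void)
	{
		/* Place the four most-significant bits of data[] into msg[]
		 * starting at bit 6, counting bits from the MSB of msg[0]. */
		__u8 msg[2]  = { 0xff, 0xff };
		__u8 data[2] = { 0xa0, 0x00 };	/* unused data bits must be zero */

		cat_pack(msg, 6, data, 4);
		/* offset = 6; the residue wraps past the byte boundary, so the
		 * four bits straddle msg[0]/msg[1]: msg is now { 0xfe, 0xbf },
		 * i.e. the pattern 1010 occupies bits 6-9 and every other bit
		 * keeps its original value. */
	}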
155
156/* unpack the data again (same arguments as cat_pack()). data buffer
157 * must be zero populated.
158 *
159 * Function: given a message string move to start_bit and copy num_bits into
160 * data (starting at bit 0 in data).
161 */
162static void
163cat_unpack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
164{
165 /* compute initial shift needed */
166 const __u16 offset = start_bit % BITS_PER_BYTE;
167 __u16 len = num_bits / BITS_PER_BYTE;
168 const __u8 last_bits = num_bits % BITS_PER_BYTE;
169 __u16 byte = start_bit / BITS_PER_BYTE;
170 int i;
171
172 if (last_bits != 0)
173 len++;
174
175 /* special case: want < 8 bits from msg and we can get it from
176 * a single byte of the msg */
177 if (len == 0 && BITS_PER_BYTE - offset >= num_bits) {
178 data[0] = msg[byte] << offset;
179 data[0] &= 0xff >> (BITS_PER_BYTE - num_bits);
180 return;
181 }
182 for (i = 0; i < len; i++) {
183 /* this annoying if has to be done just in case a read of
184 * msg one beyond the array causes a panic */
185 if (offset != 0) {
186 data[i] = msg[byte++] << offset;
187 data[i] |= msg[byte] >> (BITS_PER_BYTE - offset);
188 } else {
189 data[i] = msg[byte++];
190 }
191 }
192 /* do we need to truncate the final byte */
193 if (last_bits != 0) {
194 data[i - 1] &= 0xff << (BITS_PER_BYTE - last_bits);
195 }
196 return;
197}
198
199static void
200cat_build_header(__u8 * header, const __u16 len, const __u16 smallest_reg_bits,
201 const __u16 longest_reg_bits)
202{
203 int i;
204 __u16 start_bit = (smallest_reg_bits - 1) % BITS_PER_BYTE;
205 __u8 *last_byte = &header[len - 1];
206
207 if (start_bit == 0)
208 start_bit = 1; /* must have at least one bit in the hdr */
209
210 for (i = 0; i < len; i++)
211 header[i] = 0;
212
213 for (i = start_bit; i > 0; i--)
214 *last_byte = ((*last_byte) << 1) + 1;
215
216}
217
218static int
219cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 op)
220{
221 __u8 parity, inst, inst_buf[4] = { 0 };
222 __u8 iseq[VOYAGER_MAX_SCAN_PATH], hseq[VOYAGER_MAX_REG_SIZE];
223 __u16 ibytes, hbytes, padbits;
224 int i;
225
226 /*
227 * Parity is the parity of the register number + 1 (READ_REGISTER
228 * and WRITE_REGISTER always add '1' to the number of bits == 1)
229 */
230 parity = (__u8) (1 + (reg & 0x01) +
231 ((__u8) (reg & 0x02) >> 1) +
232 ((__u8) (reg & 0x04) >> 2) +
233 ((__u8) (reg & 0x08) >> 3)) % 2;
234
235 inst = ((parity << 7) | (reg << 2) | op);
236
237 outb(VOYAGER_CAT_IRCYC, CAT_CMD);
238 if (!modp->scan_path_connected) {
239 if (asicp->asic_id != VOYAGER_CAT_ID) {
240 printk
241 ("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n");
242 return 1;
243 }
244 outb(VOYAGER_CAT_HEADER, CAT_DATA);
245 outb(inst, CAT_DATA);
246 if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
247 CDEBUG(("VOYAGER CAT: cat_sendinst failed to get CAT_HEADER\n"));
248 return 1;
249 }
250 return 0;
251 }
252 ibytes = modp->inst_bits / BITS_PER_BYTE;
253 if ((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) {
254 padbits = BITS_PER_BYTE - padbits;
255 ibytes++;
256 }
257 hbytes = modp->largest_reg / BITS_PER_BYTE;
258 if (modp->largest_reg % BITS_PER_BYTE)
259 hbytes++;
260 CDEBUG(("cat_sendinst: ibytes=%d, hbytes=%d\n", ibytes, hbytes));
261 /* initialise the instruction sequence to 0xff */
262 for (i = 0; i < ibytes + hbytes; i++)
263 iseq[i] = 0xff;
264 cat_build_header(hseq, hbytes, modp->smallest_reg, modp->largest_reg);
265 cat_pack(iseq, modp->inst_bits, hseq, hbytes * BITS_PER_BYTE);
266 inst_buf[0] = inst;
267 inst_buf[1] = 0xFF >> (modp->largest_reg % BITS_PER_BYTE);
268 cat_pack(iseq, asicp->bit_location, inst_buf, asicp->ireg_length);
269#ifdef VOYAGER_CAT_DEBUG
270 printk("ins = 0x%x, iseq: ", inst);
271 for (i = 0; i < ibytes + hbytes; i++)
272 printk("0x%x ", iseq[i]);
273 printk("\n");
274#endif
275 if (cat_shiftout(iseq, ibytes, hbytes, padbits)) {
276 CDEBUG(("VOYAGER CAT: cat_sendinst: cat_shiftout failed\n"));
277 return 1;
278 }
279 CDEBUG(("CAT SHIFTOUT DONE\n"));
280 return 0;
281}
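The parity expression above simply counts the set bits in the low nibble of reg plus the one implicit instruction bit. An equivalent, perhaps clearer, formulation is sketched below (not part of the original driver; hweight8() comes from <linux/bitops.h>):

	/* Sketch: equivalent parity computation using hweight8(). */
	static inline __u8 cat_inst_parity(__u8 reg)
	{
		/* READ_REGISTER/WRITE_REGISTER contribute one extra '1' bit. */
		return (__u8)((hweight8(reg & 0x0f) + 1) % 2);
	}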
282
283static int
284cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
285 __u8 * value)
286{
287 if (!modp->scan_path_connected) {
288 if (asicp->asic_id != VOYAGER_CAT_ID) {
289 CDEBUG(("VOYAGER CAT: ERROR: cat_getdata to CAT asic with scan path connected\n"));
290 return 1;
291 }
292 if (reg > VOYAGER_SUBADDRHI)
293 outb(VOYAGER_CAT_RUN, CAT_CMD);
294 outb(VOYAGER_CAT_DRCYC, CAT_CMD);
295 outb(VOYAGER_CAT_HEADER, CAT_DATA);
296 *value = inb(CAT_DATA);
297 outb(0xAA, CAT_DATA);
298 if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
299 CDEBUG(("cat_getdata: failed to get VOYAGER_CAT_HEADER\n"));
300 return 1;
301 }
302 return 0;
303 } else {
304 __u16 sbits = modp->num_asics - 1 + asicp->ireg_length;
305 __u16 sbytes = sbits / BITS_PER_BYTE;
306 __u16 tbytes;
307 __u8 string[VOYAGER_MAX_SCAN_PATH],
308 trailer[VOYAGER_MAX_REG_SIZE];
309 __u8 padbits;
310 int i;
311
312 outb(VOYAGER_CAT_DRCYC, CAT_CMD);
313
314 if ((padbits = sbits % BITS_PER_BYTE) != 0) {
315 padbits = BITS_PER_BYTE - padbits;
316 sbytes++;
317 }
318 tbytes = asicp->ireg_length / BITS_PER_BYTE;
319 if (asicp->ireg_length % BITS_PER_BYTE)
320 tbytes++;
321 CDEBUG(("cat_getdata: tbytes = %d, sbytes = %d, padbits = %d\n",
322 tbytes, sbytes, padbits));
323 cat_build_header(trailer, tbytes, 1, asicp->ireg_length);
324
325 for (i = tbytes - 1; i >= 0; i--) {
326 outb(trailer[i], CAT_DATA);
327 string[sbytes + i] = inb(CAT_DATA);
328 }
329
330 for (i = sbytes - 1; i >= 0; i--) {
331 outb(0xaa, CAT_DATA);
332 string[i] = inb(CAT_DATA);
333 }
334 *value = 0;
335 cat_unpack(string,
336 padbits + (tbytes * BITS_PER_BYTE) +
337 asicp->asic_location, value, asicp->ireg_length);
338#ifdef VOYAGER_CAT_DEBUG
339 printk("value=0x%x, string: ", *value);
340 for (i = 0; i < tbytes + sbytes; i++)
341 printk("0x%x ", string[i]);
342 printk("\n");
343#endif
344
345 /* sanity check the rest of the return */
346 for (i = 0; i < tbytes; i++) {
347 __u8 input = 0;
348
349 cat_unpack(string, padbits + (i * BITS_PER_BYTE),
350 &input, BITS_PER_BYTE);
351 if (trailer[i] != input) {
352 CDEBUG(("cat_getdata: failed to sanity check rest of ret(%d) 0x%x != 0x%x\n", i, input, trailer[i]));
353 return 1;
354 }
355 }
356 CDEBUG(("cat_getdata DONE\n"));
357 return 0;
358 }
359}
360
361static int
362cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
363{
364 int i;
365
366 for (i = data_bytes + header_bytes - 1; i >= header_bytes; i--)
367 outb(data[i], CAT_DATA);
368
369 for (i = header_bytes - 1; i >= 0; i--) {
370 __u8 header = 0;
371 __u8 input;
372
373 outb(data[i], CAT_DATA);
374 input = inb(CAT_DATA);
375 CDEBUG(("cat_shiftout: returned 0x%x\n", input));
376 cat_unpack(data, ((data_bytes + i) * BITS_PER_BYTE) - pad_bits,
377 &header, BITS_PER_BYTE);
378 if (input != header) {
379 CDEBUG(("VOYAGER CAT: cat_shiftout failed to return header 0x%x != 0x%x\n", input, header));
380 return 1;
381 }
382 }
383 return 0;
384}
385
386static int
387cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
388 __u8 reg, __u8 value)
389{
390 outb(VOYAGER_CAT_DRCYC, CAT_CMD);
391 if (!modp->scan_path_connected) {
392 if (asicp->asic_id != VOYAGER_CAT_ID) {
393 CDEBUG(("VOYAGER CAT: ERROR: scan path disconnected when asic != CAT\n"));
394 return 1;
395 }
396 outb(VOYAGER_CAT_HEADER, CAT_DATA);
397 outb(value, CAT_DATA);
398 if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
399 CDEBUG(("cat_senddata: failed to get correct header response to sent data\n"));
400 return 1;
401 }
402 if (reg > VOYAGER_SUBADDRHI) {
403 outb(VOYAGER_CAT_RUN, CAT_CMD);
404 outb(VOYAGER_CAT_END, CAT_CMD);
405 outb(VOYAGER_CAT_RUN, CAT_CMD);
406 }
407
408 return 0;
409 } else {
410 __u16 hbytes = asicp->ireg_length / BITS_PER_BYTE;
411 __u16 dbytes =
412 (modp->num_asics - 1 + asicp->ireg_length) / BITS_PER_BYTE;
413 __u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH],
414 hseq[VOYAGER_MAX_REG_SIZE];
415 int i;
416
417 if ((padbits = (modp->num_asics - 1
418 + asicp->ireg_length) % BITS_PER_BYTE) != 0) {
419 padbits = BITS_PER_BYTE - padbits;
420 dbytes++;
421 }
422 if (asicp->ireg_length % BITS_PER_BYTE)
423 hbytes++;
424
425 cat_build_header(hseq, hbytes, 1, asicp->ireg_length);
426
427 for (i = 0; i < dbytes + hbytes; i++)
428 dseq[i] = 0xff;
429 CDEBUG(("cat_senddata: dbytes=%d, hbytes=%d, padbits=%d\n",
430 dbytes, hbytes, padbits));
431 cat_pack(dseq, modp->num_asics - 1 + asicp->ireg_length,
432 hseq, hbytes * BITS_PER_BYTE);
433 cat_pack(dseq, asicp->asic_location, &value,
434 asicp->ireg_length);
435#ifdef VOYAGER_CAT_DEBUG
436 printk("dseq ");
437 for (i = 0; i < hbytes + dbytes; i++) {
438 printk("0x%x ", dseq[i]);
439 }
440 printk("\n");
441#endif
442 return cat_shiftout(dseq, dbytes, hbytes, padbits);
443 }
444}
445
446static int
447cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 value)
448{
449 if (cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG))
450 return 1;
451 return cat_senddata(modp, asicp, reg, value);
452}
453
454static int
455cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
456 __u8 * value)
457{
458 if (cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG))
459 return 1;
460 return cat_getdata(modp, asicp, reg, value);
461}
462
463static int
464cat_subaddrsetup(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
465 __u16 len)
466{
467 __u8 val;
468
469 if (len > 1) {
470 /* set auto increment */
471 __u8 newval;
472
473 if (cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) {
474 CDEBUG(("cat_subaddrsetup: read of VOYAGER_AUTO_INC_REG failed\n"));
475 return 1;
476 }
477 CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n",
478 val));
479 newval = val | VOYAGER_AUTO_INC;
480 if (newval != val) {
481 if (cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, val)) {
482 CDEBUG(("cat_subaddrsetup: write to VOYAGER_AUTO_INC_REG failed\n"));
483 return 1;
484 }
485 }
486 }
487 if (cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8) (offset & 0xff))) {
488 CDEBUG(("cat_subaddrsetup: write to SUBADDRLO failed\n"));
489 return 1;
490 }
491 if (asicp->subaddr > VOYAGER_SUBADDR_LO) {
492 if (cat_write
493 (modp, asicp, VOYAGER_SUBADDRHI, (__u8) (offset >> 8))) {
494 CDEBUG(("cat_subaddrsetup: write to SUBADDRHI failed\n"));
495 return 1;
496 }
497 cat_read(modp, asicp, VOYAGER_SUBADDRHI, &val);
498 CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset,
499 val));
500 }
501 cat_read(modp, asicp, VOYAGER_SUBADDRLO, &val);
502 CDEBUG(("cat_subaddrsetup: offset = %d, lo = %d\n", offset, val));
503 return 0;
504}
505
506static int
507cat_subwrite(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
508 __u16 len, void *buf)
509{
510 int i, retval;
511
512 /* FIXME: need special actions for VOYAGER_CAT_ID here */
513 if (asicp->asic_id == VOYAGER_CAT_ID) {
514 CDEBUG(("cat_subwrite: ATTEMPT TO WRITE TO CAT ASIC\n"));
515 /* FIXME -- This is supposed to be handled better
516 * There is a problem writing to the cat asic in the
517 * PSI. The 30us delay seems to work, though */
518 udelay(30);
519 }
520
521 if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
522 printk("cat_subwrite: cat_subaddrsetup FAILED\n");
523 return retval;
524 }
525
526 if (cat_sendinst
527 (modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) {
528 printk("cat_subwrite: cat_sendinst FAILED\n");
529 return 1;
530 }
531 for (i = 0; i < len; i++) {
532 if (cat_senddata(modp, asicp, 0xFF, ((__u8 *) buf)[i])) {
533 printk
534 ("cat_subwrite: cat_sendata element at %d FAILED\n",
535 i);
536 return 1;
537 }
538 }
539 return 0;
540}
541static int
542cat_subread(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
543 __u16 len, void *buf)
544{
545 int i, retval;
546
547 if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
548 CDEBUG(("cat_subread: cat_subaddrsetup FAILED\n"));
549 return retval;
550 }
551
552 if (cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) {
553 CDEBUG(("cat_subread: cat_sendinst failed\n"));
554 return 1;
555 }
556 for (i = 0; i < len; i++) {
557 if (cat_getdata(modp, asicp, 0xFF, &((__u8 *) buf)[i])) {
558 CDEBUG(("cat_subread: cat_getdata element %d failed\n",
559 i));
560 return 1;
561 }
562 }
563 return 0;
564}
565
566/* buffer for storing EPROM data read in during initialisation */
567static __initdata __u8 eprom_buf[0xFFFF];
568static voyager_module_t *voyager_initial_module;
569
570/* Initialise the cat bus components. We assume this is called by the
571 * boot cpu *after* all memory initialisation has been done (so we can
572 * use kmalloc) but before smp initialisation, so we can probe the SMP
573 * configuration and pick up necessary information. */
574void __init voyager_cat_init(void)
575{
576 voyager_module_t **modpp = &voyager_initial_module;
577 voyager_asic_t **asicpp;
578 voyager_asic_t *qabc_asic = NULL;
579 int i, j;
580 unsigned long qic_addr = 0;
581 __u8 qabc_data[0x20];
582 __u8 num_submodules, val;
583 voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *) & eprom_buf[0];
584
585 __u8 cmos[4];
586 unsigned long addr;
587
588	/* initialise the SUS mailbox */
589 for (i = 0; i < sizeof(cmos); i++)
590 cmos[i] = voyager_extended_cmos_read(VOYAGER_DUMP_LOCATION + i);
591 addr = *(unsigned long *)cmos;
592 if ((addr & 0xff000000) != 0xff000000) {
593 printk(KERN_ERR
594 "Voyager failed to get SUS mailbox (addr = 0x%lx\n",
595 addr);
596 } else {
597 static struct resource res;
598
599 res.name = "voyager SUS";
600 res.start = addr;
601 res.end = addr + 0x3ff;
602
603 request_resource(&iomem_resource, &res);
604 voyager_SUS = (struct voyager_SUS *)
605 ioremap(addr, 0x400);
606 printk(KERN_NOTICE "Voyager SUS mailbox version 0x%x\n",
607 voyager_SUS->SUS_version);
608 voyager_SUS->kernel_version = VOYAGER_MAILBOX_VERSION;
609 voyager_SUS->kernel_flags = VOYAGER_OS_HAS_SYSINT;
610 }
611
612 /* clear the processor counts */
613 voyager_extended_vic_processors = 0;
614 voyager_quad_processors = 0;
615
616 printk("VOYAGER: beginning CAT bus probe\n");
617 /* set up the SuperSet Port Block which tells us where the
618 * CAT communication port is */
619 sspb = inb(VOYAGER_SSPB_RELOCATION_PORT) * 0x100;
620 VDEBUG(("VOYAGER DEBUG: sspb = 0x%x\n", sspb));
621
622	/* now find out if we're 8 slot or normal */
623 if ((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER)
624 == EIGHT_SLOT_IDENTIFIER) {
625 voyager_8slot = 1;
626 printk(KERN_NOTICE
627 "Voyager: Eight slot 51xx configuration detected\n");
628 }
629
630 for (i = VOYAGER_MIN_MODULE; i <= VOYAGER_MAX_MODULE; i++) {
631 __u8 input;
632 int asic;
633 __u16 eprom_size;
634 __u16 sp_offset;
635
636 outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
637 outb(i, VOYAGER_CAT_CONFIG_PORT);
638
639 /* check the presence of the module */
640 outb(VOYAGER_CAT_RUN, CAT_CMD);
641 outb(VOYAGER_CAT_IRCYC, CAT_CMD);
642 outb(VOYAGER_CAT_HEADER, CAT_DATA);
643 /* stream series of alternating 1's and 0's to stimulate
644 * response */
645 outb(0xAA, CAT_DATA);
646 input = inb(CAT_DATA);
647 outb(VOYAGER_CAT_END, CAT_CMD);
648 if (input != VOYAGER_CAT_HEADER) {
649 continue;
650 }
651 CDEBUG(("VOYAGER DEBUG: found module id 0x%x, %s\n", i,
652 cat_module_name(i)));
653 *modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL); /*&voyager_module_storage[cat_count++]; */
654 if (*modpp == NULL) {
655 printk("**WARNING** kmalloc failure in cat_init\n");
656 continue;
657 }
658 memset(*modpp, 0, sizeof(voyager_module_t));
659 /* need temporary asic for cat_subread. It will be
660 * filled in correctly later */
661 (*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count]; */
662 if ((*modpp)->asic == NULL) {
663 printk("**WARNING** kmalloc failure in cat_init\n");
664 continue;
665 }
666 memset((*modpp)->asic, 0, sizeof(voyager_asic_t));
667 (*modpp)->asic->asic_id = VOYAGER_CAT_ID;
668 (*modpp)->asic->subaddr = VOYAGER_SUBADDR_HI;
669 (*modpp)->module_addr = i;
670 (*modpp)->scan_path_connected = 0;
671 if (i == VOYAGER_PSI) {
672 /* Exception leg for modules with no EEPROM */
673 printk("Module \"%s\"\n", cat_module_name(i));
674 continue;
675 }
676
677 CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
678 outb(VOYAGER_CAT_RUN, CAT_CMD);
679 cat_disconnect(*modpp, (*modpp)->asic);
680 if (cat_subread(*modpp, (*modpp)->asic,
681 VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
682 &eprom_size)) {
683 printk
684 ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
685 i);
686 outb(VOYAGER_CAT_END, CAT_CMD);
687 continue;
688 }
689 if (eprom_size > sizeof(eprom_buf)) {
690 printk
691 ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x. Need %d\n",
692 i, eprom_size);
693 outb(VOYAGER_CAT_END, CAT_CMD);
694 continue;
695 }
696 outb(VOYAGER_CAT_END, CAT_CMD);
697 outb(VOYAGER_CAT_RUN, CAT_CMD);
698 CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
699 eprom_size));
700 if (cat_subread
701 (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
702 outb(VOYAGER_CAT_END, CAT_CMD);
703 continue;
704 }
705 outb(VOYAGER_CAT_END, CAT_CMD);
706 printk("Module \"%s\", version 0x%x, tracer 0x%x, asics %d\n",
707 cat_module_name(i), eprom_hdr->version_id,
708 *((__u32 *) eprom_hdr->tracer), eprom_hdr->num_asics);
709 (*modpp)->ee_size = eprom_hdr->ee_size;
710 (*modpp)->num_asics = eprom_hdr->num_asics;
711 asicpp = &((*modpp)->asic);
712 sp_offset = eprom_hdr->scan_path_offset;
713 /* All we really care about are the Quad cards. We
714 * identify them because they are in a processor slot
715 * and have only four asics */
716 if ((i < 0x10 || (i >= 0x14 && i < 0x1c) || i > 0x1f)) {
717 modpp = &((*modpp)->next);
718 continue;
719 }
720 /* Now we know it's in a processor slot, does it have
721 * a quad baseboard submodule */
722 outb(VOYAGER_CAT_RUN, CAT_CMD);
723 cat_read(*modpp, (*modpp)->asic, VOYAGER_SUBMODPRESENT,
724 &num_submodules);
725 /* lowest two bits, active low */
726 num_submodules = ~(0xfc | num_submodules);
727 CDEBUG(("VOYAGER CAT: %d submodules present\n",
728 num_submodules));
729 if (num_submodules == 0) {
730 /* fill in the dyadic extended processors */
731 __u8 cpu = i & 0x07;
732
733 printk("Module \"%s\": Dyadic Processor Card\n",
734 cat_module_name(i));
735 voyager_extended_vic_processors |= (1 << cpu);
736 cpu += 4;
737 voyager_extended_vic_processors |= (1 << cpu);
738 outb(VOYAGER_CAT_END, CAT_CMD);
739 continue;
740 }
741
742 /* now we want to read the asics on the first submodule,
743 * which should be the quad base board */
744
745 cat_read(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, &val);
746 CDEBUG(("cat_init: SUBMODSELECT value = 0x%x\n", val));
747 val = (val & 0x7c) | VOYAGER_QUAD_BASEBOARD;
748 cat_write(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, val);
749
750 outb(VOYAGER_CAT_END, CAT_CMD);
751
752 CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
753 outb(VOYAGER_CAT_RUN, CAT_CMD);
754 cat_disconnect(*modpp, (*modpp)->asic);
755 if (cat_subread(*modpp, (*modpp)->asic,
756 VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
757 &eprom_size)) {
758 printk
759 ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
760 i);
761 outb(VOYAGER_CAT_END, CAT_CMD);
762 continue;
763 }
764 if (eprom_size > sizeof(eprom_buf)) {
765 printk
766 ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x. Need %d\n",
767 i, eprom_size);
768 outb(VOYAGER_CAT_END, CAT_CMD);
769 continue;
770 }
771 outb(VOYAGER_CAT_END, CAT_CMD);
772 outb(VOYAGER_CAT_RUN, CAT_CMD);
773 CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
774 eprom_size));
775 if (cat_subread
776 (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
777 outb(VOYAGER_CAT_END, CAT_CMD);
778 continue;
779 }
780 outb(VOYAGER_CAT_END, CAT_CMD);
781 /* Now do everything for the QBB submodule 1 */
782 (*modpp)->ee_size = eprom_hdr->ee_size;
783 (*modpp)->num_asics = eprom_hdr->num_asics;
784 asicpp = &((*modpp)->asic);
785 sp_offset = eprom_hdr->scan_path_offset;
786 /* get rid of the dummy CAT asic and read the real one */
787 kfree((*modpp)->asic);
788 for (asic = 0; asic < (*modpp)->num_asics; asic++) {
789 int j;
790 voyager_asic_t *asicp = *asicpp = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++]; */
791 voyager_sp_table_t *sp_table;
792 voyager_at_t *asic_table;
793 voyager_jtt_t *jtag_table;
794
795 if (asicp == NULL) {
796 printk
797 ("**WARNING** kmalloc failure in cat_init\n");
798 continue;
799 }
800 asicpp = &(asicp->next);
801 asicp->asic_location = asic;
802 sp_table =
803 (voyager_sp_table_t *) (eprom_buf + sp_offset);
804 asicp->asic_id = sp_table->asic_id;
805 asic_table =
806 (voyager_at_t *) (eprom_buf +
807 sp_table->asic_data_offset);
808 for (j = 0; j < 4; j++)
809 asicp->jtag_id[j] = asic_table->jtag_id[j];
810 jtag_table =
811 (voyager_jtt_t *) (eprom_buf +
812 asic_table->jtag_offset);
813 asicp->ireg_length = jtag_table->ireg_len;
814 asicp->bit_location = (*modpp)->inst_bits;
815 (*modpp)->inst_bits += asicp->ireg_length;
816 if (asicp->ireg_length > (*modpp)->largest_reg)
817 (*modpp)->largest_reg = asicp->ireg_length;
818 if (asicp->ireg_length < (*modpp)->smallest_reg ||
819 (*modpp)->smallest_reg == 0)
820 (*modpp)->smallest_reg = asicp->ireg_length;
821 CDEBUG(("asic 0x%x, ireg_length=%d, bit_location=%d\n",
822 asicp->asic_id, asicp->ireg_length,
823 asicp->bit_location));
824 if (asicp->asic_id == VOYAGER_QUAD_QABC) {
825 CDEBUG(("VOYAGER CAT: QABC ASIC found\n"));
826 qabc_asic = asicp;
827 }
828 sp_offset += sizeof(voyager_sp_table_t);
829 }
830 CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n", (*modpp)->inst_bits, (*modpp)->largest_reg, (*modpp)->smallest_reg));
831 /* OK, now we have the QUAD ASICs set up, use them.
832 * we need to:
833 *
834 * 1. Find the Memory area for the Quad CPIs.
835 * 2. Find the Extended VIC processor
836 * 3. Configure a second extended VIC processor (This
837		 * cannot be done for the 51xx.)
838 * */
839 outb(VOYAGER_CAT_RUN, CAT_CMD);
840 cat_connect(*modpp, (*modpp)->asic);
841 CDEBUG(("CAT CONNECTED!!\n"));
842 cat_subread(*modpp, qabc_asic, 0, sizeof(qabc_data), qabc_data);
843 qic_addr = qabc_data[5] << 8;
844 qic_addr = (qic_addr | qabc_data[6]) << 8;
845 qic_addr = (qic_addr | qabc_data[7]) << 8;
846 printk
847 ("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n",
848 cat_module_name(i), qic_addr, qabc_data[8]);
849#if 0 /* plumbing fails---FIXME */
850 if ((qabc_data[8] & 0xf0) == 0) {
851 /* FIXME: 32 way 8 CPU slot monster cannot be
852 * plumbed this way---need to check for it */
853
854 printk("Plumbing second Extended Quad Processor\n");
855 /* second VIC line hardwired to Quad CPU 1 */
856 qabc_data[8] |= 0x20;
857 cat_subwrite(*modpp, qabc_asic, 8, 1, &qabc_data[8]);
858#ifdef VOYAGER_CAT_DEBUG
859 /* verify plumbing */
860 cat_subread(*modpp, qabc_asic, 8, 1, &qabc_data[8]);
861 if ((qabc_data[8] & 0xf0) == 0) {
862 CDEBUG(("PLUMBING FAILED: 0x%x\n",
863 qabc_data[8]));
864 }
865#endif
866 }
867#endif
868
869 {
870 struct resource *res =
871 kzalloc(sizeof(struct resource), GFP_KERNEL);
872 res->name = kmalloc(128, GFP_KERNEL);
873 sprintf((char *)res->name, "Voyager %s Quad CPI",
874 cat_module_name(i));
875 res->start = qic_addr;
876 res->end = qic_addr + 0x3ff;
877 request_resource(&iomem_resource, res);
878 }
879
880 qic_addr = (unsigned long)ioremap_cache(qic_addr, 0x400);
881
882 for (j = 0; j < 4; j++) {
883 __u8 cpu;
884
885 if (voyager_8slot) {
886 /* 8 slot has a different mapping,
887 * each slot has only one vic line, so
888 * 1 cpu in each slot must be < 8 */
889 cpu = (i & 0x07) + j * 8;
890 } else {
891 cpu = (i & 0x03) + j * 4;
892 }
893 if ((qabc_data[8] & (1 << j))) {
894 voyager_extended_vic_processors |= (1 << cpu);
895 }
896 if (qabc_data[8] & (1 << (j + 4))) {
897 /* Second SET register plumbed: Quad
898 * card has two VIC connected CPUs.
899 * Secondary cannot be booted as a VIC
900 * CPU */
901 voyager_extended_vic_processors |= (1 << cpu);
902 voyager_allowed_boot_processors &=
903 (~(1 << cpu));
904 }
905
906 voyager_quad_processors |= (1 << cpu);
907 voyager_quad_cpi_addr[cpu] = (struct voyager_qic_cpi *)
908 (qic_addr + (j << 8));
909 CDEBUG(("CPU%d: CPI address 0x%lx\n", cpu,
910 (unsigned long)voyager_quad_cpi_addr[cpu]));
911 }
912 outb(VOYAGER_CAT_END, CAT_CMD);
913
914 *asicpp = NULL;
915 modpp = &((*modpp)->next);
916 }
917 *modpp = NULL;
918 printk
919 ("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n",
920 voyager_extended_vic_processors, voyager_quad_processors,
921 voyager_allowed_boot_processors);
922 request_resource(&ioport_resource, &vic_res);
923 if (voyager_quad_processors)
924 request_resource(&ioport_resource, &qic_res);
925 /* set up the front power switch */
926}
927
928int voyager_cat_readb(__u8 module, __u8 asic, int reg)
929{
930 return 0;
931}
932
933static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp)
934{
935 __u8 val;
936 int err = 0;
937
938 if (!modp->scan_path_connected)
939 return 0;
940 if (asicp->asic_id != VOYAGER_CAT_ID) {
941 CDEBUG(("cat_disconnect: ASIC is not CAT\n"));
942 return 1;
943 }
944 err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
945 if (err) {
946 CDEBUG(("cat_disconnect: failed to read SCANPATH\n"));
947 return err;
948 }
949 val &= VOYAGER_DISCONNECT_ASIC;
950 err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
951 if (err) {
952 CDEBUG(("cat_disconnect: failed to write SCANPATH\n"));
953 return err;
954 }
955 outb(VOYAGER_CAT_END, CAT_CMD);
956 outb(VOYAGER_CAT_RUN, CAT_CMD);
957 modp->scan_path_connected = 0;
958
959 return 0;
960}
961
962static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp)
963{
964 __u8 val;
965 int err = 0;
966
967 if (modp->scan_path_connected)
968 return 0;
969 if (asicp->asic_id != VOYAGER_CAT_ID) {
970 CDEBUG(("cat_connect: ASIC is not CAT\n"));
971 return 1;
972 }
973
974 err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
975 if (err) {
976 CDEBUG(("cat_connect: failed to read SCANPATH\n"));
977 return err;
978 }
979 val |= VOYAGER_CONNECT_ASIC;
980 err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
981 if (err) {
982 CDEBUG(("cat_connect: failed to write SCANPATH\n"));
983 return err;
984 }
985 outb(VOYAGER_CAT_END, CAT_CMD);
986 outb(VOYAGER_CAT_RUN, CAT_CMD);
987 modp->scan_path_connected = 1;
988
989 return 0;
990}
991
992void voyager_cat_power_off(void)
993{
994 /* Power the machine off by writing to the PSI over the CAT
995 * bus */
996 __u8 data;
997 voyager_module_t psi = { 0 };
998 voyager_asic_t psi_asic = { 0 };
999
1000 psi.asic = &psi_asic;
1001 psi.asic->asic_id = VOYAGER_CAT_ID;
1002 psi.asic->subaddr = VOYAGER_SUBADDR_HI;
1003 psi.module_addr = VOYAGER_PSI;
1004 psi.scan_path_connected = 0;
1005
1006 outb(VOYAGER_CAT_END, CAT_CMD);
1007 /* Connect the PSI to the CAT Bus */
1008 outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
1009 outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
1010 outb(VOYAGER_CAT_RUN, CAT_CMD);
1011 cat_disconnect(&psi, &psi_asic);
1012 /* Read the status */
1013 cat_subread(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data);
1014 outb(VOYAGER_CAT_END, CAT_CMD);
1015 CDEBUG(("PSI STATUS 0x%x\n", data));
1016 /* These two writes are power off prep and perform */
1017 data = PSI_CLEAR;
1018 outb(VOYAGER_CAT_RUN, CAT_CMD);
1019 cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data);
1020 outb(VOYAGER_CAT_END, CAT_CMD);
1021 data = PSI_POWER_DOWN;
1022 outb(VOYAGER_CAT_RUN, CAT_CMD);
1023 cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data);
1024 outb(VOYAGER_CAT_END, CAT_CMD);
1025}
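Every PSI access above follows the same RUN / transfer / END bracket on the CAT command port. A small helper could capture the pattern; this is a sketch only, not part of the original driver:

	/* Sketch: wrap a single-byte PSI general-register write in the
	 * VOYAGER_CAT_RUN / VOYAGER_CAT_END bracket used throughout this file. */
	static void psi_write_general(voyager_module_t *psi, voyager_asic_t *psi_asic,
				      __u8 value)
	{
		__u8 data = value;

		outb(VOYAGER_CAT_RUN, CAT_CMD);
		cat_subwrite(psi, psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data);
		outb(VOYAGER_CAT_END, CAT_CMD);
	}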
1026
1027struct voyager_status voyager_status = { 0 };
1028
1029void voyager_cat_psi(__u8 cmd, __u16 reg, __u8 * data)
1030{
1031 voyager_module_t psi = { 0 };
1032 voyager_asic_t psi_asic = { 0 };
1033
1034 psi.asic = &psi_asic;
1035 psi.asic->asic_id = VOYAGER_CAT_ID;
1036 psi.asic->subaddr = VOYAGER_SUBADDR_HI;
1037 psi.module_addr = VOYAGER_PSI;
1038 psi.scan_path_connected = 0;
1039
1040 outb(VOYAGER_CAT_END, CAT_CMD);
1041 /* Connect the PSI to the CAT Bus */
1042 outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
1043 outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
1044 outb(VOYAGER_CAT_RUN, CAT_CMD);
1045 cat_disconnect(&psi, &psi_asic);
1046 switch (cmd) {
1047 case VOYAGER_PSI_READ:
1048 cat_read(&psi, &psi_asic, reg, data);
1049 break;
1050 case VOYAGER_PSI_WRITE:
1051 cat_write(&psi, &psi_asic, reg, *data);
1052 break;
1053 case VOYAGER_PSI_SUBREAD:
1054 cat_subread(&psi, &psi_asic, reg, 1, data);
1055 break;
1056 case VOYAGER_PSI_SUBWRITE:
1057 cat_subwrite(&psi, &psi_asic, reg, 1, data);
1058 break;
1059 default:
1060 printk(KERN_ERR "Voyager PSI, unrecognised command %d\n", cmd);
1061 break;
1062 }
1063 outb(VOYAGER_CAT_END, CAT_CMD);
1064}
1065
1066void voyager_cat_do_common_interrupt(void)
1067{
1068 /* This is caused either by a memory parity error or something
1069 * in the PSI */
1070 __u8 data;
1071 voyager_module_t psi = { 0 };
1072 voyager_asic_t psi_asic = { 0 };
1073 struct voyager_psi psi_reg;
1074 int i;
1075 re_read:
1076 psi.asic = &psi_asic;
1077 psi.asic->asic_id = VOYAGER_CAT_ID;
1078 psi.asic->subaddr = VOYAGER_SUBADDR_HI;
1079 psi.module_addr = VOYAGER_PSI;
1080 psi.scan_path_connected = 0;
1081
1082 outb(VOYAGER_CAT_END, CAT_CMD);
1083 /* Connect the PSI to the CAT Bus */
1084 outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
1085 outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
1086 outb(VOYAGER_CAT_RUN, CAT_CMD);
1087 cat_disconnect(&psi, &psi_asic);
1088 /* Read the status. NOTE: Need to read *all* the PSI regs here
1089 * otherwise the cmn int will be reasserted */
1090 for (i = 0; i < sizeof(psi_reg.regs); i++) {
1091 cat_read(&psi, &psi_asic, i, &((__u8 *) & psi_reg.regs)[i]);
1092 }
1093 outb(VOYAGER_CAT_END, CAT_CMD);
1094 if ((psi_reg.regs.checkbit & 0x02) == 0) {
1095 psi_reg.regs.checkbit |= 0x02;
1096 cat_write(&psi, &psi_asic, 5, psi_reg.regs.checkbit);
1097 printk("VOYAGER RE-READ PSI\n");
1098 goto re_read;
1099 }
1100 outb(VOYAGER_CAT_RUN, CAT_CMD);
1101 for (i = 0; i < sizeof(psi_reg.subregs); i++) {
1102 /* This looks strange, but the PSI doesn't do auto increment
1103 * correctly */
1104 cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i,
1105 1, &((__u8 *) & psi_reg.subregs)[i]);
1106 }
1107 outb(VOYAGER_CAT_END, CAT_CMD);
1108#ifdef VOYAGER_CAT_DEBUG
1109 printk("VOYAGER PSI: ");
1110 for (i = 0; i < sizeof(psi_reg.regs); i++)
1111 printk("%02x ", ((__u8 *) & psi_reg.regs)[i]);
1112 printk("\n ");
1113 for (i = 0; i < sizeof(psi_reg.subregs); i++)
1114 printk("%02x ", ((__u8 *) & psi_reg.subregs)[i]);
1115 printk("\n");
1116#endif
1117 if (psi_reg.regs.intstatus & PSI_MON) {
1118 /* switch off or power fail */
1119
1120 if (psi_reg.subregs.supply & PSI_SWITCH_OFF) {
1121 if (voyager_status.switch_off) {
1122 printk(KERN_ERR
1123 "Voyager front panel switch turned off again---Immediate power off!\n");
1124 voyager_cat_power_off();
1125 /* not reached */
1126 } else {
1127 printk(KERN_ERR
1128 "Voyager front panel switch turned off\n");
1129 voyager_status.switch_off = 1;
1130 voyager_status.request_from_kernel = 1;
1131 wake_up_process(voyager_thread);
1132 }
1133 /* Tell the hardware we're taking care of the
1134 * shutdown, otherwise it will power the box off
1135 * within 3 seconds of the switch being pressed and,
1136 * which is much more important to us, continue to
1137 * assert the common interrupt */
1138 data = PSI_CLR_SWITCH_OFF;
1139 outb(VOYAGER_CAT_RUN, CAT_CMD);
1140 cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG,
1141 1, &data);
1142 outb(VOYAGER_CAT_END, CAT_CMD);
1143 } else {
1144
1145 VDEBUG(("Voyager ac fail reg 0x%x\n",
1146 psi_reg.subregs.ACfail));
1147 if ((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) {
1148 /* No further update */
1149 return;
1150 }
1151#if 0
1152 /* Don't bother trying to find out who failed.
1153 * FIXME: This probably makes the code incorrect on
1154 * anything other than a 345x */
1155 for (i = 0; i < 5; i++) {
1156 if (psi_reg.subregs.ACfail & (1 << i)) {
1157 break;
1158 }
1159 }
1160 printk(KERN_NOTICE "AC FAIL IN SUPPLY %d\n", i);
1161#endif
1162 /* DON'T do this: it shuts down the AC PSI
1163 outb(VOYAGER_CAT_RUN, CAT_CMD);
1164 data = PSI_MASK_MASK | i;
1165 cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK,
1166 1, &data);
1167 outb(VOYAGER_CAT_END, CAT_CMD);
1168 */
1169 printk(KERN_ERR "Voyager AC power failure\n");
1170 outb(VOYAGER_CAT_RUN, CAT_CMD);
1171 data = PSI_COLD_START;
1172 cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG,
1173 1, &data);
1174 outb(VOYAGER_CAT_END, CAT_CMD);
1175 voyager_status.power_fail = 1;
1176 voyager_status.request_from_kernel = 1;
1177 wake_up_process(voyager_thread);
1178 }
1179
1180 } else if (psi_reg.regs.intstatus & PSI_FAULT) {
1181 /* Major fault! */
1182 printk(KERN_ERR
1183 "Voyager PSI Detected major fault, immediate power off!\n");
1184 voyager_cat_power_off();
1185 /* not reached */
1186 } else if (psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM
1187 | PSI_CURRENT | PSI_DVM
1188 | PSI_PSCFAULT | PSI_STAT_CHG)) {
1189 /* other psi fault */
1190
1191		printk(KERN_WARNING "Voyager PSI status 0x%x\n", psi_reg.regs.intstatus);
1192 /* clear the PSI fault */
1193 outb(VOYAGER_CAT_RUN, CAT_CMD);
1194 cat_write(&psi, &psi_asic, VOYAGER_PSI_STATUS_REG, 0);
1195 outb(VOYAGER_CAT_END, CAT_CMD);
1196 }
1197}
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
deleted file mode 100644
index 98e3c2bc7563..000000000000
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ /dev/null
@@ -1,1805 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 1999,2001
4 *
5 * Author: J.E.J.Bottomley@HansenPartnership.com
6 *
7 * This file provides all the same external entries as smp.c but uses
8 * the voyager hal to provide the functionality
9 */
10#include <linux/cpu.h>
11#include <linux/module.h>
12#include <linux/mm.h>
13#include <linux/kernel_stat.h>
14#include <linux/delay.h>
15#include <linux/mc146818rtc.h>
16#include <linux/cache.h>
17#include <linux/interrupt.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/bootmem.h>
21#include <linux/completion.h>
22#include <asm/desc.h>
23#include <asm/voyager.h>
24#include <asm/vic.h>
25#include <asm/mtrr.h>
26#include <asm/pgalloc.h>
27#include <asm/tlbflush.h>
28#include <asm/arch_hooks.h>
29#include <asm/trampoline.h>
30
31/* TLB state -- visible externally, indexed physically */
32DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
33
34/* CPU IRQ affinity -- set to all ones initially */
35static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
36 {[0 ... NR_CPUS-1] = ~0UL };
37
38/* per CPU data structure (for /proc/cpuinfo et al), visible externally
39 * indexed physically */
40DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
41EXPORT_PER_CPU_SYMBOL(cpu_info);
42
43/* physical ID of the CPU used to boot the system */
44unsigned char boot_cpu_id;
45
46/* The memory line addresses for the Quad CPIs */
47struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;
48
49/* The masks for the Extended VIC processors, filled in by cat_init */
50__u32 voyager_extended_vic_processors = 0;
51
52/* Masks for the extended Quad processors which cannot be VIC booted */
53__u32 voyager_allowed_boot_processors = 0;
54
55/* The mask for the Quad Processors (both extended and non-extended) */
56__u32 voyager_quad_processors = 0;
57
58/* Total count of live CPUs, used in process.c to display
59 * the CPU information and in irq.c for the per CPU irq
60 * activity count. Finally exported by i386_ksyms.c */
61static int voyager_extended_cpus = 1;
62
63/* Used for the invalidate map that's also checked in the spinlock */
64static volatile unsigned long smp_invalidate_needed;
65
66/* Bitmask of CPUs present in the system - exported by i386_ksyms.c, used
67 * by scheduler but indexed physically */
68cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
69
70/* The internal functions */
71static void send_CPI(__u32 cpuset, __u8 cpi);
72static void ack_CPI(__u8 cpi);
73static int ack_QIC_CPI(__u8 cpi);
74static void ack_special_QIC_CPI(__u8 cpi);
75static void ack_VIC_CPI(__u8 cpi);
76static void send_CPI_allbutself(__u8 cpi);
77static void mask_vic_irq(unsigned int irq);
78static void unmask_vic_irq(unsigned int irq);
79static unsigned int startup_vic_irq(unsigned int irq);
80static void enable_local_vic_irq(unsigned int irq);
81static void disable_local_vic_irq(unsigned int irq);
82static void before_handle_vic_irq(unsigned int irq);
83static void after_handle_vic_irq(unsigned int irq);
84static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
85static void ack_vic_irq(unsigned int irq);
86static void vic_enable_cpi(void);
87static void do_boot_cpu(__u8 cpuid);
88static void do_quad_bootstrap(void);
89static void initialize_secondary(void);
90
91int hard_smp_processor_id(void);
92int safe_smp_processor_id(void);
93
94/* Inline functions */
95static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
96{
97 voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
98 (smp_processor_id() << 16) + cpi;
99}
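The value written into the quad CPI mailbox above encodes the sending CPU in the upper half-word and the CPI number in the lower half-word; for example (numbers illustrative):

	/* CPU 2 sending CPI 5 writes (2 << 16) + 5 = 0x00020005, so the
	 * receiving quad can recover both the originator (bits 31-16) and
	 * the CPI number (bits 15-0) from the single 32-bit store. */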
100
101static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
102{
103 int cpu;
104
105 for_each_online_cpu(cpu) {
106 if (cpuset & (1 << cpu)) {
107#ifdef VOYAGER_DEBUG
108 if (!cpu_online(cpu))
109 VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
110 "cpu_online_map\n",
111 hard_smp_processor_id(), cpi, cpu));
112#endif
113 send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
114 }
115 }
116}
117
118static inline void wrapper_smp_local_timer_interrupt(void)
119{
120 irq_enter();
121 smp_local_timer_interrupt();
122 irq_exit();
123}
124
125static inline void send_one_CPI(__u8 cpu, __u8 cpi)
126{
127 if (voyager_quad_processors & (1 << cpu))
128 send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
129 else
130 send_CPI(1 << cpu, cpi);
131}
132
133static inline void send_CPI_allbutself(__u8 cpi)
134{
135 __u8 cpu = smp_processor_id();
136 __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
137 send_CPI(mask, cpi);
138}
139
140static inline int is_cpu_quad(void)
141{
142 __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
143 return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
144}
145
146static inline int is_cpu_extended(void)
147{
148 __u8 cpu = hard_smp_processor_id();
149
150 return (voyager_extended_vic_processors & (1 << cpu));
151}
152
153static inline int is_cpu_vic_boot(void)
154{
155 __u8 cpu = hard_smp_processor_id();
156
157 return (voyager_extended_vic_processors
158 & voyager_allowed_boot_processors & (1 << cpu));
159}
160
161static inline void ack_CPI(__u8 cpi)
162{
163 switch (cpi) {
164 case VIC_CPU_BOOT_CPI:
165 if (is_cpu_quad() && !is_cpu_vic_boot())
166 ack_QIC_CPI(cpi);
167 else
168 ack_VIC_CPI(cpi);
169 break;
170 case VIC_SYS_INT:
171 case VIC_CMN_INT:
172 /* These are slightly strange. Even on the Quad card,
173	 * they are vectored as VIC CPIs */
174 if (is_cpu_quad())
175 ack_special_QIC_CPI(cpi);
176 else
177 ack_VIC_CPI(cpi);
178 break;
179 default:
180 printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
181 break;
182 }
183}
184
185/* local variables */
186
187/* The VIC IRQ descriptors -- these look almost identical to the
188 * 8259 IRQs except that masks and things must be kept per processor
189 */
190static struct irq_chip vic_chip = {
191 .name = "VIC",
192 .startup = startup_vic_irq,
193 .mask = mask_vic_irq,
194 .unmask = unmask_vic_irq,
195 .set_affinity = set_vic_irq_affinity,
196};
197
198/* used to count up as CPUs are brought on line (starts at 0) */
199static int cpucount = 0;
200
201/* The per cpu profile stuff - used in smp_local_timer_interrupt */
202static DEFINE_PER_CPU(int, prof_multiplier) = 1;
203static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
204static DEFINE_PER_CPU(int, prof_counter) = 1;
205
206/* the map used to check if a CPU has booted */
207static __u32 cpu_booted_map;
208
209/* the synchronize flag used to hold all secondary CPUs spinning in
210 * a tight loop until the boot sequence is ready for them */
211static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
212
213/* This is for the new dynamic CPU boot code */
214
215/* The per processor IRQ masks (these are usually kept in sync) */
216static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
217
218/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
219static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
220
221/* Lock for enable/disable of VIC interrupts */
222static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
223
224/* The boot processor is correctly set up in PC mode when it
225 * comes up, but the secondaries need their master/slave 8259
226 * pairs initialized correctly */
227
228/* Interrupt counters (per cpu) and total - used to try to
229 * even up the interrupt handling routines */
230static long vic_intr_total = 0;
231static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
232static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
233
234/* Since we can only use CPI0, we fake all the other CPIs */
235static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
236
237/* debugging routine to read the isr of the cpu's pic */
238static inline __u16 vic_read_isr(void)
239{
240 __u16 isr;
241
242 outb(0x0b, 0xa0);
243 isr = inb(0xa0) << 8;
244 outb(0x0b, 0x20);
245 isr |= inb(0x20);
246
247 return isr;
248}
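The OCW3 value 0x0b selects the in-service register; a companion helper for the interrupt-request register would use 0x0a instead. A sketch, assuming the standard 8259A OCW3 commands apply to these PICs (not part of the original driver):

	/* Sketch: read the combined IRR of both 8259s, analogous to
	 * vic_read_isr() but using OCW3 = 0x0a (read IRR) instead of 0x0b. */
	static inline __u16 vic_read_irr(void)
	{
		__u16 irr;

		outb(0x0a, 0xa0);	/* slave PIC: next read returns IRR */
		irr = inb(0xa0) << 8;
		outb(0x0a, 0x20);	/* master PIC: next read returns IRR */
		irr |= inb(0x20);

		return irr;
	}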
249
250static __init void qic_setup(void)
251{
252 if (!is_cpu_quad()) {
253 /* not a quad, no setup */
254 return;
255 }
256 outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
257 outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
258
259 if (is_cpu_extended()) {
260 /* the QIC duplicate of the VIC base register */
261 outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
262 outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);
263
264 /* FIXME: should set up the QIC timer and memory parity
265 * error vectors here */
266 }
267}
268
269static __init void vic_setup_pic(void)
270{
271 outb(1, VIC_REDIRECT_REGISTER_1);
272 /* clear the claim registers for dynamic routing */
273 outb(0, VIC_CLAIM_REGISTER_0);
274 outb(0, VIC_CLAIM_REGISTER_1);
275
276 outb(0, VIC_PRIORITY_REGISTER);
277 /* Set the Primary and Secondary Microchannel vector
278 * bases to be the same as the ordinary interrupts
279 *
280 * FIXME: This would be more efficient using separate
281 * vectors. */
282 outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
283 outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
284	/* Now initialise the master PIC belonging to this CPU by
285 * sending the four ICWs */
286
287 /* ICW1: level triggered, ICW4 needed */
288 outb(0x19, 0x20);
289
290 /* ICW2: vector base */
291 outb(FIRST_EXTERNAL_VECTOR, 0x21);
292
293 /* ICW3: slave at line 2 */
294 outb(0x04, 0x21);
295
296 /* ICW4: 8086 mode */
297 outb(0x01, 0x21);
298
299 /* now the same for the slave PIC */
300
301 /* ICW1: level trigger, ICW4 needed */
302 outb(0x19, 0xA0);
303
304 /* ICW2: slave vector base */
305 outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);
306
307 /* ICW3: slave ID */
308 outb(0x02, 0xA1);
309
310 /* ICW4: 8086 mode */
311 outb(0x01, 0xA1);
312}
313
314static void do_quad_bootstrap(void)
315{
316 if (is_cpu_quad() && is_cpu_vic_boot()) {
317 int i;
318 unsigned long flags;
319 __u8 cpuid = hard_smp_processor_id();
320
321 local_irq_save(flags);
322
323 for (i = 0; i < 4; i++) {
324 /* FIXME: this would be >>3 &0x7 on the 32 way */
325 if (((cpuid >> 2) & 0x03) == i)
326 /* don't lower our own mask! */
327 continue;
328
329 /* masquerade as local Quad CPU */
330 outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
331 /* enable the startup CPI */
332 outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
333 /* restore cpu id */
334 outb(0, QIC_PROCESSOR_ID);
335 }
336 local_irq_restore(flags);
337 }
338}
339
340void prefill_possible_map(void)
341{
342 /* This is empty on voyager because we need a much
343 * earlier detection which is done in find_smp_config */
344}
345
346/* Set up all the basic stuff: read the SMP config and make all the
347 * SMP information reflect only the boot cpu. All others will be
348 * brought on-line later. */
349void __init find_smp_config(void)
350{
351 int i;
352
353 boot_cpu_id = hard_smp_processor_id();
354
355 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
356
357 /* initialize the CPU structures (moved from smp_boot_cpus) */
358 for (i = 0; i < nr_cpu_ids; i++)
359 cpu_irq_affinity[i] = ~0;
360 cpu_online_map = cpumask_of_cpu(boot_cpu_id);
361
362 /* The boot CPU must be extended */
363 voyager_extended_vic_processors = 1 << boot_cpu_id;
364 /* initially, all of the first 8 CPUs can boot */
365 voyager_allowed_boot_processors = 0xff;
366 /* set up everything for just this CPU, we can alter
367 * this as we start the other CPUs later */
368 /* now get the CPU disposition from the extended CMOS */
369 cpus_addr(phys_cpu_present_map)[0] =
370 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
371 cpus_addr(phys_cpu_present_map)[0] |=
372 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
373 cpus_addr(phys_cpu_present_map)[0] |=
374 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
375 2) << 16;
376 cpus_addr(phys_cpu_present_map)[0] |=
377 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
378 3) << 24;
379 init_cpu_possible(&phys_cpu_present_map);
380 printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
381 cpus_addr(phys_cpu_present_map)[0]);
382 /* Here we set up the VIC to enable SMP */
383 /* enable the CPIs by writing the base vector to their register */
384 outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
385 outb(1, VIC_REDIRECT_REGISTER_1);
386	/* set the claim registers for static routing --- Boot CPU gets
387	 * all interrupts until all the other CPUs are started */
388 outb(0xff, VIC_CLAIM_REGISTER_0);
389 outb(0xff, VIC_CLAIM_REGISTER_1);
390 /* Set the Primary and Secondary Microchannel vector
391 * bases to be the same as the ordinary interrupts
392 *
393 * FIXME: This would be more efficient using separate
394 * vectors. */
395 outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
396 outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
397
398 /* Finally tell the firmware that we're driving */
399 outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
400 VOYAGER_SUS_IN_CONTROL_PORT);
401
402 current_thread_info()->cpu = boot_cpu_id;
403 percpu_write(cpu_number, boot_cpu_id);
404}
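
The four voyager_extended_cmos_read() calls above assemble a 32-bit little-endian processor-present mask one byte at a time. A minimal sketch of the same assembly as a loop, assuming only the helper and constant already used above; this is illustrative, not a drop-in replacement:

/* Sketch: read the 32-bit present mask byte by byte, as find_smp_config()
 * does with four explicit reads above. */
static __u32 read_present_mask_sketch(void)
{
	__u32 mask = 0;
	int byte;

	for (byte = 0; byte < 4; byte++)
		mask |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK
						   + byte) << (8 * byte);
	return mask;
}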
405
406/*
407 * The bootstrap kernel entry code has set these up. Save them
408 * for a given CPU; id is physical */
409void __init smp_store_cpu_info(int id)
410{
411 struct cpuinfo_x86 *c = &cpu_data(id);
412
413 *c = boot_cpu_data;
414 c->cpu_index = id;
415
416 identify_secondary_cpu(c);
417}
418
419/* Routine initially called when a non-boot CPU is brought online */
420static void __init start_secondary(void *unused)
421{
422 __u8 cpuid = hard_smp_processor_id();
423
424 cpu_init();
425
426 /* OK, we're in the routine */
427 ack_CPI(VIC_CPU_BOOT_CPI);
428
429 /* setup the 8259 master slave pair belonging to this CPU ---
430 * we won't actually receive any until the boot CPU
431 * relinquishes it's static routing mask */
432 vic_setup_pic();
433
434 qic_setup();
435
436 if (is_cpu_quad() && !is_cpu_vic_boot()) {
437 /* clear the boot CPI */
438 __u8 dummy;
439
440 dummy =
441 voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
442 printk("read dummy %d\n", dummy);
443 }
444
445 /* lower the mask to receive CPIs */
446 vic_enable_cpi();
447
448 VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));
449
450 notify_cpu_starting(cpuid);
451
452 /* enable interrupts */
453 local_irq_enable();
454
455 /* get our bogomips */
456 calibrate_delay();
457
458 /* save our processor parameters */
459 smp_store_cpu_info(cpuid);
460
461 /* if we're a quad, we may need to bootstrap other CPUs */
462 do_quad_bootstrap();
463
464 /* FIXME: this is rather a poor hack to prevent the CPU
465 * activating softirqs while it's supposed to be waiting for
466 * permission to proceed. Without this, the new per CPU stuff
467 * in the softirqs will fail */
468 local_irq_disable();
469 cpu_set(cpuid, cpu_callin_map);
470
471 /* signal that we're done */
472 cpu_booted_map = 1;
473
474 while (!cpu_isset(cpuid, smp_commenced_mask))
475 rep_nop();
476 local_irq_enable();
477
478 local_flush_tlb();
479
480 cpu_set(cpuid, cpu_online_map);
481 wmb();
482 cpu_idle();
483}
484
485/* Routine to kick start the given CPU and wait for it to report ready
486 * (or timeout in startup). When this routine returns, the requested
487 * CPU is either fully running and configured or known to be dead.
488 *
489 * We call this routine sequentially 1 CPU at a time, so no need for
490 * locking */
491
492static void __init do_boot_cpu(__u8 cpu)
493{
494 struct task_struct *idle;
495 int timeout;
496 unsigned long flags;
497 int quad_boot = (1 << cpu) & voyager_quad_processors
498 & ~(voyager_extended_vic_processors
499 & voyager_allowed_boot_processors);
500
501 /* This is the format of the CPI IDT gate (in real mode) which
502 * we're hijacking to boot the CPU */
503 union IDTFormat {
504 struct seg {
505 __u16 Offset;
506 __u16 Segment;
507 } idt;
508 __u32 val;
509 } hijack_source;
510
511 __u32 *hijack_vector;
512 __u32 start_phys_address = setup_trampoline();
513
514 /* There's a clever trick to this: The linux trampoline is
515 * compiled to begin at absolute location zero, so make the
516 * address zero but have the data segment selector compensate
517 * for the actual address */
518 hijack_source.idt.Offset = start_phys_address & 0x000F;
519 hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
520
521 cpucount++;
522 alternatives_smp_switch(1);
523
524 idle = fork_idle(cpu);
525 if (IS_ERR(idle))
526 panic("failed fork for CPU%d", cpu);
527 idle->thread.ip = (unsigned long)start_secondary;
528 /* init_tasks (in sched.c) is indexed logically */
529 stack_start.sp = (void *)idle->thread.sp;
530
531 per_cpu(current_task, cpu) = idle;
532 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
533 irq_ctx_init(cpu);
534
535 /* Note: Don't modify initial ss override */
536 VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
537 (unsigned long)hijack_source.val, hijack_source.idt.Segment,
538 hijack_source.idt.Offset, stack_start.sp));
539
540 /* init lowmem identity mapping */
541 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
542 min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
543 flush_tlb_all();
544
545 if (quad_boot) {
546 printk("CPU %d: non extended Quad boot\n", cpu);
547 hijack_vector =
548 (__u32 *)
549 phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
550 *hijack_vector = hijack_source.val;
551 } else {
552 printk("CPU%d: extended VIC boot\n", cpu);
553 hijack_vector =
554 (__u32 *)
555 phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
556 *hijack_vector = hijack_source.val;
557 /* VIC errata, may also receive interrupt at this address */
558 hijack_vector =
559 (__u32 *)
560 phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
561 VIC_DEFAULT_CPI_BASE) * 4);
562 *hijack_vector = hijack_source.val;
563 }
564 /* All non-boot CPUs start with interrupts fully masked. Need
565 * to lower the mask of the CPI we're about to send. We do
566 * this in the VIC by masquerading as the processor we're
567 * about to boot and lowering its interrupt mask */
568 local_irq_save(flags);
569 if (quad_boot) {
570 send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
571 } else {
572 outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
573 /* here we're altering registers belonging to `cpu' */
574
575 outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
576 /* now go back to our original identity */
577 outb(boot_cpu_id, VIC_PROCESSOR_ID);
578
579 /* and boot the CPU */
580
581 send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
582 }
583 cpu_booted_map = 0;
584 local_irq_restore(flags);
585
586 /* now wait for it to become ready (or timeout) */
587 for (timeout = 0; timeout < 50000; timeout++) {
588 if (cpu_booted_map)
589 break;
590 udelay(100);
591 }
592 /* reset the page table */
593 zap_low_mappings();
594
595 if (cpu_booted_map) {
596 VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
597 cpu, smp_processor_id()));
598
599 printk("CPU%d: ", cpu);
600 print_cpu_info(&cpu_data(cpu));
601 wmb();
602 cpu_set(cpu, cpu_callout_map);
603 cpu_set(cpu, cpu_present_map);
604 } else {
605 printk("CPU%d FAILED TO BOOT: ", cpu);
606 if (*
607 ((volatile unsigned char *)phys_to_virt(start_phys_address))
608 == 0xA5)
609 printk("Stuck.\n");
610 else
611 printk("Not responding.\n");
612
613 cpucount--;
614 }
615}
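
The Offset/Segment split in do_boot_cpu() above is just real-mode address formation: linear = segment * 16 + offset, so any physical address below 1MB can be encoded with a near-zero offset. A small standalone sketch (a hypothetical helper, not part of the original file) showing that the encoding round-trips:

/* Sketch: encode a sub-1MB physical address exactly as do_boot_cpu() does
 * and check that the real-mode far pointer reconstructs it. */
static int hijack_encoding_ok(unsigned long phys)
{
	unsigned short offset  = phys & 0x000F;		/* low nibble */
	unsigned short segment = (phys >> 4) & 0xFFFF;	/* remaining bits */

	/* real-mode linear address = segment * 16 + offset */
	return ((unsigned long)segment << 4) + offset == phys;
}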
616
617void __init smp_boot_cpus(void)
618{
619 int i;
620
621 /* CAT BUS initialisation must be done after the memory */
622 /* FIXME: The L4 has a catbus too, it just needs to be
623 * accessed in a totally different way */
624 if (voyager_level == 5) {
625 voyager_cat_init();
626
627 /* now that the cat has probed the Voyager System Bus, sanity
628 * check the cpu map */
629 if (((voyager_quad_processors | voyager_extended_vic_processors)
630 & cpus_addr(phys_cpu_present_map)[0]) !=
631 cpus_addr(phys_cpu_present_map)[0]) {
632 /* should panic */
633 printk("\n\n***WARNING*** "
634 "Sanity check of CPU present map FAILED\n");
635 }
636 } else if (voyager_level == 4)
637 voyager_extended_vic_processors =
638 cpus_addr(phys_cpu_present_map)[0];
639
640 /* this sets up the idle task to run on the current cpu */
641 voyager_extended_cpus = 1;
642	/* Remove the global_irq_holder setting; it triggers a BUG() on
643 * schedule at the moment */
644 //global_irq_holder = boot_cpu_id;
645
646 /* FIXME: Need to do something about this but currently only works
647 * on CPUs with a tsc which none of mine have.
648 smp_tune_scheduling();
649 */
650 smp_store_cpu_info(boot_cpu_id);
651 /* setup the jump vector */
652 initial_code = (unsigned long)initialize_secondary;
653 printk("CPU%d: ", boot_cpu_id);
654 print_cpu_info(&cpu_data(boot_cpu_id));
655
656 if (is_cpu_quad()) {
657 /* booting on a Quad CPU */
658 printk("VOYAGER SMP: Boot CPU is Quad\n");
659 qic_setup();
660 do_quad_bootstrap();
661 }
662
663 /* enable our own CPIs */
664 vic_enable_cpi();
665
666 cpu_set(boot_cpu_id, cpu_online_map);
667 cpu_set(boot_cpu_id, cpu_callout_map);
668
669 /* loop over all the extended VIC CPUs and boot them. The
670 * Quad CPUs must be bootstrapped by their extended VIC cpu */
671 for (i = 0; i < nr_cpu_ids; i++) {
672 if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
673 continue;
674 do_boot_cpu(i);
675		/* This udelay seems to be needed for the Quad boots;
676 * don't remove unless you know what you're doing */
677 udelay(1000);
678 }
679	/* we could compute the total bogomips here, but why bother?
680 * Code added from smpboot.c */
681 {
682 unsigned long bogosum = 0;
683
684 for_each_online_cpu(i)
685 bogosum += cpu_data(i).loops_per_jiffy;
686 printk(KERN_INFO "Total of %d processors activated "
687 "(%lu.%02lu BogoMIPS).\n",
688 cpucount + 1, bogosum / (500000 / HZ),
689 (bogosum / (5000 / HZ)) % 100);
690 }
691 voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
692 printk("VOYAGER: Extended (interrupt handling CPUs): "
693 "%d, non-extended: %d\n", voyager_extended_cpus,
694 num_booting_cpus() - voyager_extended_cpus);
695 /* that's it, switch to symmetric mode */
696 outb(0, VIC_PRIORITY_REGISTER);
697 outb(0, VIC_CLAIM_REGISTER_0);
698 outb(0, VIC_CLAIM_REGISTER_1);
699
700 VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
701}
702
703/* Reload the secondary CPU's task structure (this function does not
704 * return) */
705static void __init initialize_secondary(void)
706{
707#if 0
708 // AC kernels only
709 set_current(hard_get_current());
710#endif
711
712 /*
713 * We don't actually need to load the full TSS,
714 * basically just the stack pointer and the eip.
715 */
716
717 asm volatile ("movl %0,%%esp\n\t"
718 "jmp *%1"::"r" (current->thread.sp),
719 "r"(current->thread.ip));
720}
721
722/* handle a Voyager SYS_INT -- If we don't, the base board will
723 * panic the system.
724 *
725 * System interrupts occur because some problem was detected on the
726 * various busses. To find out what you have to probe all the
727 * hardware via the CAT bus. FIXME: At the moment we do nothing. */
728void smp_vic_sys_interrupt(struct pt_regs *regs)
729{
730 ack_CPI(VIC_SYS_INT);
731 printk("Voyager SYSTEM INTERRUPT\n");
732}
733
734/* Handle a voyager CMN_INT; These interrupts occur either because of
735 * a system status change or because a single bit memory error
736 * occurred. FIXME: At the moment, ignore all this. */
737void smp_vic_cmn_interrupt(struct pt_regs *regs)
738{
739 static __u8 in_cmn_int = 0;
740 static DEFINE_SPINLOCK(cmn_int_lock);
741
742 /* common ints are broadcast, so make sure we only do this once */
743 _raw_spin_lock(&cmn_int_lock);
744 if (in_cmn_int)
745 goto unlock_end;
746
747 in_cmn_int++;
748 _raw_spin_unlock(&cmn_int_lock);
749
750 VDEBUG(("Voyager COMMON INTERRUPT\n"));
751
752 if (voyager_level == 5)
753 voyager_cat_do_common_interrupt();
754
755 _raw_spin_lock(&cmn_int_lock);
756 in_cmn_int = 0;
757 unlock_end:
758 _raw_spin_unlock(&cmn_int_lock);
759 ack_CPI(VIC_CMN_INT);
760}
761
762/*
763 * Reschedule call back. Nothing to do, all the work is done
764 * automatically when we return from the interrupt. */
765static void smp_reschedule_interrupt(void)
766{
767 /* do nothing */
768}
769
770static struct mm_struct *flush_mm;
771static unsigned long flush_va;
772static DEFINE_SPINLOCK(tlbstate_lock);
773
774/*
775 * We cannot call mmdrop() because we are in interrupt context,
776 * instead update mm->cpu_vm_mask.
777 *
778 * We need to reload %cr3 since the page tables may be going
779 * away from under us..
780 */
781static inline void voyager_leave_mm(unsigned long cpu)
782{
783 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
784 BUG();
785 cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
786 load_cr3(swapper_pg_dir);
787}
788
789/*
790 * Invalidate call-back
791 */
792static void smp_invalidate_interrupt(void)
793{
794 __u8 cpu = smp_processor_id();
795
796 if (!test_bit(cpu, &smp_invalidate_needed))
797 return;
798 /* This will flood messages. Don't uncomment unless you see
799	 * problems with cross-cpu invalidation
800 VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
801 smp_processor_id()));
802 */
803
804 if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
805 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
806 if (flush_va == TLB_FLUSH_ALL)
807 local_flush_tlb();
808 else
809 __flush_tlb_one(flush_va);
810 } else
811 voyager_leave_mm(cpu);
812 }
813 smp_mb__before_clear_bit();
814 clear_bit(cpu, &smp_invalidate_needed);
815 smp_mb__after_clear_bit();
816}
817
818/* All the new flush operations for 2.4 */
819
820/* This routine is called with a physical cpu mask */
821static void
822voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
823 unsigned long va)
824{
825 int stuck = 50000;
826
827 if (!cpumask)
828 BUG();
829 if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
830 BUG();
831 if (cpumask & (1 << smp_processor_id()))
832 BUG();
833 if (!mm)
834 BUG();
835
836 spin_lock(&tlbstate_lock);
837
838 flush_mm = mm;
839 flush_va = va;
840 atomic_set_mask(cpumask, &smp_invalidate_needed);
841 /*
842 * We have to send the CPI only to
843 * CPUs affected.
844 */
845 send_CPI(cpumask, VIC_INVALIDATE_CPI);
846
847 while (smp_invalidate_needed) {
848 mb();
849 if (--stuck == 0) {
850 printk("***WARNING*** Stuck doing invalidate CPI "
851 "(CPU%d)\n", smp_processor_id());
852 break;
853 }
854 }
855
856 /* Uncomment only to debug invalidation problems
857 VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
858 */
859
860 flush_mm = NULL;
861 flush_va = 0;
862 spin_unlock(&tlbstate_lock);
863}
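
The flush above is a simple bitmask handshake: publish flush_mm/flush_va, set one bit per target CPU in smp_invalidate_needed, send the CPI, and spin until every target has cleared its bit in smp_invalidate_interrupt(). A minimal standalone sketch of that handshake, using GCC atomic builtins instead of the kernel's helpers purely for illustration:

/* Sketch of the shootdown handshake, not the kernel implementation. */
static volatile unsigned long pending;			/* one bit per target */

static void shootdown_send(unsigned long targets)
{
	__sync_fetch_and_or(&pending, targets);		/* mark targets busy */
	/* ... send the invalidate CPI to 'targets' here ... */
	while (pending)					/* wait for all acks */
		__sync_synchronize();
}

static void shootdown_receive(int cpu)
{
	/* ... flush the local TLB here ... */
	__sync_fetch_and_and(&pending, ~(1UL << cpu));	/* acknowledge */
}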
864
865void flush_tlb_current_task(void)
866{
867 struct mm_struct *mm = current->mm;
868 unsigned long cpu_mask;
869
870 preempt_disable();
871
872 cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
873 local_flush_tlb();
874 if (cpu_mask)
875 voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
876
877 preempt_enable();
878}
879
880void flush_tlb_mm(struct mm_struct *mm)
881{
882 unsigned long cpu_mask;
883
884 preempt_disable();
885
886 cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
887
888 if (current->active_mm == mm) {
889 if (current->mm)
890 local_flush_tlb();
891 else
892 voyager_leave_mm(smp_processor_id());
893 }
894 if (cpu_mask)
895 voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
896
897 preempt_enable();
898}
899
900void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
901{
902 struct mm_struct *mm = vma->vm_mm;
903 unsigned long cpu_mask;
904
905 preempt_disable();
906
907 cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
908 if (current->active_mm == mm) {
909 if (current->mm)
910 __flush_tlb_one(va);
911 else
912 voyager_leave_mm(smp_processor_id());
913 }
914
915 if (cpu_mask)
916 voyager_flush_tlb_others(cpu_mask, mm, va);
917
918 preempt_enable();
919}
920
921EXPORT_SYMBOL(flush_tlb_page);
922
923/* enable the requested IRQs */
924static void smp_enable_irq_interrupt(void)
925{
926 __u8 irq;
927 __u8 cpu = get_cpu();
928
929 VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
930 vic_irq_enable_mask[cpu]));
931
932 spin_lock(&vic_irq_lock);
933 for (irq = 0; irq < 16; irq++) {
934 if (vic_irq_enable_mask[cpu] & (1 << irq))
935 enable_local_vic_irq(irq);
936 }
937 vic_irq_enable_mask[cpu] = 0;
938 spin_unlock(&vic_irq_lock);
939
940 put_cpu_no_resched();
941}
942
943/*
944 * CPU halt call-back
945 */
946static void smp_stop_cpu_function(void *dummy)
947{
948 VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
949 cpu_clear(smp_processor_id(), cpu_online_map);
950 local_irq_disable();
951 for (;;)
952 halt();
953}
954
955/* execute a thread on a new CPU. The function to be called must be
956 * previously set up. This is used to schedule a function for
957 * execution on all CPUs - set up the function then broadcast a
958 * function_interrupt CPI to come here on each CPU */
959static void smp_call_function_interrupt(void)
960{
961 irq_enter();
962 generic_smp_call_function_interrupt();
963 __get_cpu_var(irq_stat).irq_call_count++;
964 irq_exit();
965}
966
967static void smp_call_function_single_interrupt(void)
968{
969 irq_enter();
970 generic_smp_call_function_single_interrupt();
971 __get_cpu_var(irq_stat).irq_call_count++;
972 irq_exit();
973}
974
975/* Sorry about the name. In an APIC based system, the APICs
976 * themselves are programmed to send a timer interrupt. This is used
977 * by linux to reschedule the processor. Voyager doesn't have this,
978 * so we use the system clock to interrupt one processor, which in
979 * turn, broadcasts a timer CPI to all the others --- we receive that
980 * CPI here. We don't actually use this for counting, so losing
981 * ticks doesn't matter
982 *
983 * FIXME: For those CPUs which actually have a local APIC, we could
984 * try to use it to trigger this interrupt instead of having to
985 * broadcast the timer tick. Unfortunately, all my pentium DYADs have
986 * no local APIC, so I can't do this
987 *
988 * This function is currently a placeholder and is unused in the code */
989void smp_apic_timer_interrupt(struct pt_regs *regs)
990{
991 struct pt_regs *old_regs = set_irq_regs(regs);
992 wrapper_smp_local_timer_interrupt();
993 set_irq_regs(old_regs);
994}
995
996/* All of the QUAD interrupt GATES */
997void smp_qic_timer_interrupt(struct pt_regs *regs)
998{
999 struct pt_regs *old_regs = set_irq_regs(regs);
1000 ack_QIC_CPI(QIC_TIMER_CPI);
1001 wrapper_smp_local_timer_interrupt();
1002 set_irq_regs(old_regs);
1003}
1004
1005void smp_qic_invalidate_interrupt(struct pt_regs *regs)
1006{
1007 ack_QIC_CPI(QIC_INVALIDATE_CPI);
1008 smp_invalidate_interrupt();
1009}
1010
1011void smp_qic_reschedule_interrupt(struct pt_regs *regs)
1012{
1013 ack_QIC_CPI(QIC_RESCHEDULE_CPI);
1014 smp_reschedule_interrupt();
1015}
1016
1017void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
1018{
1019 ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
1020 smp_enable_irq_interrupt();
1021}
1022
1023void smp_qic_call_function_interrupt(struct pt_regs *regs)
1024{
1025 ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
1026 smp_call_function_interrupt();
1027}
1028
1029void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
1030{
1031 ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
1032 smp_call_function_single_interrupt();
1033}
1034
1035void smp_vic_cpi_interrupt(struct pt_regs *regs)
1036{
1037 struct pt_regs *old_regs = set_irq_regs(regs);
1038 __u8 cpu = smp_processor_id();
1039
1040 if (is_cpu_quad())
1041 ack_QIC_CPI(VIC_CPI_LEVEL0);
1042 else
1043 ack_VIC_CPI(VIC_CPI_LEVEL0);
1044
1045 if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
1046 wrapper_smp_local_timer_interrupt();
1047 if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
1048 smp_invalidate_interrupt();
1049 if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
1050 smp_reschedule_interrupt();
1051 if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
1052 smp_enable_irq_interrupt();
1053 if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
1054 smp_call_function_interrupt();
1055 if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
1056 smp_call_function_single_interrupt();
1057 set_irq_regs(old_regs);
1058}
1059
1060static void do_flush_tlb_all(void *info)
1061{
1062 unsigned long cpu = smp_processor_id();
1063
1064 __flush_tlb_all();
1065 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
1066 voyager_leave_mm(cpu);
1067}
1068
1069/* flush the TLB of every active CPU in the system */
1070void flush_tlb_all(void)
1071{
1072 on_each_cpu(do_flush_tlb_all, 0, 1);
1073}
1074
1075/* send a reschedule CPI to one CPU by physical CPU number*/
1076static void voyager_smp_send_reschedule(int cpu)
1077{
1078 send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
1079}
1080
1081int hard_smp_processor_id(void)
1082{
1083 __u8 i;
1084 __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
1085 if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
1086 return cpumask & 0x1F;
1087
1088 for (i = 0; i < 8; i++) {
1089 if (cpumask & (1 << i))
1090 return i;
1091 }
1092 printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
1093 return 0;
1094}
1095
1096int safe_smp_processor_id(void)
1097{
1098 return hard_smp_processor_id();
1099}
1100
1101/* broadcast a halt to all other CPUs */
1102static void voyager_smp_send_stop(void)
1103{
1104 smp_call_function(smp_stop_cpu_function, NULL, 1);
1105}
1106
1107/* this function is triggered in time.c when a clock tick fires
1108 * we need to re-broadcast the tick to all CPUs */
1109void smp_vic_timer_interrupt(void)
1110{
1111 send_CPI_allbutself(VIC_TIMER_CPI);
1112 smp_local_timer_interrupt();
1113}
1114
1115/* local (per CPU) timer interrupt. It does both profiling and
1116 * process statistics/rescheduling.
1117 *
1118 * We do profiling in every local tick, statistics/rescheduling
1119 * happen only every 'profiling multiplier' ticks. The default
1120 * multiplier is 1 and it can be changed by writing the new multiplier
1121 * value into /proc/profile.
1122 */
1123void smp_local_timer_interrupt(void)
1124{
1125 int cpu = smp_processor_id();
1126 long weight;
1127
1128 profile_tick(CPU_PROFILING);
1129 if (--per_cpu(prof_counter, cpu) <= 0) {
1130 /*
1131 * The multiplier may have changed since the last time we got
1132 * to this point as a result of the user writing to
1133 * /proc/profile. In this case we need to adjust the APIC
1134 * timer accordingly.
1135 *
1136 * Interrupts are already masked off at this point.
1137 */
1138 per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
1139 if (per_cpu(prof_counter, cpu) !=
1140 per_cpu(prof_old_multiplier, cpu)) {
1141 /* FIXME: need to update the vic timer tick here */
1142 per_cpu(prof_old_multiplier, cpu) =
1143 per_cpu(prof_counter, cpu);
1144 }
1145
1146 update_process_times(user_mode_vm(get_irq_regs()));
1147 }
1148
1149 if (((1 << cpu) & voyager_extended_vic_processors) == 0)
1150 /* only extended VIC processors participate in
1151 * interrupt distribution */
1152 return;
1153
1154 /*
1155 * We take the 'long' return path, and there every subsystem
1156 * grabs the appropriate locks (kernel lock/ irq lock).
1157 *
1158 * we might want to decouple profiling from the 'long path',
1159 * and do the profiling totally in assembly.
1160 *
1161 * Currently this isn't too much of an issue (performance wise),
1162 * we can take more than 100K local irqs per second on a 100 MHz P5.
1163 */
1164
1165 if ((++vic_tick[cpu] & 0x7) != 0)
1166 return;
1167 /* get here every 16 ticks (about every 1/6 of a second) */
1168
1169 /* Change our priority to give someone else a chance at getting
1170 * the IRQ. The algorithm goes like this:
1171 *
1172 * In the VIC, the dynamically routed interrupt is always
1173 * handled by the lowest priority eligible (i.e. receiving
1174 * interrupts) CPU. If >1 eligible CPUs are equal lowest, the
1175 * lowest processor number gets it.
1176 *
1177 * The priority of a CPU is controlled by a special per-CPU
1178	 * VIC priority register which is 3 bits wide, 0 being the lowest
1179	 * and 7 the highest priority.
1180 *
1181 * Therefore we subtract the average number of interrupts from
1182 * the number we've fielded. If this number is negative, we
1183 * lower the activity count and if it is positive, we raise
1184 * it.
1185 *
1186 * I'm afraid this still leads to odd looking interrupt counts:
1187 * the totals are all roughly equal, but the individual ones
1188 * look rather skewed.
1189 *
1190 * FIXME: This algorithm is total crap when mixed with SMP
1191 * affinity code since we now try to even up the interrupt
1192 * counts when an affinity binding is keeping them on a
1193 * particular CPU*/
1194 weight = (vic_intr_count[cpu] * voyager_extended_cpus
1195 - vic_intr_total) >> 4;
1196 weight += 4;
1197 if (weight > 7)
1198 weight = 7;
1199 if (weight < 0)
1200 weight = 0;
1201
1202 outb((__u8) weight, VIC_PRIORITY_REGISTER);
1203
1204#ifdef VOYAGER_DEBUG
1205 if ((vic_tick[cpu] & 0xFFF) == 0) {
1206 /* print this message roughly every 25 secs */
1207 printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
1208 cpu, vic_tick[cpu], weight);
1209 }
1210#endif
1211}
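
As a hedged worked example of the weight calculation above (the numbers are invented): with voyager_extended_cpus = 4, a CPU that has fielded vic_intr_count = 120 interrupts against vic_intr_total = 400 would compute:

/* Worked example with invented numbers:
 *
 *	weight = ((120 * 4 - 400) >> 4) + 4 = (80 >> 4) + 4 = 9, clamped to 7
 *
 * so a CPU that has fielded more than its share of interrupts ends up at the
 * highest VIC priority value and, because dynamically routed interrupts go to
 * the lowest-priority eligible CPU, becomes the least likely to receive the
 * next one. */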
1212
1213/* setup the profiling timer */
1214int setup_profiling_timer(unsigned int multiplier)
1215{
1216 int i;
1217
1218 if ((!multiplier))
1219 return -EINVAL;
1220
1221 /*
1222 * Set the new multiplier for each CPU. CPUs don't start using the
1223 * new values until the next timer interrupt in which they do process
1224 * accounting.
1225 */
1226 for (i = 0; i < nr_cpu_ids; ++i)
1227 per_cpu(prof_multiplier, i) = multiplier;
1228
1229 return 0;
1230}
1231
1232/* This is a bit of a mess, but forced on us by the genirq changes
1233 * there's no genirq handler that really does what voyager wants
1234 * so hack it up with the simple IRQ handler */
1235static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
1236{
1237 before_handle_vic_irq(irq);
1238 handle_simple_irq(irq, desc);
1239 after_handle_vic_irq(irq);
1240}
1241
1242/* The CPIs are handled in the per cpu 8259s, so they must be
1243 * enabled to be received: FIX: enabling the CPIs in the early
1244 * boot sequence interferes with bug checking; enable them later
1245 * on in smp_init */
1246#define VIC_SET_GATE(cpi, vector) \
1247 set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
1248#define QIC_SET_GATE(cpi, vector) \
1249 set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
1250
1251void __init voyager_smp_intr_init(void)
1252{
1253 int i;
1254
1255 /* initialize the per cpu irq mask to all disabled */
1256 for (i = 0; i < nr_cpu_ids; i++)
1257 vic_irq_mask[i] = 0xFFFF;
1258
1259 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
1260
1261 VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
1262 VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);
1263
1264 QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
1265 QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
1266 QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
1267 QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
1268 QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);
1269
1270 /* now put the VIC descriptor into the first 48 IRQs
1271 *
1272 * This is for later: first 16 correspond to PC IRQs; next 16
1273 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
1274 for (i = 0; i < 48; i++)
1275 set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
1276}
1277
1278/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
1279 * processor to receive CPI */
1280static void send_CPI(__u32 cpuset, __u8 cpi)
1281{
1282 int cpu;
1283 __u32 quad_cpuset = (cpuset & voyager_quad_processors);
1284
1285 if (cpi < VIC_START_FAKE_CPI) {
1286	/* fake CPIs are only used for booting, so send to the
1287 * extended quads as well---Quads must be VIC booted */
1288 outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
1289 return;
1290 }
1291 if (quad_cpuset)
1292 send_QIC_CPI(quad_cpuset, cpi);
1293 cpuset &= ~quad_cpuset;
1294	cpuset &= 0xff;	/* only the first 8 CPUs are valid for VIC CPI */
1295 if (cpuset == 0)
1296 return;
1297 for_each_online_cpu(cpu) {
1298 if (cpuset & (1 << cpu))
1299 set_bit(cpi, &vic_cpi_mailbox[cpu]);
1300 }
1301 if (cpuset)
1302 outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
1303}
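
A brief usage note: the cpuset argument is a physical CPU bitmask, so a hypothetical call like the one below would target CPUs 1 and 3. Quad CPUs in the set are peeled off and sent a QIC CPI; the remainder (only the first eight CPUs are addressable this way) are flagged in the per-CPU mailbox and signalled through the level-0 VIC CPI register.

	/* hypothetical example call, not taken from this file */
	send_CPI((1 << 1) | (1 << 3), VIC_RESCHEDULE_CPI);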
1304
1305/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
1306 * set the cache line to shared by reading it.
1307 *
1308 * DON'T make this inline otherwise the cache line read will be
1309 * optimised away
1310 * */
1311static int ack_QIC_CPI(__u8 cpi)
1312{
1313 __u8 cpu = hard_smp_processor_id();
1314
1315 cpi &= 7;
1316
1317 outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
1318 return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
1319}
1320
1321static void ack_special_QIC_CPI(__u8 cpi)
1322{
1323 switch (cpi) {
1324 case VIC_CMN_INT:
1325 outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
1326 break;
1327 case VIC_SYS_INT:
1328 outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
1329 break;
1330 }
1331 /* also clear at the VIC, just in case (nop for non-extended proc) */
1332 ack_VIC_CPI(cpi);
1333}
1334
1335/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
1336static void ack_VIC_CPI(__u8 cpi)
1337{
1338#ifdef VOYAGER_DEBUG
1339 unsigned long flags;
1340 __u16 isr;
1341 __u8 cpu = smp_processor_id();
1342
1343 local_irq_save(flags);
1344 isr = vic_read_isr();
1345 if ((isr & (1 << (cpi & 7))) == 0) {
1346 printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
1347 }
1348#endif
1349 /* send specific EOI; the two system interrupts have
1350 * bit 4 set for a separate vector but behave as the
1351 * corresponding 3 bit intr */
1352 outb_p(0x60 | (cpi & 7), 0x20);
1353
1354#ifdef VOYAGER_DEBUG
1355 if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
1356 printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
1357 }
1358 local_irq_restore(flags);
1359#endif
1360}
1361
1362/* cribbed with thanks from irq.c */
1363#define __byte(x,y) (((unsigned char *)&(y))[x])
1364#define cached_21(cpu) (__byte(0,vic_irq_mask[cpu]))
1365#define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu]))
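
On little-endian x86 the __byte() macro simply picks out one byte of the 16-bit per-CPU mask: byte 0 is the master 8259 mask (IRQs 0-7, port 0x21) and byte 1 is the slave mask (IRQs 8-15, port 0xA1). A minimal sketch of the same extraction without the macro; the helper names are illustrative:

/* Sketch: equivalent of cached_21()/cached_A1() written as plain shifts. */
static unsigned char master_mask(__u16 mask)	/* == cached_21(cpu) */
{
	return mask & 0xff;		/* IRQs 0-7, written to port 0x21 */
}

static unsigned char slave_mask(__u16 mask)	/* == cached_A1(cpu) */
{
	return (mask >> 8) & 0xff;	/* IRQs 8-15, written to port 0xA1 */
}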
1366
1367static unsigned int startup_vic_irq(unsigned int irq)
1368{
1369 unmask_vic_irq(irq);
1370
1371 return 0;
1372}
1373
1374/* The enable and disable routines. This is where we run into
1375 * conflicting architectural philosophy. Fundamentally, the voyager
1376 * architecture does not expect to have to disable interrupts globally
1377 * (the IRQ controllers belong to each CPU). The processor masquerade
1378 * which is used to start the system shouldn't be used in a running OS
1379 * since it will cause great confusion if two separate CPUs drive to
1380 * the same IRQ controller (I know, I've tried it).
1381 *
1382 * The solution is a variant on the NCR lazy SPL design:
1383 *
1384 * 1) To disable an interrupt, do nothing (other than set the
1385 * IRQ_DISABLED flag). This dares the interrupt actually to arrive.
1386 *
1387 * 2) If the interrupt dares to come in, raise the local mask against
1388 * it (this will result in all the CPU masks being raised
1389 * eventually).
1390 *
1391 * 3) To enable the interrupt, lower the mask on the local CPU and
1392 * broadcast an Interrupt enable CPI which causes all other CPUs to
1393 * adjust their masks accordingly. */
1394
1395static void unmask_vic_irq(unsigned int irq)
1396{
1397	/* linux doesn't do processor-irq affinity, so enable on
1398 * all CPUs we know about */
1399 int cpu = smp_processor_id(), real_cpu;
1400 __u16 mask = (1 << irq);
1401 __u32 processorList = 0;
1402 unsigned long flags;
1403
1404 VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
1405 irq, cpu, cpu_irq_affinity[cpu]));
1406 spin_lock_irqsave(&vic_irq_lock, flags);
1407 for_each_online_cpu(real_cpu) {
1408 if (!(voyager_extended_vic_processors & (1 << real_cpu)))
1409 continue;
1410 if (!(cpu_irq_affinity[real_cpu] & mask)) {
1411 /* irq has no affinity for this CPU, ignore */
1412 continue;
1413 }
1414 if (real_cpu == cpu) {
1415 enable_local_vic_irq(irq);
1416 } else if (vic_irq_mask[real_cpu] & mask) {
1417 vic_irq_enable_mask[real_cpu] |= mask;
1418 processorList |= (1 << real_cpu);
1419 }
1420 }
1421 spin_unlock_irqrestore(&vic_irq_lock, flags);
1422 if (processorList)
1423 send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
1424}
1425
1426static void mask_vic_irq(unsigned int irq)
1427{
1428 /* lazy disable, do nothing */
1429}
1430
1431static void enable_local_vic_irq(unsigned int irq)
1432{
1433 __u8 cpu = smp_processor_id();
1434 __u16 mask = ~(1 << irq);
1435 __u16 old_mask = vic_irq_mask[cpu];
1436
1437 vic_irq_mask[cpu] &= mask;
1438 if (vic_irq_mask[cpu] == old_mask)
1439 return;
1440
1441 VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
1442 irq, cpu));
1443
1444 if (irq & 8) {
1445 outb_p(cached_A1(cpu), 0xA1);
1446 (void)inb_p(0xA1);
1447 } else {
1448 outb_p(cached_21(cpu), 0x21);
1449 (void)inb_p(0x21);
1450 }
1451}
1452
1453static void disable_local_vic_irq(unsigned int irq)
1454{
1455 __u8 cpu = smp_processor_id();
1456 __u16 mask = (1 << irq);
1457 __u16 old_mask = vic_irq_mask[cpu];
1458
1459 if (irq == 7)
1460 return;
1461
1462 vic_irq_mask[cpu] |= mask;
1463 if (old_mask == vic_irq_mask[cpu])
1464 return;
1465
1466 VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
1467 irq, cpu));
1468
1469 if (irq & 8) {
1470 outb_p(cached_A1(cpu), 0xA1);
1471 (void)inb_p(0xA1);
1472 } else {
1473 outb_p(cached_21(cpu), 0x21);
1474 (void)inb_p(0x21);
1475 }
1476}
1477
1478/* The VIC is level triggered, so the ack can only be issued after the
1479 * interrupt completes. However, we do Voyager lazy interrupt
1480 * handling here: It is an extremely expensive operation to mask an
1481 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If
1482 * this interrupt actually comes in, then we mask and ack here to push
1483 * the interrupt off to another CPU */
1484static void before_handle_vic_irq(unsigned int irq)
1485{
1486 irq_desc_t *desc = irq_to_desc(irq);
1487 __u8 cpu = smp_processor_id();
1488
1489 _raw_spin_lock(&vic_irq_lock);
1490 vic_intr_total++;
1491 vic_intr_count[cpu]++;
1492
1493 if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
1494 /* The irq is not in our affinity mask, push it off
1495 * onto another CPU */
1496 VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
1497 "on cpu %d\n", irq, cpu));
1498 disable_local_vic_irq(irq);
1499 /* set IRQ_INPROGRESS to prevent the handler in irq.c from
1500 * actually calling the interrupt routine */
1501 desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
1502 } else if (desc->status & IRQ_DISABLED) {
1503 /* Damn, the interrupt actually arrived, do the lazy
1504 * disable thing. The interrupt routine in irq.c will
1505 * not handle a IRQ_DISABLED interrupt, so nothing more
1506 * need be done here */
1507 VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
1508 irq, cpu));
1509 disable_local_vic_irq(irq);
1510 desc->status |= IRQ_REPLAY;
1511 } else {
1512 desc->status &= ~IRQ_REPLAY;
1513 }
1514
1515 _raw_spin_unlock(&vic_irq_lock);
1516}
1517
1518/* Finish the VIC interrupt: basically mask */
1519static void after_handle_vic_irq(unsigned int irq)
1520{
1521 irq_desc_t *desc = irq_to_desc(irq);
1522
1523 _raw_spin_lock(&vic_irq_lock);
1524 {
1525 unsigned int status = desc->status & ~IRQ_INPROGRESS;
1526#ifdef VOYAGER_DEBUG
1527 __u16 isr;
1528#endif
1529
1530 desc->status = status;
1531 if ((status & IRQ_DISABLED))
1532 disable_local_vic_irq(irq);
1533#ifdef VOYAGER_DEBUG
1534 /* DEBUG: before we ack, check what's in progress */
1535 isr = vic_read_isr();
1536 if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
1537 int i;
1538 __u8 cpu = smp_processor_id();
1539 __u8 real_cpu;
1540 int mask; /* Um... initialize me??? --RR */
1541
1542 printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
1543 cpu, irq);
1544 for_each_possible_cpu(real_cpu, mask) {
1545
1546 outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
1547 VIC_PROCESSOR_ID);
1548 isr = vic_read_isr();
1549 if (isr & (1 << irq)) {
1550 printk
1551 ("VOYAGER SMP: CPU%d ack irq %d\n",
1552 real_cpu, irq);
1553 ack_vic_irq(irq);
1554 }
1555 outb(cpu, VIC_PROCESSOR_ID);
1556 }
1557 }
1558#endif /* VOYAGER_DEBUG */
1559 /* as soon as we ack, the interrupt is eligible for
1560 * receipt by another CPU so everything must be in
1561 * order here */
1562 ack_vic_irq(irq);
1563 if (status & IRQ_REPLAY) {
1564 /* replay is set if we disable the interrupt
1565 * in the before_handle_vic_irq() routine, so
1566 * clear the in progress bit here to allow the
1567 * next CPU to handle this correctly */
1568 desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
1569 }
1570#ifdef VOYAGER_DEBUG
1571 isr = vic_read_isr();
1572 if ((isr & (1 << irq)) != 0)
1573 printk("VOYAGER SMP: after_handle_vic_irq() after "
1574 "ack irq=%d, isr=0x%x\n", irq, isr);
1575#endif /* VOYAGER_DEBUG */
1576 }
1577 _raw_spin_unlock(&vic_irq_lock);
1578
1579 /* All code after this point is out of the main path - the IRQ
1580 * may be intercepted by another CPU if reasserted */
1581}
1582
1583/* Linux processor - interrupt affinity manipulations.
1584 *
1585 * For each processor, we maintain a 32 bit irq affinity mask.
1586 * Initially it is set to all 1's so every processor accepts every
1587 * interrupt. In this call, we change the processor's affinity mask:
1588 *
1589 * Change from enable to disable:
1590 *
1591 * If the interrupt ever comes in to the processor, we will disable it
1592 * and ack it to push it off to another CPU, so just accept the mask here.
1593 *
1594 * Change from disable to enable:
1595 *
1596 * change the mask and then do an interrupt enable CPI to re-enable on
1597 * the selected processors */
1598
1599void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
1600{
1601 /* Only extended processors handle interrupts */
1602 unsigned long real_mask;
1603 unsigned long irq_mask = 1 << irq;
1604 int cpu;
1605
1606 real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
1607
1608 if (cpus_addr(*mask)[0] == 0)
1609 /* can't have no CPUs to accept the interrupt -- extremely
1610 * bad things will happen */
1611 return;
1612
1613 if (irq == 0)
1614 /* can't change the affinity of the timer IRQ. This
1615 * is due to the constraint in the voyager
1616	 * architecture that the CPI also comes in on an IRQ
1617 * line and we have chosen IRQ0 for this. If you
1618 * raise the mask on this interrupt, the processor
1619	 * will no longer be able to accept VIC CPIs */
1620 return;
1621
1622 if (irq >= 32)
1623 /* You can only have 32 interrupts in a voyager system
1624 * (and 32 only if you have a secondary microchannel
1625 * bus) */
1626 return;
1627
1628 for_each_online_cpu(cpu) {
1629 unsigned long cpu_mask = 1 << cpu;
1630
1631 if (cpu_mask & real_mask) {
1632 /* enable the interrupt for this cpu */
1633 cpu_irq_affinity[cpu] |= irq_mask;
1634 } else {
1635 /* disable the interrupt for this cpu */
1636 cpu_irq_affinity[cpu] &= ~irq_mask;
1637 }
1638 }
1639 /* this is magic, we now have the correct affinity maps, so
1640 * enable the interrupt. This will send an enable CPI to
1641 * those CPUs who need to enable it in their local masks,
1642	 * causing them to correct for the new affinity. If the
1643 * interrupt is currently globally disabled, it will simply be
1644 * disabled again as it comes in (voyager lazy disable). If
1645 * the affinity map is tightened to disable the interrupt on a
1646 * cpu, it will be pushed off when it comes in */
1647 unmask_vic_irq(irq);
1648}
1649
1650static void ack_vic_irq(unsigned int irq)
1651{
1652 if (irq & 8) {
1653 outb(0x62, 0x20); /* Specific EOI to cascade */
1654 outb(0x60 | (irq & 7), 0xA0);
1655 } else {
1656 outb(0x60 | (irq & 7), 0x20);
1657 }
1658}
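
The bytes written above follow the 8259A OCW2 "specific EOI" format: 0x60 | level acknowledges exactly one in-service interrupt, and for IRQs 8-15 both the slave (at its own level) and the master (at the cascade level, 2) must be acknowledged. A small sketch with symbolic names; the #define names are illustrative, the values are the standard ones used above:

/* Sketch of the specific-EOI encoding used by ack_vic_irq() above. */
#define OCW2_SPECIFIC_EOI	0x60	/* EOI command, specific level */
#define CASCADE_IRQ		2	/* slave PIC cascades off master line 2 */

static void specific_eoi_sketch(unsigned int irq)
{
	if (irq & 8) {
		outb(OCW2_SPECIFIC_EOI | CASCADE_IRQ, 0x20);	/* master */
		outb(OCW2_SPECIFIC_EOI | (irq & 7), 0xA0);	/* slave */
	} else {
		outb(OCW2_SPECIFIC_EOI | (irq & 7), 0x20);	/* master only */
	}
}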
1659
1660/* enable the CPIs. In the VIC, the CPIs are delivered by the 8259
1661 * but are not vectored by it. This means that the 8259 mask must be
1662 * lowered to receive them */
1663static __init void vic_enable_cpi(void)
1664{
1665 __u8 cpu = smp_processor_id();
1666
1667 /* just take a copy of the current mask (nop for boot cpu) */
1668 vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];
1669
1670 enable_local_vic_irq(VIC_CPI_LEVEL0);
1671 enable_local_vic_irq(VIC_CPI_LEVEL1);
1672 /* for sys int and cmn int */
1673 enable_local_vic_irq(7);
1674
1675 if (is_cpu_quad()) {
1676 outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
1677 outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
1678 VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
1679 cpu, QIC_CPI_ENABLE));
1680 }
1681
1682 VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
1683 cpu, vic_irq_mask[cpu]));
1684}
1685
1686void voyager_smp_dump()
1687{
1688 int old_cpu = smp_processor_id(), cpu;
1689
1690 /* dump the interrupt masks of each processor */
1691 for_each_online_cpu(cpu) {
1692 __u16 imr, isr, irr;
1693 unsigned long flags;
1694
1695 local_irq_save(flags);
1696 outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
1697 imr = (inb(0xa1) << 8) | inb(0x21);
1698 outb(0x0a, 0xa0);
1699 irr = inb(0xa0) << 8;
1700 outb(0x0a, 0x20);
1701 irr |= inb(0x20);
1702 outb(0x0b, 0xa0);
1703 isr = inb(0xa0) << 8;
1704 outb(0x0b, 0x20);
1705 isr |= inb(0x20);
1706 outb(old_cpu, VIC_PROCESSOR_ID);
1707 local_irq_restore(flags);
1708 printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
1709 cpu, vic_irq_mask[cpu], imr, irr, isr);
1710#if 0
1711		/* These lines are put in to try to unstick an un-ack'd IRQ */
1712 if (isr != 0) {
1713 int irq;
1714 for (irq = 0; irq < 16; irq++) {
1715 if (isr & (1 << irq)) {
1716 printk("\tCPU%d: ack irq %d\n",
1717 cpu, irq);
1718 local_irq_save(flags);
1719 outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
1720 VIC_PROCESSOR_ID);
1721 ack_vic_irq(irq);
1722 outb(old_cpu, VIC_PROCESSOR_ID);
1723 local_irq_restore(flags);
1724 }
1725 }
1726 }
1727#endif
1728 }
1729}
1730
1731void smp_voyager_power_off(void *dummy)
1732{
1733 if (smp_processor_id() == boot_cpu_id)
1734 voyager_power_off();
1735 else
1736 smp_stop_cpu_function(NULL);
1737}
1738
1739static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
1740{
1741 /* FIXME: ignore max_cpus for now */
1742 smp_boot_cpus();
1743}
1744
1745static void __cpuinit voyager_smp_prepare_boot_cpu(void)
1746{
1747 int cpu = smp_processor_id();
1748 switch_to_new_gdt(cpu);
1749
1750 cpu_set(cpu, cpu_online_map);
1751 cpu_set(cpu, cpu_callout_map);
1752 cpu_set(cpu, cpu_possible_map);
1753 cpu_set(cpu, cpu_present_map);
1754
1755}
1756
1757static int __cpuinit voyager_cpu_up(unsigned int cpu)
1758{
1759 /* This only works at boot for x86. See "rewrite" above. */
1760 if (cpu_isset(cpu, smp_commenced_mask))
1761 return -ENOSYS;
1762
1763 /* In case one didn't come up */
1764 if (!cpu_isset(cpu, cpu_callin_map))
1765 return -EIO;
1766 /* Unleash the CPU! */
1767 cpu_set(cpu, smp_commenced_mask);
1768 while (!cpu_online(cpu))
1769 mb();
1770 return 0;
1771}
1772
1773static void __init voyager_smp_cpus_done(unsigned int max_cpus)
1774{
1775 zap_low_mappings();
1776}
1777
1778void __init smp_setup_processor_id(void)
1779{
1780 current_thread_info()->cpu = hard_smp_processor_id();
1781}
1782
1783static void voyager_send_call_func(const struct cpumask *callmask)
1784{
1785 __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
1786 send_CPI(mask, VIC_CALL_FUNCTION_CPI);
1787}
1788
1789static void voyager_send_call_func_single(int cpu)
1790{
1791 send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
1792}
1793
1794struct smp_ops smp_ops = {
1795 .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
1796 .smp_prepare_cpus = voyager_smp_prepare_cpus,
1797 .cpu_up = voyager_cpu_up,
1798 .smp_cpus_done = voyager_smp_cpus_done,
1799
1800 .smp_send_stop = voyager_smp_send_stop,
1801 .smp_send_reschedule = voyager_smp_send_reschedule,
1802
1803 .send_call_func_ipi = voyager_send_call_func,
1804 .send_call_func_single_ipi = voyager_send_call_func_single,
1805};
diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c
deleted file mode 100644
index 15464a20fb38..000000000000
--- a/arch/x86/mach-voyager/voyager_thread.c
+++ /dev/null
@@ -1,128 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 2001
4 *
5 * Author: J.E.J.Bottomley@HansenPartnership.com
6 *
7 * This module provides the machine status monitor thread for the
8 * voyager architecture. This allows us to monitor the machine
9 * environment (temp, voltage, fan function) and the front panel and
10 * internal UPS. If a fault is detected, this thread takes corrective
11 * action (usually just informing init)
12 * */
13
14#include <linux/module.h>
15#include <linux/mm.h>
16#include <linux/kernel_stat.h>
17#include <linux/delay.h>
18#include <linux/mc146818rtc.h>
19#include <linux/init.h>
20#include <linux/bootmem.h>
21#include <linux/kmod.h>
22#include <linux/completion.h>
23#include <linux/sched.h>
24#include <linux/kthread.h>
25#include <asm/desc.h>
26#include <asm/voyager.h>
27#include <asm/vic.h>
28#include <asm/mtrr.h>
29#include <asm/msr.h>
30
31struct task_struct *voyager_thread;
32static __u8 set_timeout;
33
34static int execute(const char *string)
35{
36 int ret;
37
38 char *envp[] = {
39 "HOME=/",
40 "TERM=linux",
41 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
42 NULL,
43 };
44 char *argv[] = {
45 "/bin/bash",
46 "-c",
47 (char *)string,
48 NULL,
49 };
50
51 if ((ret =
52 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) {
53 printk(KERN_ERR "Voyager failed to run \"%s\": %i\n", string,
54 ret);
55 }
56 return ret;
57}
58
59static void check_from_kernel(void)
60{
61 if (voyager_status.switch_off) {
62
63 /* FIXME: This should be configurable via proc */
64 execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
65 } else if (voyager_status.power_fail) {
66 VDEBUG(("Voyager daemon detected AC power failure\n"));
67
68		/* FIXME: This should be configurable via proc */
69 execute("umask 600; echo F > /etc/powerstatus; kill -PWR 1");
70 set_timeout = 1;
71 }
72}
73
74static void check_continuing_condition(void)
75{
76 if (voyager_status.power_fail) {
77 __u8 data;
78 voyager_cat_psi(VOYAGER_PSI_SUBREAD,
79 VOYAGER_PSI_AC_FAIL_REG, &data);
80 if ((data & 0x1f) == 0) {
81 /* all power restored */
82 printk(KERN_NOTICE
83 "VOYAGER AC power restored, cancelling shutdown\n");
84			/* FIXME: should be user configurable */
85 execute
86 ("umask 600; echo O > /etc/powerstatus; kill -PWR 1");
87 set_timeout = 0;
88 }
89 }
90}
91
92static int thread(void *unused)
93{
94 printk(KERN_NOTICE "Voyager starting monitor thread\n");
95
96 for (;;) {
97 set_current_state(TASK_INTERRUPTIBLE);
98 schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT);
99
100 VDEBUG(("Voyager Daemon awoken\n"));
101 if (voyager_status.request_from_kernel == 0) {
102 /* probably awoken from timeout */
103 check_continuing_condition();
104 } else {
105 check_from_kernel();
106 voyager_status.request_from_kernel = 0;
107 }
108 }
109}
110
111static int __init voyager_thread_start(void)
112{
113 voyager_thread = kthread_run(thread, NULL, "kvoyagerd");
114 if (IS_ERR(voyager_thread)) {
115 printk(KERN_ERR
116 "Voyager: Failed to create system monitor thread.\n");
117 return PTR_ERR(voyager_thread);
118 }
119 return 0;
120}
121
122static void __exit voyager_thread_stop(void)
123{
124 kthread_stop(voyager_thread);
125}
126
127module_init(voyager_thread_start);
128module_exit(voyager_thread_stop);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 2b938a384910..08537747cb58 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,4 +1,4 @@
1obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 1obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
2 pat.o pgtable.o gup.o 2 pat.o pgtable.o gup.o
3 3
4obj-$(CONFIG_SMP) += tlb.o 4obj-$(CONFIG_SMP) += tlb.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 29644175490f..a03b7279efa0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1,74 +1,79 @@
1/* 1/*
2 * Copyright (C) 1995 Linus Torvalds 2 * Copyright (C) 1995 Linus Torvalds
3 * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs. 3 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
4 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
4 */ 5 */
5
6#include <linux/signal.h>
7#include <linux/sched.h>
8#include <linux/kernel.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <linux/types.h>
12#include <linux/ptrace.h>
13#include <linux/mmiotrace.h>
14#include <linux/mman.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/interrupt.h> 6#include <linux/interrupt.h>
18#include <linux/init.h> 7#include <linux/mmiotrace.h>
19#include <linux/tty.h> 8#include <linux/bootmem.h>
20#include <linux/vt_kern.h> /* For unblank_screen() */
21#include <linux/compiler.h> 9#include <linux/compiler.h>
22#include <linux/highmem.h> 10#include <linux/highmem.h>
23#include <linux/bootmem.h> /* for max_low_pfn */
24#include <linux/vmalloc.h>
25#include <linux/module.h>
26#include <linux/kprobes.h> 11#include <linux/kprobes.h>
27#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/vmalloc.h>
14#include <linux/vt_kern.h>
15#include <linux/signal.h>
16#include <linux/kernel.h>
17#include <linux/ptrace.h>
18#include <linux/string.h>
19#include <linux/module.h>
28#include <linux/kdebug.h> 20#include <linux/kdebug.h>
21#include <linux/errno.h>
29#include <linux/magic.h> 22#include <linux/magic.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/mman.h>
27#include <linux/tty.h>
28#include <linux/smp.h>
29#include <linux/mm.h>
30
31#include <asm-generic/sections.h>
30 32
31#include <asm/system.h>
32#include <asm/desc.h>
33#include <asm/segment.h>
34#include <asm/pgalloc.h>
35#include <asm/smp.h>
36#include <asm/tlbflush.h> 33#include <asm/tlbflush.h>
34#include <asm/pgalloc.h>
35#include <asm/segment.h>
36#include <asm/system.h>
37#include <asm/proto.h> 37#include <asm/proto.h>
38#include <asm-generic/sections.h>
39#include <asm/traps.h> 38#include <asm/traps.h>
39#include <asm/desc.h>
40 40
41/* 41/*
42 * Page fault error code bits 42 * Page fault error code bits:
43 * bit 0 == 0 means no page found, 1 means protection fault 43 *
44 * bit 1 == 0 means read, 1 means write 44 * bit 0 == 0: no page found 1: protection fault
45 * bit 2 == 0 means kernel, 1 means user-mode 45 * bit 1 == 0: read access 1: write access
46 * bit 3 == 1 means use of reserved bit detected 46 * bit 2 == 0: kernel-mode access 1: user-mode access
47 * bit 4 == 1 means fault was an instruction fetch 47 * bit 3 == 1: use of reserved bit detected
48 * bit 4 == 1: fault was an instruction fetch
48 */ 49 */
49#define PF_PROT (1<<0) 50enum x86_pf_error_code {
50#define PF_WRITE (1<<1)
51#define PF_USER (1<<2)
52#define PF_RSVD (1<<3)
53#define PF_INSTR (1<<4)
54 51
52 PF_PROT = 1 << 0,
53 PF_WRITE = 1 << 1,
54 PF_USER = 1 << 2,
55 PF_RSVD = 1 << 3,
56 PF_INSTR = 1 << 4,
57};
58
59/*
60 * Returns 0 if mmiotrace is disabled, or if the fault is not
61 * handled by mmiotrace:
62 */
55static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) 63static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
56{ 64{
57#ifdef CONFIG_MMIOTRACE
58 if (unlikely(is_kmmio_active())) 65 if (unlikely(is_kmmio_active()))
59 if (kmmio_handler(regs, addr) == 1) 66 if (kmmio_handler(regs, addr) == 1)
60 return -1; 67 return -1;
61#endif
62 return 0; 68 return 0;
63} 69}
64 70
65static inline int notify_page_fault(struct pt_regs *regs) 71static inline int notify_page_fault(struct pt_regs *regs)
66{ 72{
67#ifdef CONFIG_KPROBES
68 int ret = 0; 73 int ret = 0;
69 74
70 /* kprobe_running() needs smp_processor_id() */ 75 /* kprobe_running() needs smp_processor_id() */
71 if (!user_mode_vm(regs)) { 76 if (kprobes_built_in() && !user_mode_vm(regs)) {
72 preempt_disable(); 77 preempt_disable();
73 if (kprobe_running() && kprobe_fault_handler(regs, 14)) 78 if (kprobe_running() && kprobe_fault_handler(regs, 14))
74 ret = 1; 79 ret = 1;
@@ -76,29 +81,76 @@ static inline int notify_page_fault(struct pt_regs *regs)
76 } 81 }
77 82
78 return ret; 83 return ret;
79#else
80 return 0;
81#endif
82} 84}
83 85
84/* 86/*
85 * X86_32 87 * Prefetch quirks:
86 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
87 * Check that here and ignore it.
88 * 88 *
89 * X86_64 89 * 32-bit mode:
90 * Sometimes the CPU reports invalid exceptions on prefetch.
91 * Check that here and ignore it.
92 * 90 *
93 * Opcode checker based on code by Richard Brunner 91 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
92 * Check that here and ignore it.
93 *
94 * 64-bit mode:
95 *
96 * Sometimes the CPU reports invalid exceptions on prefetch.
97 * Check that here and ignore it.
98 *
99 * Opcode checker based on code by Richard Brunner.
94 */ 100 */
95static int is_prefetch(struct pt_regs *regs, unsigned long error_code, 101static inline int
96 unsigned long addr) 102check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
103 unsigned char opcode, int *prefetch)
97{ 104{
105 unsigned char instr_hi = opcode & 0xf0;
106 unsigned char instr_lo = opcode & 0x0f;
107
108 switch (instr_hi) {
109 case 0x20:
110 case 0x30:
111 /*
112 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
113 * In X86_64 long mode, the CPU will signal invalid
114 * opcode if some of these prefixes are present so
115 * X86_64 will never get here anyway
116 */
117 return ((instr_lo & 7) == 0x6);
118#ifdef CONFIG_X86_64
119 case 0x40:
120 /*
121 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
122 * Need to figure out under what instruction mode the
123 * instruction was issued. Could check the LDT for lm,
124 * but for now it's good enough to assume that long
125 * mode only uses well known segments or kernel.
126 */
127 return (!user_mode(regs)) || (regs->cs == __USER_CS);
128#endif
129 case 0x60:
130 /* 0x64 thru 0x67 are valid prefixes in all modes. */
131 return (instr_lo & 0xC) == 0x4;
132 case 0xF0:
133 /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
134 return !instr_lo || (instr_lo>>1) == 1;
135 case 0x00:
136 /* Prefetch instruction is 0x0F0D or 0x0F18 */
137 if (probe_kernel_address(instr, opcode))
138 return 0;
139
140 *prefetch = (instr_lo == 0xF) &&
141 (opcode == 0x0D || opcode == 0x18);
142 return 0;
143 default:
144 return 0;
145 }
146}
147
148static int
149is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
150{
151 unsigned char *max_instr;
98 unsigned char *instr; 152 unsigned char *instr;
99 int scan_more = 1;
100 int prefetch = 0; 153 int prefetch = 0;
101 unsigned char *max_instr;
102 154
103 /* 155 /*
104 * If it was a exec (instruction fetch) fault on NX page, then 156 * If it was a exec (instruction fetch) fault on NX page, then
@@ -107,106 +159,170 @@ static int is_prefetch(struct pt_regs *regs, unsigned long error_code,
107 if (error_code & PF_INSTR) 159 if (error_code & PF_INSTR)
108 return 0; 160 return 0;
109 161
110 instr = (unsigned char *)convert_ip_to_linear(current, regs); 162 instr = (void *)convert_ip_to_linear(current, regs);
111 max_instr = instr + 15; 163 max_instr = instr + 15;
112 164
113 if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) 165 if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
114 return 0; 166 return 0;
115 167
116 while (scan_more && instr < max_instr) { 168 while (instr < max_instr) {
117 unsigned char opcode; 169 unsigned char opcode;
118 unsigned char instr_hi;
119 unsigned char instr_lo;
120 170
121 if (probe_kernel_address(instr, opcode)) 171 if (probe_kernel_address(instr, opcode))
122 break; 172 break;
123 173
124 instr_hi = opcode & 0xf0;
125 instr_lo = opcode & 0x0f;
126 instr++; 174 instr++;
127 175
128 switch (instr_hi) { 176 if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
129 case 0x20:
130 case 0x30:
131 /*
132 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
133 * In X86_64 long mode, the CPU will signal invalid
134 * opcode if some of these prefixes are present so
135 * X86_64 will never get here anyway
136 */
137 scan_more = ((instr_lo & 7) == 0x6);
138 break;
139#ifdef CONFIG_X86_64
140 case 0x40:
141 /*
142 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
143 * Need to figure out under what instruction mode the
144 * instruction was issued. Could check the LDT for lm,
145 * but for now it's good enough to assume that long
146 * mode only uses well known segments or kernel.
147 */
148 scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
149 break;
150#endif
151 case 0x60:
152 /* 0x64 thru 0x67 are valid prefixes in all modes. */
153 scan_more = (instr_lo & 0xC) == 0x4;
154 break;
155 case 0xF0:
156 /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
157 scan_more = !instr_lo || (instr_lo>>1) == 1;
158 break;
159 case 0x00:
160 /* Prefetch instruction is 0x0F0D or 0x0F18 */
161 scan_more = 0;
162
163 if (probe_kernel_address(instr, opcode))
164 break;
165 prefetch = (instr_lo == 0xF) &&
166 (opcode == 0x0D || opcode == 0x18);
167 break; 177 break;
168 default:
169 scan_more = 0;
170 break;
171 }
172 } 178 }
173 return prefetch; 179 return prefetch;
174} 180}
175 181
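The opcode classification that the new check_prefetch_opcode() helper performs can be exercised in isolation. The stand-alone C sketch below mirrors only the prefix test shown above; it leaves out the REX (0x40..0x4F) and 0x0F escape cases because those need register state, and it is illustrative rather than part of the patch.

#include <stdio.h>

/*
 * User-space reduction of the prefix scan: return 1 if 'opcode' is a
 * prefix byte that keeps the scan going, 0 otherwise. The REX and 0x0F
 * escape cases from the kernel helper are omitted because they depend
 * on CPU state that only exists inside the fault handler.
 */
static int keeps_scanning(unsigned char opcode)
{
	unsigned char hi = opcode & 0xf0;
	unsigned char lo = opcode & 0x0f;

	switch (hi) {
	case 0x20:
	case 0x30:
		return (lo & 7) == 0x6;		/* 0x26, 0x2E, 0x36, 0x3E */
	case 0x60:
		return (lo & 0xC) == 0x4;	/* 0x64 thru 0x67 */
	case 0xF0:
		return !lo || (lo >> 1) == 1;	/* 0xF0, 0xF2, 0xF3 */
	default:
		return 0;
	}
}

int main(void)
{
	unsigned char samples[] = { 0x26, 0x2e, 0x64, 0xf2, 0x90 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("%#04x -> %d\n", samples[i], keeps_scanning(samples[i]));
	return 0;
}
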
176static void force_sig_info_fault(int si_signo, int si_code, 182static void
177 unsigned long address, struct task_struct *tsk) 183force_sig_info_fault(int si_signo, int si_code, unsigned long address,
184 struct task_struct *tsk)
178{ 185{
179 siginfo_t info; 186 siginfo_t info;
180 187
181 info.si_signo = si_signo; 188 info.si_signo = si_signo;
182 info.si_errno = 0; 189 info.si_errno = 0;
183 info.si_code = si_code; 190 info.si_code = si_code;
184 info.si_addr = (void __user *)address; 191 info.si_addr = (void __user *)address;
192
185 force_sig_info(si_signo, &info, tsk); 193 force_sig_info(si_signo, &info, tsk);
186} 194}
187 195
188#ifdef CONFIG_X86_64 196DEFINE_SPINLOCK(pgd_lock);
189static int bad_address(void *p) 197LIST_HEAD(pgd_list);
198
199#ifdef CONFIG_X86_32
200static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
190{ 201{
191 unsigned long dummy; 202 unsigned index = pgd_index(address);
192 return probe_kernel_address((unsigned long *)p, dummy); 203 pgd_t *pgd_k;
204 pud_t *pud, *pud_k;
205 pmd_t *pmd, *pmd_k;
206
207 pgd += index;
208 pgd_k = init_mm.pgd + index;
209
210 if (!pgd_present(*pgd_k))
211 return NULL;
212
213 /*
214 * set_pgd(pgd, *pgd_k); here would be useless on PAE
215 * and redundant with the set_pmd() on non-PAE. As would
216 * set_pud.
217 */
218 pud = pud_offset(pgd, address);
219 pud_k = pud_offset(pgd_k, address);
220 if (!pud_present(*pud_k))
221 return NULL;
222
223 pmd = pmd_offset(pud, address);
224 pmd_k = pmd_offset(pud_k, address);
225 if (!pmd_present(*pmd_k))
226 return NULL;
227
228 if (!pmd_present(*pmd)) {
229 set_pmd(pmd, *pmd_k);
230 arch_flush_lazy_mmu_mode();
231 } else {
232 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
233 }
234
235 return pmd_k;
236}
237
238void vmalloc_sync_all(void)
239{
240 unsigned long address;
241
242 if (SHARED_KERNEL_PMD)
243 return;
244
245 for (address = VMALLOC_START & PMD_MASK;
246 address >= TASK_SIZE && address < FIXADDR_TOP;
247 address += PMD_SIZE) {
248
249 unsigned long flags;
250 struct page *page;
251
252 spin_lock_irqsave(&pgd_lock, flags);
253 list_for_each_entry(page, &pgd_list, lru) {
254 if (!vmalloc_sync_one(page_address(page), address))
255 break;
256 }
257 spin_unlock_irqrestore(&pgd_lock, flags);
258 }
259}
260
261/*
262 * 32-bit:
263 *
264 * Handle a fault on the vmalloc or module mapping area
265 */
266static noinline int vmalloc_fault(unsigned long address)
267{
268 unsigned long pgd_paddr;
269 pmd_t *pmd_k;
270 pte_t *pte_k;
271
272 /* Make sure we are in vmalloc area: */
273 if (!(address >= VMALLOC_START && address < VMALLOC_END))
274 return -1;
275
276 /*
277 * Synchronize this task's top level page-table
278 * with the 'reference' page table.
279 *
280 * Do _not_ use "current" here. We might be inside
281 * an interrupt in the middle of a task switch..
282 */
283 pgd_paddr = read_cr3();
284 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
285 if (!pmd_k)
286 return -1;
287
288 pte_k = pte_offset_kernel(pmd_k, address);
289 if (!pte_present(*pte_k))
290 return -1;
291
292 return 0;
293}
294
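The 32-bit path above copies a missing kernel PMD entry from init_mm's reference page table into the faulting task's top-level table. The toy program below models only that copy-on-demand idea with flat arrays; the names and values are invented for illustration and are not kernel interfaces.

#include <stdio.h>

#define ENTRIES 8

/*
 * Toy model of the 32-bit vmalloc sync: every "process" owns a private
 * top-level table, and kernel entries missing from it are copied on
 * demand from a single shared reference table (init_mm's pgd in the
 * real code).
 */
static unsigned long reference[ENTRIES] = { 0, 0, 0x1000, 0, 0x2000, 0, 0, 0 };
static unsigned long task_table[ENTRIES];	/* starts out empty */

static int sync_one(unsigned long *task, unsigned int idx)
{
	if (!reference[idx])
		return -1;		/* nothing to copy: a genuine fault */
	if (!task[idx])
		task[idx] = reference[idx];
	return 0;
}

int main(void)
{
	/* Index 4 is resolved by copying the reference entry ... */
	printf("idx 4: %d (entry %#lx)\n", sync_one(task_table, 4), task_table[4]);
	/* ... while index 1 has no reference entry and stays a real fault. */
	printf("idx 1: %d (entry %#lx)\n", sync_one(task_table, 1), task_table[1]);
	return 0;
}
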
295/*
296 * Did it hit the DOS screen memory VA from vm86 mode?
297 */
298static inline void
299check_v8086_mode(struct pt_regs *regs, unsigned long address,
300 struct task_struct *tsk)
301{
302 unsigned long bit;
303
304 if (!v8086_mode(regs))
305 return;
306
307 bit = (address - 0xA0000) >> PAGE_SHIFT;
308 if (bit < 32)
309 tsk->thread.screen_bitmap |= 1 << bit;
193} 310}
194#endif
195 311
196static void dump_pagetable(unsigned long address) 312static void dump_pagetable(unsigned long address)
197{ 313{
198#ifdef CONFIG_X86_32
199 __typeof__(pte_val(__pte(0))) page; 314 __typeof__(pte_val(__pte(0))) page;
200 315
201 page = read_cr3(); 316 page = read_cr3();
202 page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; 317 page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
318
203#ifdef CONFIG_X86_PAE 319#ifdef CONFIG_X86_PAE
204 printk("*pdpt = %016Lx ", page); 320 printk("*pdpt = %016Lx ", page);
205 if ((page >> PAGE_SHIFT) < max_low_pfn 321 if ((page >> PAGE_SHIFT) < max_low_pfn
206 && page & _PAGE_PRESENT) { 322 && page & _PAGE_PRESENT) {
207 page &= PAGE_MASK; 323 page &= PAGE_MASK;
208 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) 324 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
209 & (PTRS_PER_PMD - 1)]; 325 & (PTRS_PER_PMD - 1)];
210 printk(KERN_CONT "*pde = %016Lx ", page); 326 printk(KERN_CONT "*pde = %016Lx ", page);
211 page &= ~_PAGE_NX; 327 page &= ~_PAGE_NX;
212 } 328 }
@@ -218,19 +334,145 @@ static void dump_pagetable(unsigned long address)
218 * We must not directly access the pte in the highpte 334 * We must not directly access the pte in the highpte
219 * case if the page table is located in highmem. 335 * case if the page table is located in highmem.
220 * And let's rather not kmap-atomic the pte, just in case 336 * And let's rather not kmap-atomic the pte, just in case
221 * it's allocated already. 337 * it's allocated already:
222 */ 338 */
223 if ((page >> PAGE_SHIFT) < max_low_pfn 339 if ((page >> PAGE_SHIFT) < max_low_pfn
224 && (page & _PAGE_PRESENT) 340 && (page & _PAGE_PRESENT)
225 && !(page & _PAGE_PSE)) { 341 && !(page & _PAGE_PSE)) {
342
226 page &= PAGE_MASK; 343 page &= PAGE_MASK;
227 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) 344 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
228 & (PTRS_PER_PTE - 1)]; 345 & (PTRS_PER_PTE - 1)];
229 printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); 346 printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
230 } 347 }
231 348
232 printk("\n"); 349 printk("\n");
233#else /* CONFIG_X86_64 */ 350}
351
352#else /* CONFIG_X86_64: */
353
354void vmalloc_sync_all(void)
355{
356 unsigned long address;
357
358 for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
359 address += PGDIR_SIZE) {
360
361 const pgd_t *pgd_ref = pgd_offset_k(address);
362 unsigned long flags;
363 struct page *page;
364
365 if (pgd_none(*pgd_ref))
366 continue;
367
368 spin_lock_irqsave(&pgd_lock, flags);
369 list_for_each_entry(page, &pgd_list, lru) {
370 pgd_t *pgd;
371 pgd = (pgd_t *)page_address(page) + pgd_index(address);
372 if (pgd_none(*pgd))
373 set_pgd(pgd, *pgd_ref);
374 else
375 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
376 }
377 spin_unlock_irqrestore(&pgd_lock, flags);
378 }
379}
380
381/*
382 * 64-bit:
383 *
384 * Handle a fault on the vmalloc area
385 *
386 * This assumes no large pages in there.
387 */
388static noinline int vmalloc_fault(unsigned long address)
389{
390 pgd_t *pgd, *pgd_ref;
391 pud_t *pud, *pud_ref;
392 pmd_t *pmd, *pmd_ref;
393 pte_t *pte, *pte_ref;
394
395 /* Make sure we are in vmalloc area: */
396 if (!(address >= VMALLOC_START && address < VMALLOC_END))
397 return -1;
398
399 /*
400 * Copy kernel mappings over when needed. This can also
401 * happen within a race in page table update. In the latter
402 * case just flush:
403 */
404 pgd = pgd_offset(current->active_mm, address);
405 pgd_ref = pgd_offset_k(address);
406 if (pgd_none(*pgd_ref))
407 return -1;
408
409 if (pgd_none(*pgd))
410 set_pgd(pgd, *pgd_ref);
411 else
412 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
413
414 /*
415 * Below here mismatches are bugs because these lower tables
416 * are shared:
417 */
418
419 pud = pud_offset(pgd, address);
420 pud_ref = pud_offset(pgd_ref, address);
421 if (pud_none(*pud_ref))
422 return -1;
423
424 if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
425 BUG();
426
427 pmd = pmd_offset(pud, address);
428 pmd_ref = pmd_offset(pud_ref, address);
429 if (pmd_none(*pmd_ref))
430 return -1;
431
432 if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
433 BUG();
434
435 pte_ref = pte_offset_kernel(pmd_ref, address);
436 if (!pte_present(*pte_ref))
437 return -1;
438
439 pte = pte_offset_kernel(pmd, address);
440
441 /*
442 * Don't use pte_page here, because the mappings can point
443 * outside mem_map, and the NUMA hash lookup cannot handle
444 * that:
445 */
446 if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
447 BUG();
448
449 return 0;
450}
451
452static const char errata93_warning[] =
453KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
454KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
455KERN_ERR "******* Please consider a BIOS update.\n"
456KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
457
458/*
459 * No vm86 mode in 64-bit mode:
460 */
461static inline void
462check_v8086_mode(struct pt_regs *regs, unsigned long address,
463 struct task_struct *tsk)
464{
465}
466
467static int bad_address(void *p)
468{
469 unsigned long dummy;
470
471 return probe_kernel_address((unsigned long *)p, dummy);
472}
473
474static void dump_pagetable(unsigned long address)
475{
234 pgd_t *pgd; 476 pgd_t *pgd;
235 pud_t *pud; 477 pud_t *pud;
236 pmd_t *pmd; 478 pmd_t *pmd;
@@ -239,102 +481,77 @@ static void dump_pagetable(unsigned long address)
239 pgd = (pgd_t *)read_cr3(); 481 pgd = (pgd_t *)read_cr3();
240 482
241 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); 483 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
484
242 pgd += pgd_index(address); 485 pgd += pgd_index(address);
243 if (bad_address(pgd)) goto bad; 486 if (bad_address(pgd))
487 goto bad;
488
244 printk("PGD %lx ", pgd_val(*pgd)); 489 printk("PGD %lx ", pgd_val(*pgd));
245 if (!pgd_present(*pgd)) goto ret; 490
491 if (!pgd_present(*pgd))
492 goto out;
246 493
247 pud = pud_offset(pgd, address); 494 pud = pud_offset(pgd, address);
248 if (bad_address(pud)) goto bad; 495 if (bad_address(pud))
496 goto bad;
497
249 printk("PUD %lx ", pud_val(*pud)); 498 printk("PUD %lx ", pud_val(*pud));
250 if (!pud_present(*pud) || pud_large(*pud)) 499 if (!pud_present(*pud) || pud_large(*pud))
251 goto ret; 500 goto out;
252 501
253 pmd = pmd_offset(pud, address); 502 pmd = pmd_offset(pud, address);
254 if (bad_address(pmd)) goto bad; 503 if (bad_address(pmd))
504 goto bad;
505
255 printk("PMD %lx ", pmd_val(*pmd)); 506 printk("PMD %lx ", pmd_val(*pmd));
256 if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; 507 if (!pmd_present(*pmd) || pmd_large(*pmd))
508 goto out;
257 509
258 pte = pte_offset_kernel(pmd, address); 510 pte = pte_offset_kernel(pmd, address);
259 if (bad_address(pte)) goto bad; 511 if (bad_address(pte))
512 goto bad;
513
260 printk("PTE %lx", pte_val(*pte)); 514 printk("PTE %lx", pte_val(*pte));
261ret: 515out:
262 printk("\n"); 516 printk("\n");
263 return; 517 return;
264bad: 518bad:
265 printk("BAD\n"); 519 printk("BAD\n");
266#endif
267} 520}
268 521
269#ifdef CONFIG_X86_32 522#endif /* CONFIG_X86_64 */
270static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
271{
272 unsigned index = pgd_index(address);
273 pgd_t *pgd_k;
274 pud_t *pud, *pud_k;
275 pmd_t *pmd, *pmd_k;
276
277 pgd += index;
278 pgd_k = init_mm.pgd + index;
279
280 if (!pgd_present(*pgd_k))
281 return NULL;
282 523
283 /* 524/*
284 * set_pgd(pgd, *pgd_k); here would be useless on PAE 525 * Workaround for K8 erratum #93 & buggy BIOS.
285 * and redundant with the set_pmd() on non-PAE. As would 526 *
286 * set_pud. 527 * BIOS SMM functions are required to use a specific workaround
287 */ 528 * to avoid corruption of the 64bit RIP register on C stepping K8.
288 529 *
289 pud = pud_offset(pgd, address); 530 * A lot of BIOS that didn't get tested properly miss this.
290 pud_k = pud_offset(pgd_k, address); 531 *
291 if (!pud_present(*pud_k)) 532 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
292 return NULL; 533 * Try to work around it here.
293 534 *
294 pmd = pmd_offset(pud, address); 535 * Note we only handle faults in kernel here.
295 pmd_k = pmd_offset(pud_k, address); 536 * Does nothing on 32-bit.
296 if (!pmd_present(*pmd_k))
297 return NULL;
298 if (!pmd_present(*pmd)) {
299 set_pmd(pmd, *pmd_k);
300 arch_flush_lazy_mmu_mode();
301 } else
302 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
303 return pmd_k;
304}
305#endif
306
307#ifdef CONFIG_X86_64
308static const char errata93_warning[] =
309KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
310KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
311KERN_ERR "******* Please consider a BIOS update.\n"
312KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
313#endif
314
315/* Workaround for K8 erratum #93 & buggy BIOS.
316 BIOS SMM functions are required to use a specific workaround
317 to avoid corruption of the 64bit RIP register on C stepping K8.
318 A lot of BIOS that didn't get tested properly miss this.
319 The OS sees this as a page fault with the upper 32bits of RIP cleared.
320 Try to work around it here.
321 Note we only handle faults in kernel here.
322 Does nothing for X86_32
323 */ 537 */
324static int is_errata93(struct pt_regs *regs, unsigned long address) 538static int is_errata93(struct pt_regs *regs, unsigned long address)
325{ 539{
326#ifdef CONFIG_X86_64 540#ifdef CONFIG_X86_64
327 static int warned; 541 static int once;
542
328 if (address != regs->ip) 543 if (address != regs->ip)
329 return 0; 544 return 0;
545
330 if ((address >> 32) != 0) 546 if ((address >> 32) != 0)
331 return 0; 547 return 0;
548
332 address |= 0xffffffffUL << 32; 549 address |= 0xffffffffUL << 32;
333 if ((address >= (u64)_stext && address <= (u64)_etext) || 550 if ((address >= (u64)_stext && address <= (u64)_etext) ||
334 (address >= MODULES_VADDR && address <= MODULES_END)) { 551 (address >= MODULES_VADDR && address <= MODULES_END)) {
335 if (!warned) { 552 if (!once) {
336 printk(errata93_warning); 553 printk(errata93_warning);
337 warned = 1; 554 once = 1;
338 } 555 }
339 regs->ip = address; 556 regs->ip = address;
340 return 1; 557 return 1;
@@ -344,16 +561,17 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
344} 561}
345 562
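The repair performed by is_errata93() is plain bit arithmetic: the reported RIP has lost its upper 32 bits, so the handler ORs a sign-extension back in and only then checks whether the result lands in kernel text. The snippet below demonstrates that arithmetic with a made-up address; the kernel-text check itself is omitted.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A RIP whose upper 32 bits were cleared by the erratum. */
	uint64_t truncated = 0x81234567ULL;

	/* The fix-up applied before re-checking against kernel text. */
	uint64_t repaired = truncated | (0xffffffffULL << 32);

	printf("truncated: %#018llx\n", (unsigned long long)truncated);
	printf("repaired : %#018llx\n", (unsigned long long)repaired);
	return 0;
}
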
346/* 563/*
347 * Work around K8 erratum #100: K8 in compat mode occasionally jumps to illegal 564 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
348 * addresses >4GB. We catch this in the page fault handler because these 565 * to illegal addresses >4GB.
349 * addresses are not reachable. Just detect this case and return. Any code 566 *
567 * We catch this in the page fault handler because these addresses
568 * are not reachable. Just detect this case and return. Any code
350 * segment in LDT is compatibility mode. 569 * segment in LDT is compatibility mode.
351 */ 570 */
352static int is_errata100(struct pt_regs *regs, unsigned long address) 571static int is_errata100(struct pt_regs *regs, unsigned long address)
353{ 572{
354#ifdef CONFIG_X86_64 573#ifdef CONFIG_X86_64
355 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && 574 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
356 (address >> 32))
357 return 1; 575 return 1;
358#endif 576#endif
359 return 0; 577 return 0;
@@ -363,8 +581,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
363{ 581{
364#ifdef CONFIG_X86_F00F_BUG 582#ifdef CONFIG_X86_F00F_BUG
365 unsigned long nr; 583 unsigned long nr;
584
366 /* 585 /*
367 * Pentium F0 0F C7 C8 bug workaround. 586 * Pentium F0 0F C7 C8 bug workaround:
368 */ 587 */
369 if (boot_cpu_data.f00f_bug) { 588 if (boot_cpu_data.f00f_bug) {
370 nr = (address - idt_descr.address) >> 3; 589 nr = (address - idt_descr.address) >> 3;
@@ -378,80 +597,87 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
378 return 0; 597 return 0;
379} 598}
380 599
381static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, 600static const char nx_warning[] = KERN_CRIT
382 unsigned long address) 601"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
602
603static void
604show_fault_oops(struct pt_regs *regs, unsigned long error_code,
605 unsigned long address)
383{ 606{
384#ifdef CONFIG_X86_32
385 if (!oops_may_print()) 607 if (!oops_may_print())
386 return; 608 return;
387#endif
388 609
389#ifdef CONFIG_X86_PAE
390 if (error_code & PF_INSTR) { 610 if (error_code & PF_INSTR) {
391 unsigned int level; 611 unsigned int level;
612
392 pte_t *pte = lookup_address(address, &level); 613 pte_t *pte = lookup_address(address, &level);
393 614
394 if (pte && pte_present(*pte) && !pte_exec(*pte)) 615 if (pte && pte_present(*pte) && !pte_exec(*pte))
395 printk(KERN_CRIT "kernel tried to execute " 616 printk(nx_warning, current_uid());
396 "NX-protected page - exploit attempt? "
397 "(uid: %d)\n", current_uid());
398 } 617 }
399#endif
400 618
401 printk(KERN_ALERT "BUG: unable to handle kernel "); 619 printk(KERN_ALERT "BUG: unable to handle kernel ");
402 if (address < PAGE_SIZE) 620 if (address < PAGE_SIZE)
403 printk(KERN_CONT "NULL pointer dereference"); 621 printk(KERN_CONT "NULL pointer dereference");
404 else 622 else
405 printk(KERN_CONT "paging request"); 623 printk(KERN_CONT "paging request");
624
406 printk(KERN_CONT " at %p\n", (void *) address); 625 printk(KERN_CONT " at %p\n", (void *) address);
407 printk(KERN_ALERT "IP:"); 626 printk(KERN_ALERT "IP:");
408 printk_address(regs->ip, 1); 627 printk_address(regs->ip, 1);
628
409 dump_pagetable(address); 629 dump_pagetable(address);
410} 630}
411 631
412#ifdef CONFIG_X86_64 632static noinline void
413static noinline void pgtable_bad(struct pt_regs *regs, 633pgtable_bad(struct pt_regs *regs, unsigned long error_code,
414 unsigned long error_code, unsigned long address) 634 unsigned long address)
415{ 635{
416 unsigned long flags = oops_begin(); 636 struct task_struct *tsk;
417 int sig = SIGKILL; 637 unsigned long flags;
418 struct task_struct *tsk = current; 638 int sig;
639
640 flags = oops_begin();
641 tsk = current;
642 sig = SIGKILL;
419 643
420 printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", 644 printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
421 tsk->comm, address); 645 tsk->comm, address);
422 dump_pagetable(address); 646 dump_pagetable(address);
423 tsk->thread.cr2 = address; 647
424 tsk->thread.trap_no = 14; 648 tsk->thread.cr2 = address;
425 tsk->thread.error_code = error_code; 649 tsk->thread.trap_no = 14;
650 tsk->thread.error_code = error_code;
651
426 if (__die("Bad pagetable", regs, error_code)) 652 if (__die("Bad pagetable", regs, error_code))
427 sig = 0; 653 sig = 0;
654
428 oops_end(flags, regs, sig); 655 oops_end(flags, regs, sig);
429} 656}
430#endif
431 657
432static noinline void no_context(struct pt_regs *regs, 658static noinline void
433 unsigned long error_code, unsigned long address) 659no_context(struct pt_regs *regs, unsigned long error_code,
660 unsigned long address)
434{ 661{
435 struct task_struct *tsk = current; 662 struct task_struct *tsk = current;
436 unsigned long *stackend; 663 unsigned long *stackend;
437
438#ifdef CONFIG_X86_64
439 unsigned long flags; 664 unsigned long flags;
440 int sig; 665 int sig;
441#endif
442 666
443 /* Are we prepared to handle this kernel fault? */ 667 /* Are we prepared to handle this kernel fault? */
444 if (fixup_exception(regs)) 668 if (fixup_exception(regs))
445 return; 669 return;
446 670
447 /* 671 /*
448 * X86_32 672 * 32-bit:
449 * Valid to do another page fault here, because if this fault 673 *
450 * had been triggered by is_prefetch fixup_exception would have 674 * Valid to do another page fault here, because if this fault
451 * handled it. 675 * had been triggered by is_prefetch fixup_exception would have
676 * handled it.
677 *
678 * 64-bit:
452 * 679 *
453 * X86_64 680 * Hall of shame of CPU/BIOS bugs.
454 * Hall of shame of CPU/BIOS bugs.
455 */ 681 */
456 if (is_prefetch(regs, error_code, address)) 682 if (is_prefetch(regs, error_code, address))
457 return; 683 return;
@@ -461,54 +687,70 @@ static noinline void no_context(struct pt_regs *regs,
461 687
462 /* 688 /*
463 * Oops. The kernel tried to access some bad page. We'll have to 689 * Oops. The kernel tried to access some bad page. We'll have to
464 * terminate things with extreme prejudice. 690 * terminate things with extreme prejudice:
465 */ 691 */
466#ifdef CONFIG_X86_32
467 bust_spinlocks(1);
468#else
469 flags = oops_begin(); 692 flags = oops_begin();
470#endif
471 693
472 show_fault_oops(regs, error_code, address); 694 show_fault_oops(regs, error_code, address);
473 695
474 stackend = end_of_stack(tsk); 696 stackend = end_of_stack(tsk);
475 if (*stackend != STACK_END_MAGIC) 697 if (*stackend != STACK_END_MAGIC)
476 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); 698 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
477 699
478 tsk->thread.cr2 = address; 700 tsk->thread.cr2 = address;
479 tsk->thread.trap_no = 14; 701 tsk->thread.trap_no = 14;
480 tsk->thread.error_code = error_code; 702 tsk->thread.error_code = error_code;
481 703
482#ifdef CONFIG_X86_32
483 die("Oops", regs, error_code);
484 bust_spinlocks(0);
485 do_exit(SIGKILL);
486#else
487 sig = SIGKILL; 704 sig = SIGKILL;
488 if (__die("Oops", regs, error_code)) 705 if (__die("Oops", regs, error_code))
489 sig = 0; 706 sig = 0;
707
490 /* Executive summary in case the body of the oops scrolled away */ 708 /* Executive summary in case the body of the oops scrolled away */
491 printk(KERN_EMERG "CR2: %016lx\n", address); 709 printk(KERN_EMERG "CR2: %016lx\n", address);
710
492 oops_end(flags, regs, sig); 711 oops_end(flags, regs, sig);
493#endif
494} 712}
495 713
496static void __bad_area_nosemaphore(struct pt_regs *regs, 714/*
497 unsigned long error_code, unsigned long address, 715 * Print out info about fatal segfaults, if the show_unhandled_signals
498 int si_code) 716 * sysctl is set:
717 */
718static inline void
719show_signal_msg(struct pt_regs *regs, unsigned long error_code,
720 unsigned long address, struct task_struct *tsk)
721{
722 if (!unhandled_signal(tsk, SIGSEGV))
723 return;
724
725 if (!printk_ratelimit())
726 return;
727
728 printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
729 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
730 tsk->comm, task_pid_nr(tsk), address,
731 (void *)regs->ip, (void *)regs->sp, error_code);
732
733 print_vma_addr(KERN_CONT " in ", regs->ip);
734
735 printk(KERN_CONT "\n");
736}
737
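show_signal_msg() relies on printk_ratelimit() so a misbehaving program cannot flood the log with segfault reports. The user-space sketch below shows the same allow-one-per-interval idea with an invented five-second window; it is not the kernel's ratelimit implementation.

#include <stdio.h>
#include <time.h>

/* Allow at most one message per (made-up) five-second window. */
static int ratelimit(void)
{
	static time_t last;
	time_t now = time(NULL);

	if (last && now - last < 5)
		return 0;	/* suppressed */
	last = now;
	return 1;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		if (ratelimit())
			printf("segfault-style diagnostic %d\n", i);
	return 0;
}
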
738static void
739__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
740 unsigned long address, int si_code)
499{ 741{
500 struct task_struct *tsk = current; 742 struct task_struct *tsk = current;
501 743
502 /* User mode accesses just cause a SIGSEGV */ 744 /* User mode accesses just cause a SIGSEGV */
503 if (error_code & PF_USER) { 745 if (error_code & PF_USER) {
504 /* 746 /*
505 * It's possible to have interrupts off here. 747 * It's possible to have interrupts off here:
506 */ 748 */
507 local_irq_enable(); 749 local_irq_enable();
508 750
509 /* 751 /*
510 * Valid to do another page fault here because this one came 752 * Valid to do another page fault here because this one came
511 * from user space. 753 * from user space:
512 */ 754 */
513 if (is_prefetch(regs, error_code, address)) 755 if (is_prefetch(regs, error_code, address))
514 return; 756 return;
@@ -516,22 +758,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs,
516 if (is_errata100(regs, address)) 758 if (is_errata100(regs, address))
517 return; 759 return;
518 760
519 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 761 if (unlikely(show_unhandled_signals))
520 printk_ratelimit()) { 762 show_signal_msg(regs, error_code, address, tsk);
521 printk( 763
522 "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", 764 /* Kernel addresses are always protection faults: */
523 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, 765 tsk->thread.cr2 = address;
524 tsk->comm, task_pid_nr(tsk), address, 766 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
525 (void *) regs->ip, (void *) regs->sp, error_code); 767 tsk->thread.trap_no = 14;
526 print_vma_addr(" in ", regs->ip);
527 printk("\n");
528 }
529 768
530 tsk->thread.cr2 = address;
531 /* Kernel addresses are always protection faults */
532 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
533 tsk->thread.trap_no = 14;
534 force_sig_info_fault(SIGSEGV, si_code, address, tsk); 769 force_sig_info_fault(SIGSEGV, si_code, address, tsk);
770
535 return; 771 return;
536 } 772 }
537 773
@@ -541,15 +777,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs,
541 no_context(regs, error_code, address); 777 no_context(regs, error_code, address);
542} 778}
543 779
544static noinline void bad_area_nosemaphore(struct pt_regs *regs, 780static noinline void
545 unsigned long error_code, unsigned long address) 781bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
782 unsigned long address)
546{ 783{
547 __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); 784 __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
548} 785}
549 786
550static void __bad_area(struct pt_regs *regs, 787static void
551 unsigned long error_code, unsigned long address, 788__bad_area(struct pt_regs *regs, unsigned long error_code,
552 int si_code) 789 unsigned long address, int si_code)
553{ 790{
554 struct mm_struct *mm = current->mm; 791 struct mm_struct *mm = current->mm;
555 792
@@ -562,67 +799,75 @@ static void __bad_area(struct pt_regs *regs,
562 __bad_area_nosemaphore(regs, error_code, address, si_code); 799 __bad_area_nosemaphore(regs, error_code, address, si_code);
563} 800}
564 801
565static noinline void bad_area(struct pt_regs *regs, 802static noinline void
566 unsigned long error_code, unsigned long address) 803bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
567{ 804{
568 __bad_area(regs, error_code, address, SEGV_MAPERR); 805 __bad_area(regs, error_code, address, SEGV_MAPERR);
569} 806}
570 807
571static noinline void bad_area_access_error(struct pt_regs *regs, 808static noinline void
572 unsigned long error_code, unsigned long address) 809bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
810 unsigned long address)
573{ 811{
574 __bad_area(regs, error_code, address, SEGV_ACCERR); 812 __bad_area(regs, error_code, address, SEGV_ACCERR);
575} 813}
576 814
577/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ 815/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
578static void out_of_memory(struct pt_regs *regs, 816static void
579 unsigned long error_code, unsigned long address) 817out_of_memory(struct pt_regs *regs, unsigned long error_code,
818 unsigned long address)
580{ 819{
581 /* 820 /*
582 * We ran out of memory, call the OOM killer, and return to userspace 821 * We ran out of memory, call the OOM killer, and return to userspace
583 * (which will retry the fault, or kill us if we got oom-killed). 822 * (which will retry the fault, or kill us if we got oom-killed):
584 */ 823 */
585 up_read(&current->mm->mmap_sem); 824 up_read(&current->mm->mmap_sem);
825
586 pagefault_out_of_memory(); 826 pagefault_out_of_memory();
587} 827}
588 828
589static void do_sigbus(struct pt_regs *regs, 829static void
590 unsigned long error_code, unsigned long address) 830do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
591{ 831{
592 struct task_struct *tsk = current; 832 struct task_struct *tsk = current;
593 struct mm_struct *mm = tsk->mm; 833 struct mm_struct *mm = tsk->mm;
594 834
595 up_read(&mm->mmap_sem); 835 up_read(&mm->mmap_sem);
596 836
597 /* Kernel mode? Handle exceptions or die */ 837 /* Kernel mode? Handle exceptions or die: */
598 if (!(error_code & PF_USER)) 838 if (!(error_code & PF_USER))
599 no_context(regs, error_code, address); 839 no_context(regs, error_code, address);
600#ifdef CONFIG_X86_32 840
601 /* User space => ok to do another page fault */ 841 /* User-space => ok to do another page fault: */
602 if (is_prefetch(regs, error_code, address)) 842 if (is_prefetch(regs, error_code, address))
603 return; 843 return;
604#endif 844
605 tsk->thread.cr2 = address; 845 tsk->thread.cr2 = address;
606 tsk->thread.error_code = error_code; 846 tsk->thread.error_code = error_code;
607 tsk->thread.trap_no = 14; 847 tsk->thread.trap_no = 14;
848
608 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); 849 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
609} 850}
610 851
611static noinline void mm_fault_error(struct pt_regs *regs, 852static noinline void
612 unsigned long error_code, unsigned long address, unsigned int fault) 853mm_fault_error(struct pt_regs *regs, unsigned long error_code,
854 unsigned long address, unsigned int fault)
613{ 855{
614 if (fault & VM_FAULT_OOM) 856 if (fault & VM_FAULT_OOM) {
615 out_of_memory(regs, error_code, address); 857 out_of_memory(regs, error_code, address);
616 else if (fault & VM_FAULT_SIGBUS) 858 } else {
617 do_sigbus(regs, error_code, address); 859 if (fault & VM_FAULT_SIGBUS)
618 else 860 do_sigbus(regs, error_code, address);
619 BUG(); 861 else
862 BUG();
863 }
620} 864}
621 865
622static int spurious_fault_check(unsigned long error_code, pte_t *pte) 866static int spurious_fault_check(unsigned long error_code, pte_t *pte)
623{ 867{
624 if ((error_code & PF_WRITE) && !pte_write(*pte)) 868 if ((error_code & PF_WRITE) && !pte_write(*pte))
625 return 0; 869 return 0;
870
626 if ((error_code & PF_INSTR) && !pte_exec(*pte)) 871 if ((error_code & PF_INSTR) && !pte_exec(*pte))
627 return 0; 872 return 0;
628 873
@@ -630,21 +875,25 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
630} 875}
631 876
632/* 877/*
633 * Handle a spurious fault caused by a stale TLB entry. This allows 878 * Handle a spurious fault caused by a stale TLB entry.
634 * us to lazily refresh the TLB when increasing the permissions of a 879 *
635 * kernel page (RO -> RW or NX -> X). Doing it eagerly is very 880 * This allows us to lazily refresh the TLB when increasing the
636 * expensive since that implies doing a full cross-processor TLB 881 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
637 * flush, even if no stale TLB entries exist on other processors. 882 * eagerly is very expensive since that implies doing a full
883 * cross-processor TLB flush, even if no stale TLB entries exist
884 * on other processors.
885 *
638 * There are no security implications to leaving a stale TLB when 886 * There are no security implications to leaving a stale TLB when
639 * increasing the permissions on a page. 887 * increasing the permissions on a page.
640 */ 888 */
641static noinline int spurious_fault(unsigned long error_code, 889static noinline int
642 unsigned long address) 890spurious_fault(unsigned long error_code, unsigned long address)
643{ 891{
644 pgd_t *pgd; 892 pgd_t *pgd;
645 pud_t *pud; 893 pud_t *pud;
646 pmd_t *pmd; 894 pmd_t *pmd;
647 pte_t *pte; 895 pte_t *pte;
896 int ret;
648 897
649 /* Reserved-bit violation or user access to kernel space? */ 898 /* Reserved-bit violation or user access to kernel space? */
650 if (error_code & (PF_USER | PF_RSVD)) 899 if (error_code & (PF_USER | PF_RSVD))
@@ -672,123 +921,46 @@ static noinline int spurious_fault(unsigned long error_code,
672 if (!pte_present(*pte)) 921 if (!pte_present(*pte))
673 return 0; 922 return 0;
674 923
675 return spurious_fault_check(error_code, pte); 924 ret = spurious_fault_check(error_code, pte);
676} 925 if (!ret)
677 926 return 0;
678/*
679 * X86_32
680 * Handle a fault on the vmalloc or module mapping area
681 *
682 * X86_64
683 * Handle a fault on the vmalloc area
684 *
685 * This assumes no large pages in there.
686 */
687static noinline int vmalloc_fault(unsigned long address)
688{
689#ifdef CONFIG_X86_32
690 unsigned long pgd_paddr;
691 pmd_t *pmd_k;
692 pte_t *pte_k;
693
694 /* Make sure we are in vmalloc area */
695 if (!(address >= VMALLOC_START && address < VMALLOC_END))
696 return -1;
697 927
698 /* 928 /*
699 * Synchronize this task's top level page-table 929 * Make sure we have permissions in PMD.
700 * with the 'reference' page table. 930 * If not, then there's a bug in the page tables:
701 *
702 * Do _not_ use "current" here. We might be inside
703 * an interrupt in the middle of a task switch..
704 */ 931 */
705 pgd_paddr = read_cr3(); 932 ret = spurious_fault_check(error_code, (pte_t *) pmd);
706 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); 933 WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
707 if (!pmd_k)
708 return -1;
709 pte_k = pte_offset_kernel(pmd_k, address);
710 if (!pte_present(*pte_k))
711 return -1;
712 return 0;
713#else
714 pgd_t *pgd, *pgd_ref;
715 pud_t *pud, *pud_ref;
716 pmd_t *pmd, *pmd_ref;
717 pte_t *pte, *pte_ref;
718 934
719 /* Make sure we are in vmalloc area */ 935 return ret;
720 if (!(address >= VMALLOC_START && address < VMALLOC_END))
721 return -1;
722
723 /* Copy kernel mappings over when needed. This can also
724 happen within a race in page table update. In the later
725 case just flush. */
726
727 pgd = pgd_offset(current->active_mm, address);
728 pgd_ref = pgd_offset_k(address);
729 if (pgd_none(*pgd_ref))
730 return -1;
731 if (pgd_none(*pgd))
732 set_pgd(pgd, *pgd_ref);
733 else
734 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
735
736 /* Below here mismatches are bugs because these lower tables
737 are shared */
738
739 pud = pud_offset(pgd, address);
740 pud_ref = pud_offset(pgd_ref, address);
741 if (pud_none(*pud_ref))
742 return -1;
743 if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
744 BUG();
745 pmd = pmd_offset(pud, address);
746 pmd_ref = pmd_offset(pud_ref, address);
747 if (pmd_none(*pmd_ref))
748 return -1;
749 if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
750 BUG();
751 pte_ref = pte_offset_kernel(pmd_ref, address);
752 if (!pte_present(*pte_ref))
753 return -1;
754 pte = pte_offset_kernel(pmd, address);
755 /* Don't use pte_page here, because the mappings can point
756 outside mem_map, and the NUMA hash lookup cannot handle
757 that. */
758 if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
759 BUG();
760 return 0;
761#endif
762} 936}
763 937
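A fault is treated as spurious only when the page-table entry already grants the access that trapped, which means the TLB entry was stale. The reduced model below captures that comparison; the flag constants are illustrative stand-ins, not the real x86 bit layout, and this is not the kernel function.

#include <stdio.h>

#define PF_WRITE	0x2
#define PF_INSTR	0x10
#define PTE_WRITE	0x1
#define PTE_EXEC	0x2

/* The fault was spurious only if the PTE already allows the access. */
static int fault_is_spurious(unsigned long error_code, unsigned long pte_flags)
{
	if ((error_code & PF_WRITE) && !(pte_flags & PTE_WRITE))
		return 0;
	if ((error_code & PF_INSTR) && !(pte_flags & PTE_EXEC))
		return 0;
	return 1;
}

int main(void)
{
	printf("write fault, PTE now writable:    %d\n",
	       fault_is_spurious(PF_WRITE, PTE_WRITE));
	printf("write fault, PTE still read-only: %d\n",
	       fault_is_spurious(PF_WRITE, 0));
	return 0;
}
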
764int show_unhandled_signals = 1; 938int show_unhandled_signals = 1;
765 939
766static inline int access_error(unsigned long error_code, int write, 940static inline int
767 struct vm_area_struct *vma) 941access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
768{ 942{
769 if (write) { 943 if (write) {
770 /* write, present and write, not present */ 944 /* write, present and write, not present: */
771 if (unlikely(!(vma->vm_flags & VM_WRITE))) 945 if (unlikely(!(vma->vm_flags & VM_WRITE)))
772 return 1; 946 return 1;
773 } else if (unlikely(error_code & PF_PROT)) { 947 return 0;
774 /* read, present */
775 return 1;
776 } else {
777 /* read, not present */
778 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
779 return 1;
780 } 948 }
781 949
950 /* read, present: */
951 if (unlikely(error_code & PF_PROT))
952 return 1;
953
954 /* read, not present: */
955 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
956 return 1;
957
782 return 0; 958 return 0;
783} 959}
784 960
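The rewritten access_error() reads as three early-return cases: a write needs VM_WRITE; a read that faulted on a present page is a protection error; a read on a not-present page needs any of read, write or exec. The stand-alone sketch below restates that decision structure with invented flag constants so it can be compiled and poked at outside the kernel.

#include <stdio.h>

#define VM_READ		0x1
#define VM_WRITE	0x2
#define VM_EXEC		0x4
#define PF_PROT		0x1

/* Same decision structure as the rewritten helper, with toy flag values. */
static int access_error(unsigned long error_code, int write,
			unsigned long vm_flags)
{
	if (write)
		return !(vm_flags & VM_WRITE);	/* write, present or not */

	if (error_code & PF_PROT)
		return 1;			/* read, present */

	/* read, not present */
	return !(vm_flags & (VM_READ | VM_EXEC | VM_WRITE));
}

int main(void)
{
	printf("write to read-only vma: %d\n", access_error(0, 1, VM_READ));
	printf("read of readable vma:   %d\n", access_error(0, 0, VM_READ));
	return 0;
}
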
785static int fault_in_kernel_space(unsigned long address) 961static int fault_in_kernel_space(unsigned long address)
786{ 962{
787#ifdef CONFIG_X86_32 963 return address >= TASK_SIZE_MAX;
788 return address >= TASK_SIZE;
789#else /* !CONFIG_X86_32 */
790 return address >= TASK_SIZE64;
791#endif /* CONFIG_X86_32 */
792} 964}
793 965
794/* 966/*
@@ -796,23 +968,22 @@ static int fault_in_kernel_space(unsigned long address)
796 * and the problem, and then passes it off to one of the appropriate 968 * and the problem, and then passes it off to one of the appropriate
797 * routines. 969 * routines.
798 */ 970 */
799#ifdef CONFIG_X86_64 971dotraplinkage void __kprobes
800asmlinkage 972do_page_fault(struct pt_regs *regs, unsigned long error_code)
801#endif
802void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
803{ 973{
804 unsigned long address; 974 struct vm_area_struct *vma;
805 struct task_struct *tsk; 975 struct task_struct *tsk;
976 unsigned long address;
806 struct mm_struct *mm; 977 struct mm_struct *mm;
807 struct vm_area_struct *vma;
808 int write; 978 int write;
809 int fault; 979 int fault;
810 980
811 tsk = current; 981 tsk = current;
812 mm = tsk->mm; 982 mm = tsk->mm;
983
813 prefetchw(&mm->mmap_sem); 984 prefetchw(&mm->mmap_sem);
814 985
815 /* get the address */ 986 /* Get the faulting address: */
816 address = read_cr2(); 987 address = read_cr2();
817 988
818 if (unlikely(kmmio_fault(regs, address))) 989 if (unlikely(kmmio_fault(regs, address)))
@@ -836,22 +1007,23 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
836 vmalloc_fault(address) >= 0) 1007 vmalloc_fault(address) >= 0)
837 return; 1008 return;
838 1009
839 /* Can handle a stale RO->RW TLB */ 1010 /* Can handle a stale RO->RW TLB: */
840 if (spurious_fault(error_code, address)) 1011 if (spurious_fault(error_code, address))
841 return; 1012 return;
842 1013
843 /* kprobes don't want to hook the spurious faults. */ 1014 /* kprobes don't want to hook the spurious faults: */
844 if (notify_page_fault(regs)) 1015 if (notify_page_fault(regs))
845 return; 1016 return;
846 /* 1017 /*
847 * Don't take the mm semaphore here. If we fixup a prefetch 1018 * Don't take the mm semaphore here. If we fixup a prefetch
848 * fault we could otherwise deadlock. 1019 * fault we could otherwise deadlock:
849 */ 1020 */
850 bad_area_nosemaphore(regs, error_code, address); 1021 bad_area_nosemaphore(regs, error_code, address);
1022
851 return; 1023 return;
852 } 1024 }
853 1025
854 /* kprobes don't want to hook the spurious faults. */ 1026 /* kprobes don't want to hook the spurious faults: */
855 if (unlikely(notify_page_fault(regs))) 1027 if (unlikely(notify_page_fault(regs)))
856 return; 1028 return;
857 /* 1029 /*
@@ -859,22 +1031,22 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
859 * vmalloc fault has been handled. 1031 * vmalloc fault has been handled.
860 * 1032 *
861 * User-mode registers count as a user access even for any 1033 * User-mode registers count as a user access even for any
862 * potential system fault or CPU buglet. 1034 * potential system fault or CPU buglet:
863 */ 1035 */
864 if (user_mode_vm(regs)) { 1036 if (user_mode_vm(regs)) {
865 local_irq_enable(); 1037 local_irq_enable();
866 error_code |= PF_USER; 1038 error_code |= PF_USER;
867 } else if (regs->flags & X86_EFLAGS_IF) 1039 } else {
868 local_irq_enable(); 1040 if (regs->flags & X86_EFLAGS_IF)
1041 local_irq_enable();
1042 }
869 1043
870#ifdef CONFIG_X86_64
871 if (unlikely(error_code & PF_RSVD)) 1044 if (unlikely(error_code & PF_RSVD))
872 pgtable_bad(regs, error_code, address); 1045 pgtable_bad(regs, error_code, address);
873#endif
874 1046
875 /* 1047 /*
876 * If we're in an interrupt, have no user context or are running in an 1048 * If we're in an interrupt, have no user context or are running
877 * atomic region then we must not take the fault. 1049 * in an atomic region then we must not take the fault:
878 */ 1050 */
879 if (unlikely(in_atomic() || !mm)) { 1051 if (unlikely(in_atomic() || !mm)) {
880 bad_area_nosemaphore(regs, error_code, address); 1052 bad_area_nosemaphore(regs, error_code, address);
@@ -883,19 +1055,19 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
883 1055
884 /* 1056 /*
885 * When running in the kernel we expect faults to occur only to 1057 * When running in the kernel we expect faults to occur only to
886 * addresses in user space. All other faults represent errors in the 1058 * addresses in user space. All other faults represent errors in
887 * kernel and should generate an OOPS. Unfortunately, in the case of an 1059 * the kernel and should generate an OOPS. Unfortunately, in the
888 * erroneous fault occurring in a code path which already holds mmap_sem 1060 * case of an erroneous fault occurring in a code path which already
889 * we will deadlock attempting to validate the fault against the 1061 * holds mmap_sem we will deadlock attempting to validate the fault
890 * address space. Luckily the kernel only validly references user 1062 * against the address space. Luckily the kernel only validly
891 * space from well defined areas of code, which are listed in the 1063 * references user space from well defined areas of code, which are
892 * exceptions table. 1064 * listed in the exceptions table.
893 * 1065 *
894 * As the vast majority of faults will be valid we will only perform 1066 * As the vast majority of faults will be valid we will only perform
895 * the source reference check when there is a possibility of a deadlock. 1067 * the source reference check when there is a possibility of a
896 * Attempt to lock the address space, if we cannot we then validate the 1068 * deadlock. Attempt to lock the address space, if we cannot we then
897 * source. If this is invalid we can skip the address space check, 1069 * validate the source. If this is invalid we can skip the address
898 * thus avoiding the deadlock. 1070 * space check, thus avoiding the deadlock:
899 */ 1071 */
900 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 1072 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
901 if ((error_code & PF_USER) == 0 && 1073 if ((error_code & PF_USER) == 0 &&
@@ -906,8 +1078,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
906 down_read(&mm->mmap_sem); 1078 down_read(&mm->mmap_sem);
907 } else { 1079 } else {
908 /* 1080 /*
909 * The above down_read_trylock() might have succeeded in which 1081 * The above down_read_trylock() might have succeeded in
910 * case we'll have missed the might_sleep() from down_read(). 1082 * which case we'll have missed the might_sleep() from
1083 * down_read():
911 */ 1084 */
912 might_sleep(); 1085 might_sleep();
913 } 1086 }
@@ -927,7 +1100,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
927 /* 1100 /*
928 * Accessing the stack below %sp is always a bug. 1101 * Accessing the stack below %sp is always a bug.
929 * The large cushion allows instructions like enter 1102 * The large cushion allows instructions like enter
930 * and pusha to work. ("enter $65535,$31" pushes 1103 * and pusha to work. ("enter $65535, $31" pushes
931 * 32 pointers and then decrements %sp by 65535.) 1104 * 32 pointers and then decrements %sp by 65535.)
932 */ 1105 */
933 if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { 1106 if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
@@ -946,6 +1119,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
946 */ 1119 */
947good_area: 1120good_area:
948 write = error_code & PF_WRITE; 1121 write = error_code & PF_WRITE;
1122
949 if (unlikely(access_error(error_code, write, vma))) { 1123 if (unlikely(access_error(error_code, write, vma))) {
950 bad_area_access_error(regs, error_code, address); 1124 bad_area_access_error(regs, error_code, address);
951 return; 1125 return;
@@ -954,75 +1128,21 @@ good_area:
954 /* 1128 /*
955 * If for any reason at all we couldn't handle the fault, 1129 * If for any reason at all we couldn't handle the fault,
956 * make sure we exit gracefully rather than endlessly redo 1130 * make sure we exit gracefully rather than endlessly redo
957 * the fault. 1131 * the fault:
958 */ 1132 */
959 fault = handle_mm_fault(mm, vma, address, write); 1133 fault = handle_mm_fault(mm, vma, address, write);
1134
960 if (unlikely(fault & VM_FAULT_ERROR)) { 1135 if (unlikely(fault & VM_FAULT_ERROR)) {
961 mm_fault_error(regs, error_code, address, fault); 1136 mm_fault_error(regs, error_code, address, fault);
962 return; 1137 return;
963 } 1138 }
1139
964 if (fault & VM_FAULT_MAJOR) 1140 if (fault & VM_FAULT_MAJOR)
965 tsk->maj_flt++; 1141 tsk->maj_flt++;
966 else 1142 else
967 tsk->min_flt++; 1143 tsk->min_flt++;
968 1144
969#ifdef CONFIG_X86_32 1145 check_v8086_mode(regs, address, tsk);
970 /*
971 * Did it hit the DOS screen memory VA from vm86 mode?
972 */
973 if (v8086_mode(regs)) {
974 unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
975 if (bit < 32)
976 tsk->thread.screen_bitmap |= 1 << bit;
977 }
978#endif
979 up_read(&mm->mmap_sem);
980}
981
982DEFINE_SPINLOCK(pgd_lock);
983LIST_HEAD(pgd_list);
984 1146
985void vmalloc_sync_all(void) 1147 up_read(&mm->mmap_sem);
986{
987 unsigned long address;
988
989#ifdef CONFIG_X86_32
990 if (SHARED_KERNEL_PMD)
991 return;
992
993 for (address = VMALLOC_START & PMD_MASK;
994 address >= TASK_SIZE && address < FIXADDR_TOP;
995 address += PMD_SIZE) {
996 unsigned long flags;
997 struct page *page;
998
999 spin_lock_irqsave(&pgd_lock, flags);
1000 list_for_each_entry(page, &pgd_list, lru) {
1001 if (!vmalloc_sync_one(page_address(page),
1002 address))
1003 break;
1004 }
1005 spin_unlock_irqrestore(&pgd_lock, flags);
1006 }
1007#else /* CONFIG_X86_64 */
1008 for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
1009 address += PGDIR_SIZE) {
1010 const pgd_t *pgd_ref = pgd_offset_k(address);
1011 unsigned long flags;
1012 struct page *page;
1013
1014 if (pgd_none(*pgd_ref))
1015 continue;
1016 spin_lock_irqsave(&pgd_lock, flags);
1017 list_for_each_entry(page, &pgd_list, lru) {
1018 pgd_t *pgd;
1019 pgd = (pgd_t *)page_address(page) + pgd_index(address);
1020 if (pgd_none(*pgd))
1021 set_pgd(pgd, *pgd_ref);
1022 else
1023 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
1024 }
1025 spin_unlock_irqrestore(&pgd_lock, flags);
1026 }
1027#endif
1028} 1148}
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index bcc079c282dd..00f127c80b0e 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,5 +1,6 @@
1#include <linux/highmem.h> 1#include <linux/highmem.h>
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/swap.h> /* for totalram_pages */
3 4
4void *kmap(struct page *page) 5void *kmap(struct page *page)
5{ 6{
@@ -156,3 +157,36 @@ EXPORT_SYMBOL(kmap);
156EXPORT_SYMBOL(kunmap); 157EXPORT_SYMBOL(kunmap);
157EXPORT_SYMBOL(kmap_atomic); 158EXPORT_SYMBOL(kmap_atomic);
158EXPORT_SYMBOL(kunmap_atomic); 159EXPORT_SYMBOL(kunmap_atomic);
160
161#ifdef CONFIG_NUMA
162void __init set_highmem_pages_init(void)
163{
164 struct zone *zone;
165 int nid;
166
167 for_each_zone(zone) {
168 unsigned long zone_start_pfn, zone_end_pfn;
169
170 if (!is_highmem(zone))
171 continue;
172
173 zone_start_pfn = zone->zone_start_pfn;
174 zone_end_pfn = zone_start_pfn + zone->spanned_pages;
175
176 nid = zone_to_nid(zone);
177 printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
178 zone->name, nid, zone_start_pfn, zone_end_pfn);
179
180 add_highpages_with_active_regions(nid, zone_start_pfn,
181 zone_end_pfn);
182 }
183 totalram_pages += totalhigh_pages;
184}
185#else
186void __init set_highmem_pages_init(void)
187{
188 add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
189
190 totalram_pages += totalhigh_pages;
191}
192#endif /* CONFIG_NUMA */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
new file mode 100644
index 000000000000..ce6a722587d8
--- /dev/null
+++ b/arch/x86/mm/init.c
@@ -0,0 +1,49 @@
1#include <linux/swap.h>
2#include <asm/cacheflush.h>
3#include <asm/page.h>
4#include <asm/sections.h>
5#include <asm/system.h>
6
7void free_init_pages(char *what, unsigned long begin, unsigned long end)
8{
9 unsigned long addr = begin;
10
11 if (addr >= end)
12 return;
13
14 /*
15 * If debugging page accesses then do not free this memory but
16 * mark them not present - any buggy init-section access will
17 * create a kernel page fault:
18 */
19#ifdef CONFIG_DEBUG_PAGEALLOC
20 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
21 begin, PAGE_ALIGN(end));
22 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
23#else
24 /*
25 * We just marked the kernel text read only above, now that
26 * we are going to free part of that, we need to make that
27 * writeable first.
28 */
29 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
30
31 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
32
33 for (; addr < end; addr += PAGE_SIZE) {
34 ClearPageReserved(virt_to_page(addr));
35 init_page_count(virt_to_page(addr));
36 memset((void *)(addr & ~(PAGE_SIZE-1)),
37 POISON_FREE_INITMEM, PAGE_SIZE);
38 free_page(addr);
39 totalram_pages++;
40 }
41#endif
42}
43
44void free_initmem(void)
45{
46 free_init_pages("unused kernel memory",
47 (unsigned long)(&__init_begin),
48 (unsigned long)(&__init_end));
49}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 06708ee94aa4..0b087dcd2c18 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -50,8 +50,6 @@
50#include <asm/setup.h> 50#include <asm/setup.h>
51#include <asm/cacheflush.h> 51#include <asm/cacheflush.h>
52 52
53unsigned int __VMALLOC_RESERVE = 128 << 20;
54
55unsigned long max_low_pfn_mapped; 53unsigned long max_low_pfn_mapped;
56unsigned long max_pfn_mapped; 54unsigned long max_pfn_mapped;
57 55
@@ -469,22 +467,10 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
469 work_with_active_regions(nid, add_highpages_work_fn, &data); 467 work_with_active_regions(nid, add_highpages_work_fn, &data);
470} 468}
471 469
472#ifndef CONFIG_NUMA
473static void __init set_highmem_pages_init(void)
474{
475 add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
476
477 totalram_pages += totalhigh_pages;
478}
479#endif /* !CONFIG_NUMA */
480
481#else 470#else
482static inline void permanent_kmaps_init(pgd_t *pgd_base) 471static inline void permanent_kmaps_init(pgd_t *pgd_base)
483{ 472{
484} 473}
485static inline void set_highmem_pages_init(void)
486{
487}
488#endif /* CONFIG_HIGHMEM */ 474#endif /* CONFIG_HIGHMEM */
489 475
490void __init native_pagetable_setup_start(pgd_t *base) 476void __init native_pagetable_setup_start(pgd_t *base)
@@ -847,10 +833,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
847 unsigned long puds, pmds, ptes, tables, start; 833 unsigned long puds, pmds, ptes, tables, start;
848 834
849 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 835 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
850 tables = PAGE_ALIGN(puds * sizeof(pud_t)); 836 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
851 837
852 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 838 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
853 tables += PAGE_ALIGN(pmds * sizeof(pmd_t)); 839 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
854 840
855 if (use_pse) { 841 if (use_pse) {
856 unsigned long extra; 842 unsigned long extra;
@@ -861,10 +847,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
861 } else 847 } else
862 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; 848 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
863 849
864 tables += PAGE_ALIGN(ptes * sizeof(pte_t)); 850 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
865 851
866 /* for fixmap */ 852 /* for fixmap */
867 tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t)); 853 tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
868 854
869 /* 855 /*
870 * RED-PEN putting page tables only on node 0 could 856 * RED-PEN putting page tables only on node 0 could
@@ -1214,45 +1200,6 @@ void mark_rodata_ro(void)
1214} 1200}
1215#endif 1201#endif
1216 1202
1217void free_init_pages(char *what, unsigned long begin, unsigned long end)
1218{
1219#ifdef CONFIG_DEBUG_PAGEALLOC
1220 /*
1221 * If debugging page accesses then do not free this memory but
1222 * mark them not present - any buggy init-section access will
1223 * create a kernel page fault:
1224 */
1225 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
1226 begin, PAGE_ALIGN(end));
1227 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
1228#else
1229 unsigned long addr;
1230
1231 /*
1232 * We just marked the kernel text read only above, now that
1233 * we are going to free part of that, we need to make that
1234 * writeable first.
1235 */
1236 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
1237
1238 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1239 ClearPageReserved(virt_to_page(addr));
1240 init_page_count(virt_to_page(addr));
1241 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
1242 free_page(addr);
1243 totalram_pages++;
1244 }
1245 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
1246#endif
1247}
1248
1249void free_initmem(void)
1250{
1251 free_init_pages("unused kernel memory",
1252 (unsigned long)(&__init_begin),
1253 (unsigned long)(&__init_end));
1254}
1255
1256#ifdef CONFIG_BLK_DEV_INITRD 1203#ifdef CONFIG_BLK_DEV_INITRD
1257void free_initrd_mem(unsigned long start, unsigned long end) 1204void free_initrd_mem(unsigned long start, unsigned long end)
1258{ 1205{
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e6d36b490250..724e537432e7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -714,6 +714,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
714 pos = start_pfn << PAGE_SHIFT; 714 pos = start_pfn << PAGE_SHIFT;
715 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) 715 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
716 << (PMD_SHIFT - PAGE_SHIFT); 716 << (PMD_SHIFT - PAGE_SHIFT);
717 if (end_pfn > (end >> PAGE_SHIFT))
718 end_pfn = end >> PAGE_SHIFT;
717 if (start_pfn < end_pfn) { 719 if (start_pfn < end_pfn) {
718 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); 720 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
719 pos = end_pfn << PAGE_SHIFT; 721 pos = end_pfn << PAGE_SHIFT;
@@ -945,43 +947,6 @@ void __init mem_init(void)
945 initsize >> 10); 947 initsize >> 10);
946} 948}
947 949
948void free_init_pages(char *what, unsigned long begin, unsigned long end)
949{
950 unsigned long addr = begin;
951
952 if (addr >= end)
953 return;
954
955 /*
956 * If debugging page accesses then do not free this memory but
957 * mark them not present - any buggy init-section access will
958 * create a kernel page fault:
959 */
960#ifdef CONFIG_DEBUG_PAGEALLOC
961 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
962 begin, PAGE_ALIGN(end));
963 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
964#else
965 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
966
967 for (; addr < end; addr += PAGE_SIZE) {
968 ClearPageReserved(virt_to_page(addr));
969 init_page_count(virt_to_page(addr));
970 memset((void *)(addr & ~(PAGE_SIZE-1)),
971 POISON_FREE_INITMEM, PAGE_SIZE);
972 free_page(addr);
973 totalram_pages++;
974 }
975#endif
976}
977
978void free_initmem(void)
979{
980 free_init_pages("unused kernel memory",
981 (unsigned long)(&__init_begin),
982 (unsigned long)(&__init_end));
983}
984
985#ifdef CONFIG_DEBUG_RODATA 950#ifdef CONFIG_DEBUG_RODATA
986const int rodata_test_data = 0xC3; 951const int rodata_test_data = 0xC3;
987EXPORT_SYMBOL_GPL(rodata_test_data); 952EXPORT_SYMBOL_GPL(rodata_test_data);
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ca53224fc56c..04102d42ff42 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -20,6 +20,17 @@
20#include <asm/pat.h> 20#include <asm/pat.h>
21#include <linux/module.h> 21#include <linux/module.h>
22 22
23int is_io_mapping_possible(resource_size_t base, unsigned long size)
24{
25#ifndef CONFIG_X86_PAE
26 /* There is no way to map greater than 1 << 32 address without PAE */
27 if (base + size > 0x100000000ULL)
28 return 0;
29#endif
30 return 1;
31}
32EXPORT_SYMBOL_GPL(is_io_mapping_possible);
33
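The new is_io_mapping_possible() check encodes a simple range rule: without PAE a 32-bit kernel cannot address physical memory at or above 4GB, so a mapping whose end crosses that boundary must be refused. The small demo below evaluates that rule for two made-up BAR-like ranges.

#include <stdio.h>
#include <stdint.h>

/* Same rule as the non-PAE branch: base + size must stay within 4GB. */
static int mapping_possible_without_pae(uint64_t base, uint64_t size)
{
	return base + size <= 0x100000000ULL;
}

int main(void)
{
	printf("3.5GB base + 256MB:   %d\n",
	       mapping_possible_without_pae(0xE0000000ULL, 0x10000000ULL));
	printf("3.875GB base + 256MB: %d\n",
	       mapping_possible_without_pae(0xF8000000ULL, 0x10000000ULL));
	return 0;
}
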
23/* Map 'pfn' using fixed map 'type' and protections 'prot' 34/* Map 'pfn' using fixed map 'type' and protections 'prot'
24 */ 35 */
25void * 36void *
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 9cab18b0b857..0bcd7883d036 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -9,44 +9,44 @@
 
 #include <asm/e820.h>
 
-static void __init memtest(unsigned long start_phys, unsigned long size,
-				 unsigned pattern)
+static u64 patterns[] __initdata = {
+	0,
+	0xffffffffffffffffULL,
+	0x5555555555555555ULL,
+	0xaaaaaaaaaaaaaaaaULL,
+	0x1111111111111111ULL,
+	0x2222222222222222ULL,
+	0x4444444444444444ULL,
+	0x8888888888888888ULL,
+	0x3333333333333333ULL,
+	0x6666666666666666ULL,
+	0x9999999999999999ULL,
+	0xccccccccccccccccULL,
+	0x7777777777777777ULL,
+	0xbbbbbbbbbbbbbbbbULL,
+	0xddddddddddddddddULL,
+	0xeeeeeeeeeeeeeeeeULL,
+	0x7a6c7258554e494cULL, /* yeah ;-) */
+};
+
+static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
 {
-	unsigned long i;
-	unsigned long *start;
-	unsigned long start_bad;
-	unsigned long last_bad;
-	unsigned long val;
-	unsigned long start_phys_aligned;
-	unsigned long count;
-	unsigned long incr;
-
-	switch (pattern) {
-	case 0:
-		val = 0UL;
-		break;
-	case 1:
-		val = -1UL;
-		break;
-	case 2:
-#ifdef CONFIG_X86_64
-		val = 0x5555555555555555UL;
-#else
-		val = 0x55555555UL;
-#endif
-		break;
-	case 3:
-#ifdef CONFIG_X86_64
-		val = 0xaaaaaaaaaaaaaaaaUL;
-#else
-		val = 0xaaaaaaaaUL;
-#endif
-		break;
-	default:
-		return;
-	}
+	printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n",
+	       (unsigned long long) pattern,
+	       (unsigned long long) start_bad,
+	       (unsigned long long) end_bad);
+	reserve_early(start_bad, end_bad, "BAD RAM");
+}
 
-	incr = sizeof(unsigned long);
+static void __init memtest(u64 pattern, u64 start_phys, u64 size)
+{
+	u64 i, count;
+	u64 *start;
+	u64 start_bad, last_bad;
+	u64 start_phys_aligned;
+	size_t incr;
+
+	incr = sizeof(pattern);
 	start_phys_aligned = ALIGN(start_phys, incr);
 	count = (size - (start_phys_aligned - start_phys))/incr;
 	start = __va(start_phys_aligned);
@@ -54,25 +54,42 @@ static void __init memtest(unsigned long start_phys, unsigned long size,
 	last_bad = 0;
 
 	for (i = 0; i < count; i++)
-		start[i] = val;
+		start[i] = pattern;
 	for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
-		if (*start != val) {
-			if (start_phys_aligned == last_bad + incr) {
-				last_bad += incr;
-			} else {
-				if (start_bad) {
-					printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved",
-						val, start_bad, last_bad + incr);
-					reserve_early(start_bad, last_bad + incr, "BAD RAM");
-				}
-				start_bad = last_bad = start_phys_aligned;
-			}
+		if (*start == pattern)
+			continue;
+		if (start_phys_aligned == last_bad + incr) {
+			last_bad += incr;
+			continue;
 		}
+		if (start_bad)
+			reserve_bad_mem(pattern, start_bad, last_bad + incr);
+		start_bad = last_bad = start_phys_aligned;
 	}
-	if (start_bad) {
-		printk(KERN_CONT "\n %016lx bad mem addr %010lx - %010lx reserved",
-			val, start_bad, last_bad + incr);
-		reserve_early(start_bad, last_bad + incr, "BAD RAM");
+	if (start_bad)
+		reserve_bad_mem(pattern, start_bad, last_bad + incr);
+}
+
+static void __init do_one_pass(u64 pattern, u64 start, u64 end)
+{
+	u64 size = 0;
+
+	while (start < end) {
+		start = find_e820_area_size(start, &size, 1);
+
+		/* done ? */
+		if (start >= end)
+			break;
+		if (start + size > end)
+			size = end - start;
+
+		printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
+		       (unsigned long long) start,
+		       (unsigned long long) start + size,
+		       (unsigned long long) cpu_to_be64(pattern));
+		memtest(pattern, start, size);
+
+		start += size;
 	}
 }
 
@@ -90,33 +107,22 @@ early_param("memtest", parse_memtest);
 
 void __init early_memtest(unsigned long start, unsigned long end)
 {
-	u64 t_start, t_size;
-	unsigned pattern;
+	unsigned int i;
+	unsigned int idx = 0;
 
 	if (!memtest_pattern)
 		return;
 
-	printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
-	for (pattern = 0; pattern < memtest_pattern; pattern++) {
-		t_start = start;
-		t_size = 0;
-		while (t_start < end) {
-			t_start = find_e820_area_size(t_start, &t_size, 1);
-
-			/* done ? */
-			if (t_start >= end)
-				break;
-			if (t_start + t_size > end)
-				t_size = end - t_start;
-
-			printk(KERN_CONT "\n %010llx - %010llx pattern %d",
-				(unsigned long long)t_start,
-				(unsigned long long)t_start + t_size, pattern);
-
-			memtest(t_start, t_size, pattern);
+	printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
+	for (i = 0; i < memtest_pattern; i++) {
+		idx = i % ARRAY_SIZE(patterns);
+		do_one_pass(patterns[idx], start, end);
+	}
 
-			t_start += t_size;
-		}
+	if (idx > 0) {
+		printk(KERN_INFO "early_memtest: wipe out "
+		       "test pattern from memory\n");
+		/* additional test with pattern 0 will do this */
+		do_one_pass(0, start, end);
 	}
-	printk(KERN_CONT "\n");
 }
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index d1f7439d173c..451fe95a0352 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -194,7 +194,7 @@ void *alloc_remap(int nid, unsigned long size)
 	size = ALIGN(size, L1_CACHE_BYTES);
 
 	if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
-		return 0;
+		return NULL;
 
 	node_remap_alloc_vaddr[nid] += size;
 	memset(allocation, 0, size);
@@ -423,32 +423,6 @@ void __init initmem_init(unsigned long start_pfn,
 	setup_bootmem_allocator();
 }
 
-void __init set_highmem_pages_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-	struct zone *zone;
-	int nid;
-
-	for_each_zone(zone) {
-		unsigned long zone_start_pfn, zone_end_pfn;
-
-		if (!is_highmem(zone))
-			continue;
-
-		zone_start_pfn = zone->zone_start_pfn;
-		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
-
-		nid = zone_to_nid(zone);
-		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
-			zone->name, nid, zone_start_pfn, zone_end_pfn);
-
-		add_highpages_with_active_regions(nid, zone_start_pfn,
-				zone_end_pfn);
-	}
-	totalram_pages += totalhigh_pages;
-#endif
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static int paddr_to_nid(u64 addr)
 {
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index deb1c1ab7868..64c9cf043cdd 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -166,7 +166,7 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
 	return shift;
 }
 
-int early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	return phys_to_nid(pfn << PAGE_SHIFT);
 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8ca0d8566fc8..8253bc97587e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -482,6 +482,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	pbase = (pte_t *)page_address(base);
 	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
 	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
+	/*
+	 * If we ever want to utilize the PAT bit, we need to
+	 * update this function to make sure it's converted from
+	 * bit 12 to bit 7 when we cross from the 2MB level to
+	 * the 4K level:
+	 */
+	WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
 
 #ifdef CONFIG_X86_64
 	if (level == PG_LEVEL_1G) {
@@ -508,18 +515,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 #endif
 
 	/*
-	 * Install the new, split up pagetable. Important details here:
-	 *
-	 * On Intel the NX bit of all levels must be cleared to make a
-	 * page executable. See section 4.13.2 of Intel 64 and IA-32
-	 * Architectures Software Developer's Manual).
+	 * Install the new, split up pagetable.
 	 *
-	 * Mark the entry present. The current mapping might be
-	 * set to not present, which we preserved above.
+	 * We use the standard kernel pagetable protections for the new
+	 * pagetable protections, the actual ptes set above control the
+	 * primary protection behavior:
 	 */
-	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
-	pgprot_val(ref_prot) |= _PAGE_PRESENT;
-	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
 	base = NULL;
 
 out_unlock:
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 05f9aef6818a..2ed37158012d 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,6 +11,7 @@
 #include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
@@ -634,6 +635,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
 }
 
 /*
+ * Change the memory type for the physial address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+{
+	unsigned long id_sz;
+
+	if (!pat_enabled || base >= __pa(high_memory))
+		return 0;
+
+	id_sz = (__pa(high_memory) < base + size) ?
+				__pa(high_memory) - base :
+				size;
+
+	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+		printk(KERN_INFO
+			"%s:%d ioremap_change_attr failed %s "
+			"for %Lx-%Lx\n",
+			current->comm, current->pid,
+			cattr_name(flags),
+			base, (unsigned long long)(base + size));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
  * Internal interface to reserve a range of physical memory with prot.
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
@@ -642,7 +670,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 				int strict_prot)
 {
 	int is_ram = 0;
-	int id_sz, ret;
+	int ret;
 	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
@@ -679,23 +707,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 			flags);
 	}
 
-	/* Need to keep identity mapping in sync */
-	if (paddr >= __pa(high_memory))
-		return 0;
-
-	id_sz = (__pa(high_memory) < paddr + size) ?
-				__pa(high_memory) - paddr :
-				size;
-
-	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
 		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
-			"for %Lx-%Lx\n",
-			current->comm, current->pid,
-			cattr_name(flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size));
 		return -EINVAL;
 	}
 	return 0;
@@ -877,6 +890,7 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
 	else
 		return pgprot_noncached(prot);
 }
+EXPORT_SYMBOL_GPL(pgprot_writecombine);
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 86f2ffc43c3d..5b7c7c8464fe 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -313,6 +313,24 @@ int ptep_clear_flush_young(struct vm_area_struct *vma,
 	return young;
 }
 
+/**
+ * reserve_top_address - reserves a hole in the top of kernel address space
+ * @reserve - size of hole to reserve
+ *
+ * Can be used to relocate the fixmap area and poke a hole in the top
+ * of kernel address space to make room for a hypervisor.
+ */
+void __init reserve_top_address(unsigned long reserve)
+{
+#ifdef CONFIG_X86_32
+	BUG_ON(fixmaps_set > 0);
+	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
+	       (int)-reserve);
+	__FIXADDR_TOP = -reserve - PAGE_SIZE;
+	__VMALLOC_RESERVE += reserve;
+#endif
+}
+
 int fixmaps_set;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 0951db9ee519..f2e477c91c1b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -20,6 +20,8 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+unsigned int __VMALLOC_RESERVE = 128 << 20;
+
 /*
  * Associate a virtual page frame with a given physical page frame
  * and protection flags for that frame.
@@ -97,22 +99,6 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 unsigned long __FIXADDR_TOP = 0xfffff000;
 EXPORT_SYMBOL(__FIXADDR_TOP);
 
-/**
- * reserve_top_address - reserves a hole in the top of kernel address space
- * @reserve - size of hole to reserve
- *
- * Can be used to relocate the fixmap area and poke a hole in the top
- * of kernel address space to make room for a hypervisor.
- */
-void __init reserve_top_address(unsigned long reserve)
-{
-	BUG_ON(fixmaps_set > 0);
-	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
-	       (int)-reserve);
-	__FIXADDR_TOP = -reserve - PAGE_SIZE;
-	__VMALLOC_RESERVE += reserve;
-}
-
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 15df1baee100..574c8bc95ef0 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -20,7 +20,7 @@
 #include <asm/proto.h>
 #include <asm/numa.h>
 #include <asm/e820.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
 #include <asm/uv/uv.h>
 
 int acpi_numa __initdata;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 14c5af4d11e6..a654d59e4483 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,7 +14,6 @@
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
 			= { &init_mm, 0, };
 
-#include <asm/genapic.h>
 /*
  *	Smarter SMP flushing macros.
  *		c/o Linus Torvalds.
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index e9f80c744cf3..10131fbdaada 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -78,8 +78,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	if (cpu_has_arch_perfmon) {
 		union cpuid10_eax eax;
 		eax.full = cpuid_eax(0xa);
-		if (counter_width < eax.split.bit_width)
-			counter_width = eax.split.bit_width;
+
+		/*
+		 * For Core2 (family 6, model 15), don't reset the
+		 * counter width:
+		 */
+		if (!(eax.split.version_id == 0 &&
+			current_cpu_data.x86 == 6 &&
+				current_cpu_data.x86_model == 15)) {
+
+			if (counter_width < eax.split.bit_width)
+				counter_width = eax.split.bit_width;
+		}
 	}
 
 	/* clear all counters */
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index 5601e829c387..8eb295e116f6 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -5,7 +5,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/nodemask.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
 #include <asm/mpspec.h>
 #include <asm/pci_x86.h>
 
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index d1e9b53f9d33..b641388d8286 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -8,7 +8,7 @@
 
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
 
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 000415947d93..9356547d8c01 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -18,7 +18,7 @@
 	.text
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
 
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 9c98cc6ba978..7133cdf9098b 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -85,8 +85,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	unsigned long addr, end;
 	unsigned offset;
 	end = (start + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= TASK_SIZE64)
-		end = TASK_SIZE64;
+	if (end >= TASK_SIZE_MAX)
+		end = TASK_SIZE_MAX;
 	end -= len;
 	/* This loses some more bits than a modulo, but is cheaper */
 	offset = get_random_int() & (PTRS_PER_PTE - 1);
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 87b9ab166423..b83e119fbeb0 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -6,7 +6,7 @@ config XEN
 	bool "Xen guest support"
 	select PARAVIRT
 	select PARAVIRT_CLOCK
-	depends on X86_64 || (X86_32 && X86_PAE && !(X86_VISWS || X86_VOYAGER))
+	depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
 	depends on X86_CMPXCHG && X86_TSC
 	help
 	  This is the Linux Xen port. Enabling this will allow the
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 95ff6a0e942a..c52f4034c7fd 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -554,14 +554,15 @@ static u32 xen_safe_apic_wait_icr_idle(void)
 	return 0;
 }
 
-static struct apic_ops xen_basic_apic_ops = {
-	.read = xen_apic_read,
-	.write = xen_apic_write,
-	.icr_read = xen_apic_icr_read,
-	.icr_write = xen_apic_icr_write,
-	.wait_icr_idle = xen_apic_wait_icr_idle,
-	.safe_wait_icr_idle = xen_safe_apic_wait_icr_idle,
-};
+static void set_xen_basic_apic_ops(void)
+{
+	apic->read = xen_apic_read;
+	apic->write = xen_apic_write;
+	apic->icr_read = xen_apic_icr_read;
+	apic->icr_write = xen_apic_icr_write;
+	apic->wait_icr_idle = xen_apic_wait_icr_idle;
+	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
+}
 
 #endif
 
@@ -898,7 +899,7 @@ asmlinkage void __init xen_start_kernel(void)
 	/*
 	 * set up the basic apic ops.
 	 */
-	apic_ops = &xen_basic_apic_ops;
+	set_xen_basic_apic_ops();
 #endif
 
 	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
@@ -939,6 +940,9 @@ asmlinkage void __init xen_start_kernel(void)
 	   possible map and a non-dummy shared_info. */
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
+	local_irq_disable();
+	early_boot_irqs_off();
+
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
 
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 63d49a523ed3..1a5ff24e29c0 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -8,7 +8,7 @@
8 8
 #include <asm/boot.h>
 #include <asm/asm.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 
 #include <xen/interface/elfnote.h>
 #include <asm/xen/interface.h>