-rw-r--r--  .gitignore | 1
-rw-r--r--  Documentation/filesystems/proc.txt | 30
-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  Makefile | 8
-rw-r--r--  arch/i386/.gitignore | 1
-rw-r--r--  arch/i386/Kconfig | 1
-rw-r--r--  arch/i386/Kconfig.cpu | 36
-rw-r--r--  arch/x86/ia32/Makefile | 25
-rw-r--r--  arch/x86/ia32/ia32_aout.c | 2
-rw-r--r--  arch/x86/ia32/ia32_binfmt.c | 5
-rw-r--r--  arch/x86/ia32/ptrace32.c | 10
-rw-r--r--  arch/x86/kernel/.gitignore | 1
-rw-r--r--  arch/x86/kernel/Makefile | 4
-rw-r--r--  arch/x86/kernel/Makefile_32 | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 65
-rw-r--r--  arch/x86/kernel/apic_32.c | 1
-rw-r--r--  arch/x86/kernel/apic_64.c | 4
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c | 14
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 15
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 2
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 17
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 15
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p4.c | 1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 8
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 14
-rw-r--r--  arch/x86/kernel/cpuid.c | 28
-rw-r--r--  arch/x86/kernel/entry_32.S | 2
-rw-r--r--  arch/x86/kernel/entry_64.S | 2
-rw-r--r--  arch/x86/kernel/genapic_64.c | 2
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c | 1
-rw-r--r--  arch/x86/kernel/head_32.S | 21
-rw-r--r--  arch/x86/kernel/i386_ksyms_32.c | 1
-rw-r--r--  arch/x86/kernel/i8259_32.c | 6
-rw-r--r--  arch/x86/kernel/i8259_64.c | 6
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 5
-rw-r--r--  arch/x86/kernel/io_apic_64.c | 11
-rw-r--r--  arch/x86/kernel/irq_32.c | 49
-rw-r--r--  arch/x86/kernel/irq_64.c | 48
-rw-r--r--  arch/x86/kernel/ldt_32.c | 14
-rw-r--r--  arch/x86/kernel/ldt_64.c | 14
-rw-r--r--  arch/x86/kernel/mce_64.c | 4
-rw-r--r--  arch/x86/kernel/mce_amd_64.c | 1
-rw-r--r--  arch/x86/kernel/mce_intel_64.c | 1
-rw-r--r--  arch/x86/kernel/msr.c | 4
-rw-r--r--  arch/x86/kernel/paravirt_32.c | 224
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 41
-rw-r--r--  arch/x86/kernel/pci-dma_32.c | 1
-rw-r--r--  arch/x86/kernel/pci-dma_64.c | 8
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 4
-rw-r--r--  arch/x86/kernel/ptrace_32.c | 4
-rw-r--r--  arch/x86/kernel/ptrace_64.c | 4
-rw-r--r--  arch/x86/kernel/setup_64.c | 6
-rw-r--r--  arch/x86/kernel/signal_32.c | 2
-rw-r--r--  arch/x86/kernel/smp_32.c | 10
-rw-r--r--  arch/x86/kernel/smp_64.c | 3
-rw-r--r--  arch/x86/kernel/smpboot_32.c | 12
-rw-r--r--  arch/x86/kernel/smpboot_64.c | 5
-rw-r--r--  arch/x86/kernel/stacktrace.c | 2
-rw-r--r--  arch/x86/kernel/tce_64.c | 4
-rw-r--r--  arch/x86/kernel/topology.c | 6
-rw-r--r--  arch/x86/kernel/trampoline_32.S | 4
-rw-r--r--  arch/x86/kernel/trampoline_64.S | 7
-rw-r--r--  arch/x86/kernel/traps_32.c | 37
-rw-r--r--  arch/x86/kernel/traps_64.c | 4
-rw-r--r--  arch/x86/kernel/tsc_32.c | 6
-rw-r--r--  arch/x86/kernel/vmi_32.c | 201
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 3
-rw-r--r--  arch/x86/lib/bitstr_64.c | 2
-rw-r--r--  arch/x86/lib/msr-on-cpu.c | 62
-rw-r--r--  arch/x86/lib/rwlock_64.S | 2
-rw-r--r--  arch/x86/lib/semaphore_32.S | 4
-rw-r--r--  arch/x86/lib/string_32.c | 20
-rw-r--r--  arch/x86/mach-default/setup.c | 6
-rw-r--r--  arch/x86/mach-es7000/es7000plat.c | 32
-rw-r--r--  arch/x86/mach-generic/probe.c | 2
-rw-r--r--  arch/x86/mach-voyager/setup.c | 6
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 5
-rw-r--r--  arch/x86/mm/discontig_32.c | 10
-rw-r--r--  arch/x86/mm/fault_32.c | 17
-rw-r--r--  arch/x86/mm/init_32.c | 33
-rw-r--r--  arch/x86/mm/numa_64.c | 6
-rw-r--r--  arch/x86/mm/pageattr_32.c | 4
-rw-r--r--  arch/x86/mm/pageattr_64.c | 3
-rw-r--r--  arch/x86/mm/pgtable_32.c | 6
-rw-r--r--  arch/x86/mm/srat_64.c | 4
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 2
-rw-r--r--  arch/x86/pci/common.c | 16
-rw-r--r--  arch/x86/vdso/Makefile | 17
-rw-r--r--  arch/x86/vdso/vdso.lds.S | 16
-rw-r--r--  arch/x86/vdso/vvar.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 233
-rw-r--r--  arch/x86/xen/mmu.c | 145
-rw-r--r--  arch/x86/xen/multicalls.c | 52
-rw-r--r--  arch/x86/xen/multicalls.h | 5
-rw-r--r--  arch/x86/xen/smp.c | 15
-rw-r--r--  arch/x86/xen/time.c | 6
-rw-r--r--  arch/x86/xen/xen-ops.h | 10
-rw-r--r--  arch/x86_64/Kconfig | 14
-rw-r--r--  arch/x86_64/Makefile | 8
-rw-r--r--  block/ll_rw_blk.c | 10
-rw-r--r--  drivers/char/agp/efficeon-agp.c | 11
-rw-r--r--  drivers/char/hvc_lguest.c | 2
-rw-r--r--  drivers/lguest/core.c | 6
-rw-r--r--  drivers/lguest/lguest.c | 152
-rw-r--r--  drivers/lguest/lguest_bus.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 3
-rw-r--r--  drivers/mmc/host/at91_mci.c | 4
-rw-r--r--  drivers/net/wireless/Kconfig | 6
-rw-r--r--  drivers/net/wireless/libertas/Makefile | 2
-rw-r--r--  drivers/net/wireless/libertas/defs.h | 2
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 1079
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.h | 45
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/video/Kconfig | 1
-rw-r--r--  include/asm-um/alternative-asm.h (renamed from include/asm-um/alternative-asm.i) | 2
-rw-r--r--  include/asm-um/frame.h (renamed from include/asm-um/frame.i) | 2
-rw-r--r--  include/asm-x86/Kbuild | 36
-rw-r--r--  include/asm-x86/agp.h | 43
-rw-r--r--  include/asm-x86/agp_32.h | 36
-rw-r--r--  include/asm-x86/agp_64.h | 34
-rw-r--r--  include/asm-x86/alternative-asm.h | 22
-rw-r--r--  include/asm-x86/alternative-asm.i | 5
-rw-r--r--  include/asm-x86/alternative-asm_32.i | 12
-rw-r--r--  include/asm-x86/alternative-asm_64.i | 12
-rw-r--r--  include/asm-x86/atomic_64.h | 2
-rw-r--r--  include/asm-x86/auxvec.h | 23
-rw-r--r--  include/asm-x86/auxvec_32.h | 11
-rw-r--r--  include/asm-x86/auxvec_64.h | 6
-rw-r--r--  include/asm-x86/bitops_64.h | 2
-rw-r--r--  include/asm-x86/bug.h | 41
-rw-r--r--  include/asm-x86/bug_32.h | 37
-rw-r--r--  include/asm-x86/bug_64.h | 34
-rw-r--r--  include/asm-x86/bugs.h | 11
-rw-r--r--  include/asm-x86/bugs_32.h | 12
-rw-r--r--  include/asm-x86/bugs_64.h | 6
-rw-r--r--  include/asm-x86/cache.h | 23
-rw-r--r--  include/asm-x86/cache_32.h | 14
-rw-r--r--  include/asm-x86/cache_64.h | 26
-rw-r--r--  include/asm-x86/cacheflush.h | 43
-rw-r--r--  include/asm-x86/cacheflush_32.h | 39
-rw-r--r--  include/asm-x86/cacheflush_64.h | 35
-rw-r--r--  include/asm-x86/cpu.h | 3
-rw-r--r--  include/asm-x86/cputime.h | 6
-rw-r--r--  include/asm-x86/cputime_32.h | 6
-rw-r--r--  include/asm-x86/cputime_64.h | 6
-rw-r--r--  include/asm-x86/debugreg.h | 79
-rw-r--r--  include/asm-x86/debugreg_32.h | 64
-rw-r--r--  include/asm-x86/debugreg_64.h | 65
-rw-r--r--  include/asm-x86/delay.h | 36
-rw-r--r--  include/asm-x86/delay_32.h | 31
-rw-r--r--  include/asm-x86/delay_64.h | 30
-rw-r--r--  include/asm-x86/device.h | 13
-rw-r--r--  include/asm-x86/device_32.h | 15
-rw-r--r--  include/asm-x86/device_64.h | 15
-rw-r--r--  include/asm-x86/dmi.h | 36
-rw-r--r--  include/asm-x86/dmi_32.h | 11
-rw-r--r--  include/asm-x86/dmi_64.h | 24
-rw-r--r--  include/asm-x86/edac.h | 21
-rw-r--r--  include/asm-x86/edac_32.h | 18
-rw-r--r--  include/asm-x86/edac_64.h | 18
-rw-r--r--  include/asm-x86/errno.h | 14
-rw-r--r--  include/asm-x86/errno_32.h | 6
-rw-r--r--  include/asm-x86/errno_64.h | 6
-rw-r--r--  include/asm-x86/fb.h | 20
-rw-r--r--  include/asm-x86/fb_32.h | 17
-rw-r--r--  include/asm-x86/fb_64.h | 19
-rw-r--r--  include/asm-x86/floppy.h | 281
-rw-r--r--  include/asm-x86/floppy_32.h | 280
-rw-r--r--  include/asm-x86/floppy_64.h | 279
-rw-r--r--  include/asm-x86/frame.h (renamed from include/asm-x86/frame.i) | 4
-rw-r--r--  include/asm-x86/hardirq_32.h | 6
-rw-r--r--  include/asm-x86/hw_irq_64.h | 35
-rw-r--r--  include/asm-x86/intel_arch_perfmon.h | 36
-rw-r--r--  include/asm-x86/intel_arch_perfmon_32.h | 31
-rw-r--r--  include/asm-x86/intel_arch_perfmon_64.h | 31
-rw-r--r--  include/asm-x86/io_32.h | 17
-rw-r--r--  include/asm-x86/ioctls.h | 98
-rw-r--r--  include/asm-x86/ioctls_32.h | 87
-rw-r--r--  include/asm-x86/ioctls_64.h | 86
-rw-r--r--  include/asm-x86/ipcbuf.h | 42
-rw-r--r--  include/asm-x86/ipcbuf_32.h | 29
-rw-r--r--  include/asm-x86/ipcbuf_64.h | 29
-rw-r--r--  include/asm-x86/kdebug.h | 36
-rw-r--r--  include/asm-x86/kdebug_32.h | 27
-rw-r--r--  include/asm-x86/kdebug_64.h | 32
-rw-r--r--  include/asm-x86/kmap_types.h | 30
-rw-r--r--  include/asm-x86/kmap_types_32.h | 30
-rw-r--r--  include/asm-x86/kmap_types_64.h | 19
-rw-r--r--  include/asm-x86/ldt.h | 51
-rw-r--r--  include/asm-x86/ldt_32.h | 32
-rw-r--r--  include/asm-x86/ldt_64.h | 36
-rw-r--r--  include/asm-x86/mach-default/mach_apicdef.h | 8
-rw-r--r--  include/asm-x86/mach-visws/cobalt.h | 8
-rw-r--r--  include/asm-x86/mach-visws/lithium.h | 8
-rw-r--r--  include/asm-x86/mce.h | 128
-rw-r--r--  include/asm-x86/mce_32.h | 11
-rw-r--r--  include/asm-x86/mce_64.h | 115
-rw-r--r--  include/asm-x86/mman.h | 32
-rw-r--r--  include/asm-x86/mman_32.h | 17
-rw-r--r--  include/asm-x86/mman_64.h | 19
-rw-r--r--  include/asm-x86/mmu_32.h | 4
-rw-r--r--  include/asm-x86/mmu_64.h | 4
-rw-r--r--  include/asm-x86/namei.h | 16
-rw-r--r--  include/asm-x86/namei_32.h | 17
-rw-r--r--  include/asm-x86/namei_64.h | 11
-rw-r--r--  include/asm-x86/numa_64.h | 3
-rw-r--r--  include/asm-x86/param.h | 31
-rw-r--r--  include/asm-x86/param_32.h | 22
-rw-r--r--  include/asm-x86/param_64.h | 22
-rw-r--r--  include/asm-x86/paravirt.h | 487
-rw-r--r--  include/asm-x86/parport.h | 15
-rw-r--r--  include/asm-x86/parport_32.h | 18
-rw-r--r--  include/asm-x86/parport_64.h | 18
-rw-r--r--  include/asm-x86/pda.h | 6
-rw-r--r--  include/asm-x86/pgtable-3level-defs.h | 2
-rw-r--r--  include/asm-x86/processor_32.h | 31
-rw-r--r--  include/asm-x86/processor_64.h | 27
-rw-r--r--  include/asm-x86/ptrace-abi.h | 90
-rw-r--r--  include/asm-x86/ptrace-abi_32.h | 39
-rw-r--r--  include/asm-x86/ptrace-abi_64.h | 51
-rw-r--r--  include/asm-x86/resource.h | 14
-rw-r--r--  include/asm-x86/resource_32.h | 6
-rw-r--r--  include/asm-x86/resource_64.h | 6
-rw-r--r--  include/asm-x86/rtc.h | 6
-rw-r--r--  include/asm-x86/rtc_32.h | 10
-rw-r--r--  include/asm-x86/rtc_64.h | 10
-rw-r--r--  include/asm-x86/rwlock.h | 14
-rw-r--r--  include/asm-x86/rwlock_32.h | 25
-rw-r--r--  include/asm-x86/rwlock_64.h | 26
-rw-r--r--  include/asm-x86/sections.h | 6
-rw-r--r--  include/asm-x86/sections_32.h | 7
-rw-r--r--  include/asm-x86/sections_64.h | 7
-rw-r--r--  include/asm-x86/sembuf.h | 37
-rw-r--r--  include/asm-x86/sembuf_32.h | 25
-rw-r--r--  include/asm-x86/sembuf_64.h | 25
-rw-r--r--  include/asm-x86/serial.h | 30
-rw-r--r--  include/asm-x86/serial_32.h | 29
-rw-r--r--  include/asm-x86/serial_64.h | 29
-rw-r--r--  include/asm-x86/shmparam.h | 19
-rw-r--r--  include/asm-x86/shmparam_32.h | 6
-rw-r--r--  include/asm-x86/shmparam_64.h | 6
-rw-r--r--  include/asm-x86/siginfo.h | 21
-rw-r--r--  include/asm-x86/siginfo_32.h | 6
-rw-r--r--  include/asm-x86/siginfo_64.h | 8
-rw-r--r--  include/asm-x86/smp_32.h | 9
-rw-r--r--  include/asm-x86/smp_64.h | 1
-rw-r--r--  include/asm-x86/sockios.h | 26
-rw-r--r--  include/asm-x86/sockios_32.h | 13
-rw-r--r--  include/asm-x86/sockios_64.h | 13
-rw-r--r--  include/asm-x86/stacktrace.h | 2
-rw-r--r--  include/asm-x86/string_32.h | 3
-rw-r--r--  include/asm-x86/system_32.h | 6
-rw-r--r--  include/asm-x86/system_64.h | 16
-rw-r--r--  include/asm-x86/termbits.h | 211
-rw-r--r--  include/asm-x86/termbits_32.h | 198
-rw-r--r--  include/asm-x86/termbits_64.h | 198
-rw-r--r--  include/asm-x86/termios.h | 108
-rw-r--r--  include/asm-x86/termios_32.h | 90
-rw-r--r--  include/asm-x86/termios_64.h | 90
-rw-r--r--  include/asm-x86/tlb.h | 14
-rw-r--r--  include/asm-x86/tlb_32.h | 20
-rw-r--r--  include/asm-x86/tlb_64.h | 13
-rw-r--r--  include/asm-x86/types.h | 77
-rw-r--r--  include/asm-x86/types_32.h | 64
-rw-r--r--  include/asm-x86/types_64.h | 55
-rw-r--r--  include/asm-x86/ucontext.h | 25
-rw-r--r--  include/asm-x86/ucontext_32.h | 12
-rw-r--r--  include/asm-x86/ucontext_64.h | 12
-rw-r--r--  include/asm-x86/unaligned.h | 42
-rw-r--r--  include/asm-x86/unaligned_32.h | 37
-rw-r--r--  include/asm-x86/unaligned_64.h | 37
-rw-r--r--  include/asm-x86/unistd_64.h | 619
-rw-r--r--  include/asm-x86/unwind.h | 18
-rw-r--r--  include/asm-x86/unwind_32.h | 13
-rw-r--r--  include/asm-x86/unwind_64.h | 12
-rw-r--r--  include/linux/mmc/sdio_ids.h | 6
-rw-r--r--  include/xen/interface/vcpu.h | 5
-rw-r--r--  kernel/time/tick-broadcast.c | 18
-rw-r--r--  mm/Kconfig | 1
280 files changed, 4799 insertions, 4931 deletions
diff --git a/.gitignore b/.gitignore
index 27c3e839b54e..22fb8fa9bc3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,6 +26,7 @@ vmlinux*
26!vmlinux.lds.S 26!vmlinux.lds.S
27System.map 27System.map
28Module.symvers 28Module.symvers
29!.gitignore
29 30
30# 31#
31# Generated include files 32# Generated include files
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 4a37e25e694c..e5c1df52a876 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -347,7 +347,35 @@ connects the CPUs in a SMP system. This means that an error has been detected,
347the IO-APIC automatically retry the transmission, so it should not be a big 347the IO-APIC automatically retry the transmission, so it should not be a big
348problem, but you should read the SMP-FAQ. 348problem, but you should read the SMP-FAQ.
349 349
350In this context it could be interesting to note the new irq directory in 2.4. 350In 2.6.2* /proc/interrupts was expanded again. This time the goal was for
351/proc/interrupts to display every IRQ vector in use by the system, not
352just those considered 'most important'. The new vectors are:
353
354 THR -- interrupt raised when a machine check threshold counter
355 (typically counting ECC corrected errors of memory or cache) exceeds
356 a configurable threshold. Only available on some systems.
357
358 TRM -- a thermal event interrupt occurs when a temperature threshold
359 has been exceeded for the CPU. This interrupt may also be generated
360 when the temperature drops back to normal.
361
362 SPU -- a spurious interrupt is some interrupt that was raised then lowered
363 by some IO device before it could be fully processed by the APIC. Hence
364 the APIC sees the interrupt but does not know what device it came from.
 365 For this case the APIC will generate the interrupt with an IRQ vector
366 of 0xff. This might also be generated by chipset bugs.
367
368 RES, CAL, TLB -- rescheduling, call and TLB flush interrupts are
369 sent from one CPU to another per the needs of the OS. Typically,
370 their statistics are used by kernel developers and interested users to
 371 determine the occurrence of interrupts of the given type.
372
 373The above IRQ vectors are displayed only when relevant. For example,
374the threshold vector does not exist on x86_64 platforms. Others are
375suppressed when the system is a uniprocessor. As of this writing, only
376i386 and x86_64 platforms support the new IRQ vector displays.
377
378Of some interest is the introduction of the /proc/irq directory to 2.4.
351It could be used to set IRQ to CPU affinity, this means that you can "hook" an 379It could be used to set IRQ to CPU affinity, this means that you can "hook" an
352IRQ to only one CPU, or to exclude a CPU of handling IRQs. The contents of the 380IRQ to only one CPU, or to exclude a CPU of handling IRQs. The contents of the
353irq subdir is one subdir for each IRQ, and one file; prof_cpu_mask 381irq subdir is one subdir for each IRQ, and one file; prof_cpu_mask
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 63bda3637085..98cf90f2631d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -994,6 +994,8 @@ and is between 256 and 4096 characters. It is defined in the file
994 994
995 mce [X86-32] Machine Check Exception 995 mce [X86-32] Machine Check Exception
996 996
997 mce=option [X86-64] See Documentation/x86_64/boot-options.txt
998
997 md= [HW] RAID subsystems devices and level 999 md= [HW] RAID subsystems devices and level
998 See Documentation/md.txt. 1000 See Documentation/md.txt.
999 1001
diff --git a/Makefile b/Makefile
index ed65de7078c7..529b9048d97e 100644
--- a/Makefile
+++ b/Makefile
@@ -1325,8 +1325,8 @@ ALLSOURCE_ARCHS := $(ARCH) $(SRCARCH)
1325endif 1325endif
1326 1326
1327define find-sources 1327define find-sources
1328 ( for ARCH in $(ALLSOURCE_ARCHS) ; do \ 1328 ( for arch in $(ALLSOURCE_ARCHS) ; do \
1329 find $(__srctree)arch/$${SRCARCH} $(RCS_FIND_IGNORE) \ 1329 find $(__srctree)arch/$${arch} $(RCS_FIND_IGNORE) \
1330 -name $1 -print; \ 1330 -name $1 -print; \
1331 done ; \ 1331 done ; \
1332 find $(__srctree)security/selinux/include $(RCS_FIND_IGNORE) \ 1332 find $(__srctree)security/selinux/include $(RCS_FIND_IGNORE) \
@@ -1334,8 +1334,8 @@ define find-sources
1334 find $(__srctree)include $(RCS_FIND_IGNORE) \ 1334 find $(__srctree)include $(RCS_FIND_IGNORE) \
1335 \( -name config -o -name 'asm-*' \) -prune \ 1335 \( -name config -o -name 'asm-*' \) -prune \
1336 -o -name $1 -print; \ 1336 -o -name $1 -print; \
1337 for ARCH in $(ALLINCLUDE_ARCHS) ; do \ 1337 for arch in $(ALLINCLUDE_ARCHS) ; do \
1338 find $(__srctree)include/asm-$${SRCARCH} $(RCS_FIND_IGNORE) \ 1338 find $(__srctree)include/asm-$${arch} $(RCS_FIND_IGNORE) \
1339 -name $1 -print; \ 1339 -name $1 -print; \
1340 done ; \ 1340 done ; \
1341 find $(__srctree)include/asm-generic $(RCS_FIND_IGNORE) \ 1341 find $(__srctree)include/asm-generic $(RCS_FIND_IGNORE) \
diff --git a/arch/i386/.gitignore b/arch/i386/.gitignore
new file mode 100644
index 000000000000..36ef4c374d25
--- /dev/null
+++ b/arch/i386/.gitignore
@@ -0,0 +1 @@
boot
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 7a95c58947e4..b84d5050e92e 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -146,6 +146,7 @@ config X86_ELAN
146 146
147config X86_VOYAGER 147config X86_VOYAGER
148 bool "Voyager (NCR)" 148 bool "Voyager (NCR)"
149 select SMP if !BROKEN
149 help 150 help
150 Voyager is an MCA-based 32-way capable SMP architecture proprietary 151 Voyager is an MCA-based 32-way capable SMP architecture proprietary
151 to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. 152 to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index 11a24d54f27b..0e2adadf5905 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -109,16 +109,42 @@ config MCORE2
109 help 109 help
110 Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx) 110 Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx)
111 CPUs. You can distinguish newer from older Xeons by the CPU family 111 CPUs. You can distinguish newer from older Xeons by the CPU family
112 in /proc/cpuinfo. Newer ones have 6. 112 in /proc/cpuinfo. Newer ones have 6 and older ones 15 (not a typo)
113 113
114config MPENTIUM4 114config MPENTIUM4
115 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon" 115 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
116 help 116 help
117 Select this for Intel Pentium 4 chips. This includes the 117 Select this for Intel Pentium 4 chips. This includes the
118 Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M 118 Pentium 4, Pentium D, P4-based Celeron and Xeon, and
119 (not Pentium M) chips. This option enables compile flags 119 Pentium-4 M (not Pentium M) chips. This option enables compile
120 optimized for the chip, uses the correct cache shift, and 120 flags optimized for the chip, uses the correct cache line size, and
121 applies any applicable Pentium III optimizations. 121 applies any applicable optimizations.
122
123 CPUIDs: F[0-6][1-A] (in /proc/cpuinfo show = cpu family : 15 )
124
125 Select this for:
126 Pentiums (Pentium 4, Pentium D, Celeron, Celeron D) corename:
127 -Willamette
128 -Northwood
129 -Mobile Pentium 4
130 -Mobile Pentium 4 M
131 -Extreme Edition (Gallatin)
132 -Prescott
133 -Prescott 2M
134 -Cedar Mill
135 -Presler
 136 -Smithfield
137 Xeons (Intel Xeon, Xeon MP, Xeon LV, Xeon MV) corename:
138 -Foster
139 -Prestonia
140 -Gallatin
141 -Nocona
142 -Irwindale
143 -Cranford
144 -Potomac
145 -Paxville
146 -Dempsey
147
122 148
123config MK6 149config MK6
124 bool "K6/K6-II/K6-III" 150 bool "K6/K6-II/K6-III"
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index cdae36435e21..e2edda255a84 100644
--- a/arch/x86/ia32/Makefile
+++ b/arch/x86/ia32/Makefile
@@ -18,18 +18,35 @@ $(obj)/syscall32_syscall.o: \
18 $(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so) 18 $(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
19 19
20# Teach kbuild about targets 20# Teach kbuild about targets
21targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so) 21targets := $(foreach F,$(addprefix vsyscall-,sysenter syscall),\
22 $F.o $F.so $F.so.dbg)
22 23
23# The DSO images are built using a special linker script 24# The DSO images are built using a special linker script
24quiet_cmd_syscall = SYSCALL $@ 25quiet_cmd_syscall = SYSCALL $@
25 cmd_syscall = $(CC) -m32 -nostdlib -shared -s \ 26 cmd_syscall = $(CC) -m32 -nostdlib -shared \
26 $(call ld-option, -Wl$(comma)--hash-style=sysv) \ 27 $(call ld-option, -Wl$(comma)--hash-style=sysv) \
27 -Wl,-soname=linux-gate.so.1 -o $@ \ 28 -Wl,-soname=linux-gate.so.1 -o $@ \
28 -Wl,-T,$(filter-out FORCE,$^) 29 -Wl,-T,$(filter-out FORCE,$^)
29 30
30$(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \ 31$(obj)/%.so: OBJCOPYFLAGS := -S
31$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE 32$(obj)/%.so: $(obj)/%.so.dbg FORCE
33 $(call if_changed,objcopy)
34
35$(obj)/vsyscall-sysenter.so.dbg $(obj)/vsyscall-syscall.so.dbg: \
36$(obj)/vsyscall-%.so.dbg: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
32 $(call if_changed,syscall) 37 $(call if_changed,syscall)
33 38
34AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 39AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
35AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 40AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
41
42vdsos := vdso32-sysenter.so vdso32-syscall.so
43
44quiet_cmd_vdso_install = INSTALL $@
45 cmd_vdso_install = cp $(@:vdso32-%.so=$(obj)/vsyscall-%.so.dbg) \
46 $(MODLIB)/vdso/$@
47
48$(vdsos):
49 @mkdir -p $(MODLIB)/vdso
50 $(call cmd,vdso_install)
51
52vdso_install: $(vdsos)
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 7cf1c29bf90e..f82e1a94fcb7 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -420,6 +420,8 @@ beyond_if:
420 (regs)->eflags = 0x200; 420 (regs)->eflags = 0x200;
421 (regs)->cs = __USER32_CS; 421 (regs)->cs = __USER32_CS;
422 (regs)->ss = __USER32_DS; 422 (regs)->ss = __USER32_DS;
423 regs->r8 = regs->r9 = regs->r10 = regs->r11 =
424 regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
423 set_fs(USER_DS); 425 set_fs(USER_DS);
424 if (unlikely(current->ptrace & PT_PTRACED)) { 426 if (unlikely(current->ptrace & PT_PTRACED)) {
425 if (current->ptrace & PT_TRACE_EXEC) 427 if (current->ptrace & PT_TRACE_EXEC)
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
index d3c53e8b05c0..118b9f9ff499 100644
--- a/arch/x86/ia32/ia32_binfmt.c
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -112,11 +112,8 @@ struct elf_prpsinfo
112 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ 112 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
113}; 113};
114 114
115#define __STR(x) #x
116#define STR(x) __STR(x)
117
118#define _GET_SEG(x) \ 115#define _GET_SEG(x) \
119 ({ __u32 seg; asm("movl %%" STR(x) ",%0" : "=r"(seg)); seg; }) 116 ({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; })
120 117
121/* Assumes current==process to be dumped */ 118/* Assumes current==process to be dumped */
122#define ELF_CORE_COPY_REGS(pr_reg, regs) \ 119#define ELF_CORE_COPY_REGS(pr_reg, regs) \
diff --git a/arch/x86/ia32/ptrace32.c b/arch/x86/ia32/ptrace32.c
index 4a233ad6269c..f52770ef0ee3 100644
--- a/arch/x86/ia32/ptrace32.c
+++ b/arch/x86/ia32/ptrace32.c
@@ -228,6 +228,8 @@ static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
228 return ret; 228 return ret;
229} 229}
230 230
231#define COMPAT_GDT_ENTRY_TLS_MIN 6
232
231asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) 233asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
232{ 234{
233 struct task_struct *child; 235 struct task_struct *child;
@@ -246,8 +248,6 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
246 case PTRACE_SYSCALL: 248 case PTRACE_SYSCALL:
247 case PTRACE_OLDSETOPTIONS: 249 case PTRACE_OLDSETOPTIONS:
248 case PTRACE_SETOPTIONS: 250 case PTRACE_SETOPTIONS:
249 case PTRACE_SET_THREAD_AREA:
250 case PTRACE_GET_THREAD_AREA:
251 return sys_ptrace(request, pid, addr, data); 251 return sys_ptrace(request, pid, addr, data);
252 252
253 default: 253 default:
@@ -271,6 +271,12 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
271 case PTRACE_SETSIGINFO: 271 case PTRACE_SETSIGINFO:
272 case PTRACE_GETSIGINFO: 272 case PTRACE_GETSIGINFO:
273 return ptrace32_siginfo(request, pid, addr, data); 273 return ptrace32_siginfo(request, pid, addr, data);
274
275 case PTRACE_SET_THREAD_AREA:
276 case PTRACE_GET_THREAD_AREA:
277 return sys_ptrace(request, pid,
278 addr + GDT_ENTRY_TLS_MIN - COMPAT_GDT_ENTRY_TLS_MIN,
279 data);
274 } 280 }
275 281
276 child = ptrace_get_task_struct(pid); 282 child = ptrace_get_task_struct(pid);
diff --git a/arch/x86/kernel/.gitignore b/arch/x86/kernel/.gitignore
index 40836ad9079c..4ea38a39aed4 100644
--- a/arch/x86/kernel/.gitignore
+++ b/arch/x86/kernel/.gitignore
@@ -1 +1,2 @@
1vsyscall.lds 1vsyscall.lds
2vsyscall_32.lds
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 45855c97923e..38573340b143 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -3,3 +3,7 @@ include ${srctree}/arch/x86/kernel/Makefile_32
3else 3else
4include ${srctree}/arch/x86/kernel/Makefile_64 4include ${srctree}/arch/x86/kernel/Makefile_64
5endif 5endif
6
7# Workaround to delete .lds files with make clean
8# The problem is that we do not enter Makefile_32 with make clean.
9clean-files := vsyscall*.lds vsyscall*.so
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index 7ff02063b858..a3fa11f8f460 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -51,7 +51,7 @@ obj-$(CONFIG_SCx200) += scx200_32.o
51# We must build both images before we can assemble it. 51# We must build both images before we can assemble it.
52# Note: kbuild does not track this dependency due to usage of .incbin 52# Note: kbuild does not track this dependency due to usage of .incbin
53$(obj)/vsyscall_32.o: $(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so 53$(obj)/vsyscall_32.o: $(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so
54targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so) 54targets += $(foreach F,int80 sysenter,vsyscall-$F_32.o vsyscall-$F_32.so)
55targets += vsyscall-note_32.o vsyscall_32.lds 55targets += vsyscall-note_32.o vsyscall_32.lds
56 56
57# The DSO images are built using a special linker script. 57# The DSO images are built using a special linker script.
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 11b03d3c6fda..3bd2688bd443 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -63,11 +63,11 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt);
63/* Use inline assembly to define this because the nops are defined 63/* Use inline assembly to define this because the nops are defined
64 as inline assembly strings in the include files and we cannot 64 as inline assembly strings in the include files and we cannot
65 get them easily into strings. */ 65 get them easily into strings. */
66asm("\t.data\nintelnops: " 66asm("\t.section .rodata, \"a\"\nintelnops: "
67 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 67 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
68 GENERIC_NOP7 GENERIC_NOP8); 68 GENERIC_NOP7 GENERIC_NOP8);
69extern unsigned char intelnops[]; 69extern const unsigned char intelnops[];
70static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 70static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
71 NULL, 71 NULL,
72 intelnops, 72 intelnops,
73 intelnops + 1, 73 intelnops + 1,
@@ -81,11 +81,11 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
81#endif 81#endif
82 82
83#ifdef K8_NOP1 83#ifdef K8_NOP1
84asm("\t.data\nk8nops: " 84asm("\t.section .rodata, \"a\"\nk8nops: "
85 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 85 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
86 K8_NOP7 K8_NOP8); 86 K8_NOP7 K8_NOP8);
87extern unsigned char k8nops[]; 87extern const unsigned char k8nops[];
88static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 88static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
89 NULL, 89 NULL,
90 k8nops, 90 k8nops,
91 k8nops + 1, 91 k8nops + 1,
@@ -99,11 +99,11 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
99#endif 99#endif
100 100
101#ifdef K7_NOP1 101#ifdef K7_NOP1
102asm("\t.data\nk7nops: " 102asm("\t.section .rodata, \"a\"\nk7nops: "
103 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 103 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
104 K7_NOP7 K7_NOP8); 104 K7_NOP7 K7_NOP8);
105extern unsigned char k7nops[]; 105extern const unsigned char k7nops[];
106static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 106static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
107 NULL, 107 NULL,
108 k7nops, 108 k7nops,
109 k7nops + 1, 109 k7nops + 1,
@@ -116,28 +116,49 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
116}; 116};
117#endif 117#endif
118 118
119#ifdef P6_NOP1
120asm("\t.section .rodata, \"a\"\np6nops: "
121 P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
122 P6_NOP7 P6_NOP8);
123extern const unsigned char p6nops[];
124static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
125 NULL,
126 p6nops,
127 p6nops + 1,
128 p6nops + 1 + 2,
129 p6nops + 1 + 2 + 3,
130 p6nops + 1 + 2 + 3 + 4,
131 p6nops + 1 + 2 + 3 + 4 + 5,
132 p6nops + 1 + 2 + 3 + 4 + 5 + 6,
133 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
134};
135#endif
136
119#ifdef CONFIG_X86_64 137#ifdef CONFIG_X86_64
120 138
121extern char __vsyscall_0; 139extern char __vsyscall_0;
122static inline unsigned char** find_nop_table(void) 140static inline const unsigned char*const * find_nop_table(void)
123{ 141{
124 return k8_nops; 142 return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
143 boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
125} 144}
126 145
127#else /* CONFIG_X86_64 */ 146#else /* CONFIG_X86_64 */
128 147
129static struct nop { 148static const struct nop {
130 int cpuid; 149 int cpuid;
131 unsigned char **noptable; 150 const unsigned char *const *noptable;
132} noptypes[] = { 151} noptypes[] = {
133 { X86_FEATURE_K8, k8_nops }, 152 { X86_FEATURE_K8, k8_nops },
134 { X86_FEATURE_K7, k7_nops }, 153 { X86_FEATURE_K7, k7_nops },
154 { X86_FEATURE_P4, p6_nops },
155 { X86_FEATURE_P3, p6_nops },
135 { -1, NULL } 156 { -1, NULL }
136}; 157};
137 158
138static unsigned char** find_nop_table(void) 159static const unsigned char*const * find_nop_table(void)
139{ 160{
140 unsigned char **noptable = intel_nops; 161 const unsigned char *const *noptable = intel_nops;
141 int i; 162 int i;
142 163
143 for (i = 0; noptypes[i].cpuid >= 0; i++) { 164 for (i = 0; noptypes[i].cpuid >= 0; i++) {
@@ -154,7 +175,7 @@ static unsigned char** find_nop_table(void)
154/* Use this to add nops to a buffer, then text_poke the whole buffer. */ 175/* Use this to add nops to a buffer, then text_poke the whole buffer. */
155static void add_nops(void *insns, unsigned int len) 176static void add_nops(void *insns, unsigned int len)
156{ 177{
157 unsigned char **noptable = find_nop_table(); 178 const unsigned char *const *noptable = find_nop_table();
158 179
159 while (len > 0) { 180 while (len > 0) {
160 unsigned int noplen = len; 181 unsigned int noplen = len;
@@ -369,8 +390,8 @@ void apply_paravirt(struct paravirt_patch_site *start,
369 BUG_ON(p->len > MAX_PATCH_LEN); 390 BUG_ON(p->len > MAX_PATCH_LEN);
370 /* prep the buffer with the original instructions */ 391 /* prep the buffer with the original instructions */
371 memcpy(insnbuf, p->instr, p->len); 392 memcpy(insnbuf, p->instr, p->len);
372 used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf, 393 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
373 (unsigned long)p->instr, p->len); 394 (unsigned long)p->instr, p->len);
374 395
375 BUG_ON(used > p->len); 396 BUG_ON(used > p->len);
376 397
@@ -415,9 +436,6 @@ void __init alternative_instructions(void)
415 alternatives_smp_unlock(__smp_locks, __smp_locks_end, 436 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
416 _text, _etext); 437 _text, _etext);
417 } 438 }
418 free_init_pages("SMP alternatives",
419 (unsigned long)__smp_locks,
420 (unsigned long)__smp_locks_end);
421 } else { 439 } else {
422 alternatives_smp_module_add(NULL, "core kernel", 440 alternatives_smp_module_add(NULL, "core kernel",
423 __smp_locks, __smp_locks_end, 441 __smp_locks, __smp_locks_end,
@@ -428,6 +446,11 @@ void __init alternative_instructions(void)
428 apply_paravirt(__parainstructions, __parainstructions_end); 446 apply_paravirt(__parainstructions, __parainstructions_end);
429 local_irq_restore(flags); 447 local_irq_restore(flags);
430 448
449 if (smp_alt_once)
450 free_init_pages("SMP alternatives",
451 (unsigned long)__smp_locks,
452 (unsigned long)__smp_locks_end);
453
431 restart_nmi(); 454 restart_nmi();
432#ifdef CONFIG_X86_MCE 455#ifdef CONFIG_X86_MCE
433 restart_mce(); 456 restart_mce();
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 3d67ae18d762..793341fffc81 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1277,6 +1277,7 @@ void smp_spurious_interrupt(struct pt_regs *regs)
1277 /* see sw-dev-man vol 3, chapter 7.4.13.5 */ 1277 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1278 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " 1278 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
1279 "should never happen.\n", smp_processor_id()); 1279 "should never happen.\n", smp_processor_id());
1280 __get_cpu_var(irq_stat).irq_spurious_count++;
1280 irq_exit(); 1281 irq_exit();
1281} 1282}
1282 1283
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 09b82093bc75..f47bc493dba9 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -974,15 +974,12 @@ void __init setup_boot_APIC_clock (void)
974 */ 974 */
975void __cpuinit check_boot_apic_timer_broadcast(void) 975void __cpuinit check_boot_apic_timer_broadcast(void)
976{ 976{
977 struct clock_event_device *levt = &per_cpu(lapic_events, boot_cpu_id);
978
979 if (!disable_apic_timer || 977 if (!disable_apic_timer ||
980 (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY)) 978 (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
981 return; 979 return;
982 980
983 printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n"); 981 printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
984 lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY; 982 lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
985 levt->features |= CLOCK_EVT_FEAT_DUMMY;
986 983
987 local_irq_enable(); 984 local_irq_enable();
988 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id); 985 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
@@ -1143,6 +1140,7 @@ asmlinkage void smp_spurious_interrupt(void)
1143 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) 1140 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1144 ack_APIC_irq(); 1141 ack_APIC_irq();
1145 1142
1143 add_pda(irq_spurious_count, 1);
1146 irq_exit(); 1144 irq_exit();
1147} 1145}
1148 1146
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 8029742c0fc1..f1b7cdda82b3 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -116,12 +116,14 @@ void foo(void)
116 116
117#ifdef CONFIG_PARAVIRT 117#ifdef CONFIG_PARAVIRT
118 BLANK(); 118 BLANK();
119 OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled); 119 OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
120 OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable); 120 OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
121 OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable); 121 OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
122 OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit); 122 OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
123 OFFSET(PARAVIRT_iret, paravirt_ops, iret); 123 OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
124 OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0); 124 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
125 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
126 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
125#endif 127#endif
126 128
127#ifdef CONFIG_XEN 129#ifdef CONFIG_XEN
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dcf6bbb1c7c0..5f8af875f457 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -4,6 +4,7 @@
4#include <asm/io.h> 4#include <asm/io.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
6#include <asm/apic.h> 6#include <asm/apic.h>
7#include <asm/mach_apic.h>
7 8
8#include "cpu.h" 9#include "cpu.h"
9 10
@@ -45,13 +46,17 @@ static __cpuinit int amd_apic_timer_broken(void)
45 case CPUID_XFAM_10H: 46 case CPUID_XFAM_10H:
46 case CPUID_XFAM_11H: 47 case CPUID_XFAM_11H:
47 rdmsr(MSR_K8_ENABLE_C1E, lo, hi); 48 rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
48 if (lo & ENABLE_C1E_MASK) 49 if (lo & ENABLE_C1E_MASK) {
50 if (smp_processor_id() != boot_cpu_physical_apicid)
51 printk(KERN_INFO "AMD C1E detected late. "
52 " Force timer broadcast.\n");
49 return 1; 53 return 1;
50 break; 54 }
51 default: 55 break;
52 /* err on the side of caution */ 56 default:
57 /* err on the side of caution */
53 return 1; 58 return 1;
54 } 59 }
55 return 0; 60 return 0;
56} 61}
57#endif 62#endif
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 7decd6a50ffa..f3686a5f2308 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -565,7 +565,7 @@ static unsigned int powernow_get(unsigned int cpu)
565} 565}
566 566
567 567
568static int __init acer_cpufreq_pst(struct dmi_system_id *d) 568static int __init acer_cpufreq_pst(const struct dmi_system_id *d)
569{ 569{
570 printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident); 570 printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident);
571 printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n"); 571 printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc4e08147b1f..cc8c501b9f39 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9 9
10#include <asm/processor.h> 10#include <asm/processor.h>
11#include <asm/pgtable.h>
11#include <asm/msr.h> 12#include <asm/msr.h>
12#include <asm/uaccess.h> 13#include <asm/uaccess.h>
13 14
@@ -19,8 +20,6 @@
19#include <mach_apic.h> 20#include <mach_apic.h>
20#endif 21#endif
21 22
22extern int trap_init_f00f_bug(void);
23
24#ifdef CONFIG_X86_INTEL_USERCOPY 23#ifdef CONFIG_X86_INTEL_USERCOPY
25/* 24/*
26 * Alignment at which movsl is preferred for bulk memory copies. 25 * Alignment at which movsl is preferred for bulk memory copies.
@@ -95,6 +94,20 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
95 return 1; 94 return 1;
96} 95}
97 96
97#ifdef CONFIG_X86_F00F_BUG
98static void __cpuinit trap_init_f00f_bug(void)
99{
100 __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
101
102 /*
103 * Update the IDT descriptor and reload the IDT so that
104 * it uses the read-only mapped virtual address.
105 */
106 idt_descr.address = fix_to_virt(FIX_F00F_IDT);
107 load_idt(&idt_descr);
108}
109#endif
110
98static void __cpuinit init_intel(struct cpuinfo_x86 *c) 111static void __cpuinit init_intel(struct cpuinfo_x86 *c)
99{ 112{
100 unsigned int l2 = 0; 113 unsigned int l2 = 0;
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index db6c25aa5776..1826395ebeeb 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -170,15 +170,15 @@ union l3_cache {
170 unsigned val; 170 unsigned val;
171}; 171};
172 172
173static const unsigned short assocs[] = { 173static unsigned short assocs[] __cpuinitdata = {
174 [1] = 1, [2] = 2, [4] = 4, [6] = 8, 174 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
175 [8] = 16, [0xa] = 32, [0xb] = 48, 175 [8] = 16, [0xa] = 32, [0xb] = 48,
176 [0xc] = 64, 176 [0xc] = 64,
177 [0xf] = 0xffff // ?? 177 [0xf] = 0xffff // ??
178}; 178};
179 179
180static const unsigned char levels[] = { 1, 1, 2, 3 }; 180static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
181static const unsigned char types[] = { 1, 2, 3, 3 }; 181static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
182 182
183static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 183static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
184 union _cpuid4_leaf_ebx *ebx, 184 union _cpuid4_leaf_ebx *ebx,
@@ -493,8 +493,8 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
493 } 493 }
494} 494}
495#else 495#else
496static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {} 496static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
497static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {} 497static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
498#endif 498#endif
499 499
500static void free_cache_attributes(unsigned int cpu) 500static void free_cache_attributes(unsigned int cpu)
@@ -794,8 +794,9 @@ static int __cpuinit cache_sysfs_init(void)
794 register_hotcpu_notifier(&cacheinfo_cpu_notifier); 794 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
795 795
796 for_each_online_cpu(i) { 796 for_each_online_cpu(i) {
797 cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE, 797 struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
798 (void *)(long)i); 798
799 cache_add_dev(sys_dev);
799 } 800 }
800 801
801 return 0; 802 return 0;
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index 1509edfb2313..be4dabfee1f5 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -61,6 +61,7 @@ fastcall void smp_thermal_interrupt(struct pt_regs *regs)
61{ 61{
62 irq_enter(); 62 irq_enter();
63 vendor_thermal_interrupt(regs); 63 vendor_thermal_interrupt(regs);
64 __get_cpu_var(irq_stat).irq_thermal_count++;
64 irq_exit(); 65 irq_exit();
65} 66}
66 67
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 1203dc5ab87a..494d320d909b 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -152,7 +152,7 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
152 return NOTIFY_OK; 152 return NOTIFY_OK;
153} 153}
154 154
155static struct notifier_block thermal_throttle_cpu_notifier = 155static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
156{ 156{
157 .notifier_call = thermal_throttle_cpu_callback, 157 .notifier_call = thermal_throttle_cpu_callback,
158}; 158};
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index c48b6fea5ab4..5e4be30ff903 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -738,13 +738,7 @@ void mtrr_ap_init(void)
738 */ 738 */
739void mtrr_save_state(void) 739void mtrr_save_state(void)
740{ 740{
741 int cpu = get_cpu(); 741 smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
742
743 if (cpu == 0)
744 mtrr_save_fixed_ranges(NULL);
745 else
746 smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
747 put_cpu();
748} 742}
749 743
750static int __init mtrr_init_finialize(void) 744static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 93fecd4b03de..54cdbf1a40f1 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -34,7 +34,7 @@ struct wd_ops {
34 u64 checkbit; 34 u64 checkbit;
35}; 35};
36 36
37static struct wd_ops *wd_ops; 37static const struct wd_ops *wd_ops;
38 38
39/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's 39/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
40 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) 40 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
@@ -317,7 +317,7 @@ static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
317 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); 317 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
318} 318}
319 319
320static struct wd_ops k7_wd_ops = { 320static const struct wd_ops k7_wd_ops = {
321 .reserve = single_msr_reserve, 321 .reserve = single_msr_reserve,
322 .unreserve = single_msr_unreserve, 322 .unreserve = single_msr_unreserve,
323 .setup = setup_k7_watchdog, 323 .setup = setup_k7_watchdog,
@@ -380,7 +380,7 @@ static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
380 write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); 380 write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz);
381} 381}
382 382
383static struct wd_ops p6_wd_ops = { 383static const struct wd_ops p6_wd_ops = {
384 .reserve = single_msr_reserve, 384 .reserve = single_msr_reserve,
385 .unreserve = single_msr_unreserve, 385 .unreserve = single_msr_unreserve,
386 .setup = setup_p6_watchdog, 386 .setup = setup_p6_watchdog,
@@ -532,7 +532,7 @@ static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
532 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); 532 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
533} 533}
534 534
535static struct wd_ops p4_wd_ops = { 535static const struct wd_ops p4_wd_ops = {
536 .reserve = p4_reserve, 536 .reserve = p4_reserve,
537 .unreserve = p4_unreserve, 537 .unreserve = p4_unreserve,
538 .setup = setup_p4_watchdog, 538 .setup = setup_p4_watchdog,
@@ -550,6 +550,8 @@ static struct wd_ops p4_wd_ops = {
550#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 550#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
551#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK 551#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
552 552
553static struct wd_ops intel_arch_wd_ops;
554
553static int setup_intel_arch_watchdog(unsigned nmi_hz) 555static int setup_intel_arch_watchdog(unsigned nmi_hz)
554{ 556{
555 unsigned int ebx; 557 unsigned int ebx;
@@ -591,11 +593,11 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
591 wd->perfctr_msr = perfctr_msr; 593 wd->perfctr_msr = perfctr_msr;
592 wd->evntsel_msr = evntsel_msr; 594 wd->evntsel_msr = evntsel_msr;
593 wd->cccr_msr = 0; //unused 595 wd->cccr_msr = 0; //unused
594 wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1); 596 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
595 return 1; 597 return 1;
596} 598}
597 599
598static struct wd_ops intel_arch_wd_ops = { 600static struct wd_ops intel_arch_wd_ops __read_mostly = {
599 .reserve = single_msr_reserve, 601 .reserve = single_msr_reserve,
600 .unreserve = single_msr_unreserve, 602 .unreserve = single_msr_unreserve,
601 .setup = setup_intel_arch_watchdog, 603 .setup = setup_intel_arch_watchdog,
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index f4548c93ccf5..70dcf912d9fb 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -43,8 +43,6 @@
43 43
44static struct class *cpuid_class; 44static struct class *cpuid_class;
45 45
46#ifdef CONFIG_SMP
47
48struct cpuid_command { 46struct cpuid_command {
49 u32 reg; 47 u32 reg;
50 u32 *data; 48 u32 *data;
@@ -62,25 +60,11 @@ static inline void do_cpuid(int cpu, u32 reg, u32 * data)
62{ 60{
63 struct cpuid_command cmd; 61 struct cpuid_command cmd;
64 62
65 preempt_disable(); 63 cmd.reg = reg;
66 if (cpu == smp_processor_id()) { 64 cmd.data = data;
67 cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
68 } else {
69 cmd.reg = reg;
70 cmd.data = data;
71 65
72 smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); 66 smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
73 }
74 preempt_enable();
75} 67}
76#else /* ! CONFIG_SMP */
77
78static inline void do_cpuid(int cpu, u32 reg, u32 * data)
79{
80 cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
81}
82
83#endif /* ! CONFIG_SMP */
84 68
85static loff_t cpuid_seek(struct file *file, loff_t offset, int orig) 69static loff_t cpuid_seek(struct file *file, loff_t offset, int orig)
86{ 70{
@@ -150,7 +134,7 @@ static const struct file_operations cpuid_fops = {
150 .open = cpuid_open, 134 .open = cpuid_open,
151}; 135};
152 136
153static int cpuid_device_create(int i) 137static int __cpuinit cpuid_device_create(int i)
154{ 138{
155 int err = 0; 139 int err = 0;
156 struct device *dev; 140 struct device *dev;
@@ -161,7 +145,9 @@ static int cpuid_device_create(int i)
161 return err; 145 return err;
162} 146}
163 147
164static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 148static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
149 unsigned long action,
150 void *hcpu)
165{ 151{
166 unsigned int cpu = (unsigned long)hcpu; 152 unsigned int cpu = (unsigned long)hcpu;
167 153
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 8099fea0a72f..dc7f938e5015 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -437,7 +437,7 @@ ldt_ss:
437 * is still available to implement the setting of the high 437 * is still available to implement the setting of the high
438 * 16-bits in the INTERRUPT_RETURN paravirt-op. 438 * 16-bits in the INTERRUPT_RETURN paravirt-op.
439 */ 439 */
440 cmpl $0, paravirt_ops+PARAVIRT_enabled 440 cmpl $0, pv_info+PARAVIRT_enabled
441 jne restore_nocheck 441 jne restore_nocheck
442#endif 442#endif
443 443
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index f1cacd4897f7..3a058bb16409 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -988,7 +988,7 @@ child_rip:
988 movq %rsi, %rdi 988 movq %rsi, %rdi
989 call *%rax 989 call *%rax
990 # exit 990 # exit
991 xorl %edi, %edi 991 mov %eax, %edi
992 call do_exit 992 call do_exit
993 CFI_ENDPROC 993 CFI_ENDPROC
994ENDPROC(child_rip) 994ENDPROC(child_rip)
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 47496a40e84f..4ae03e3e8294 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -29,8 +29,6 @@ u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
29 = { [0 ... NR_CPUS-1] = BAD_APICID }; 29 = { [0 ... NR_CPUS-1] = BAD_APICID };
30EXPORT_SYMBOL(x86_cpu_to_apicid); 30EXPORT_SYMBOL(x86_cpu_to_apicid);
31 31
32u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
33
34struct genapic __read_mostly *genapic = &apic_flat; 32struct genapic __read_mostly *genapic = &apic_flat;
35 33
36/* 34/*
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index ecb01eefdd27..91c7526768ee 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -52,7 +52,6 @@ static void flat_init_apic_ldr(void)
52 52
53 num = smp_processor_id(); 53 num = smp_processor_id();
54 id = 1UL << num; 54 id = 1UL << num;
55 x86_cpu_to_log_apicid[num] = id;
56 apic_write(APIC_DFR, APIC_DFR_FLAT); 55 apic_write(APIC_DFR, APIC_DFR_FLAT);
57 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; 56 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
58 val |= SET_APIC_LOGICAL_ID(id); 57 val |= SET_APIC_LOGICAL_ID(id);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 9150ca9b5f80..39677965e161 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -51,6 +51,15 @@
51 */ 51 */
52LOW_PAGES = 1<<(32-PAGE_SHIFT_asm) 52LOW_PAGES = 1<<(32-PAGE_SHIFT_asm)
53 53
54/*
55 * To preserve the DMA pool in PAGEALLOC kernels, we'll allocate
56 * pagetables from above the 16MB DMA limit, so we'll have to set
57 * up pagetables 16MB more (worst-case):
58 */
59#ifdef CONFIG_DEBUG_PAGEALLOC
60LOW_PAGES = LOW_PAGES + 0x1000000
61#endif
62
54#if PTRS_PER_PMD > 1 63#if PTRS_PER_PMD > 1
55PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD 64PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD
56#else 65#else
@@ -443,6 +452,7 @@ early_page_fault:
443early_fault: 452early_fault:
444 cld 453 cld
445#ifdef CONFIG_PRINTK 454#ifdef CONFIG_PRINTK
455 pusha
446 movl $(__KERNEL_DS),%eax 456 movl $(__KERNEL_DS),%eax
447 movl %eax,%ds 457 movl %eax,%ds
448 movl %eax,%es 458 movl %eax,%es
@@ -534,8 +544,15 @@ int_msg:
534 .asciz "Unknown interrupt or fault at EIP %p %p %p\n" 544 .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
535 545
536fault_msg: 546fault_msg:
537 .ascii "Int %d: CR2 %p err %p EIP %p CS %p flags %p\n" 547 .ascii \
538 .asciz "Stack: %p %p %p %p %p %p %p %p\n" 548/* fault info: */ "BUG: Int %d: CR2 %p\n" \
549/* pusha regs: */ " EDI %p ESI %p EBP %p ESP %p\n" \
550 " EBX %p EDX %p ECX %p EAX %p\n" \
551/* fault frame: */ " err %p EIP %p CS %p flg %p\n" \
552 \
553 "Stack: %p %p %p %p %p %p %p %p\n" \
554 " %p %p %p %p %p %p %p %p\n" \
555 " %p %p %p %p %p %p %p %p\n"
539 556
540#include "../../x86/xen/xen-head.S" 557#include "../../x86/xen/xen-head.S"
541 558
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index e3d4b73bfdb0..edd39ccf139e 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -1,4 +1,5 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <asm/semaphore.h>
2#include <asm/checksum.h> 3#include <asm/checksum.h>
3#include <asm/desc.h> 4#include <asm/desc.h>
4 5
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index 679bb33acbf1..d34a10cc13a7 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -349,7 +349,11 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
349 * New motherboards sometimes make IRQ 13 be a PCI interrupt, 349 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
350 * so allow interrupt sharing. 350 * so allow interrupt sharing.
351 */ 351 */
352static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL }; 352static struct irqaction fpu_irq = {
353 .handler = math_error_irq,
354 .mask = CPU_MASK_NONE,
355 .name = "fpu",
356};
353 357
354void __init init_ISA_irqs (void) 358void __init init_ISA_irqs (void)
355{ 359{
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
index eb72976cc13c..3f27ea0b9816 100644
--- a/arch/x86/kernel/i8259_64.c
+++ b/arch/x86/kernel/i8259_64.c
@@ -395,7 +395,11 @@ device_initcall(i8259A_init_sysfs);
395 * IRQ2 is cascade interrupt to second interrupt controller 395 * IRQ2 is cascade interrupt to second interrupt controller
396 */ 396 */
397 397
398static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; 398static struct irqaction irq2 = {
399 .handler = no_action,
400 .mask = CPU_MASK_NONE,
401 .name = "cascade",
402};
399DEFINE_PER_CPU(vector_irq_t, vector_irq) = { 403DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
400 [0 ... IRQ0_VECTOR - 1] = -1, 404 [0 ... IRQ0_VECTOR - 1] = -1,
401 [IRQ0_VECTOR] = 0, 405 [IRQ0_VECTOR] = 0,
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 4ee1e5ee9b57..5f10c7189534 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1296,6 +1296,11 @@ static void __init setup_IO_APIC_irqs(void)
1296 continue; 1296 continue;
1297 } 1297 }
1298 1298
1299 if (!first_notcon) {
1300 apic_printk(APIC_VERBOSE, " not connected.\n");
1301 first_notcon = 1;
1302 }
1303
1299 entry.trigger = irq_trigger(idx); 1304 entry.trigger = irq_trigger(idx);
1300 entry.polarity = irq_polarity(idx); 1305 entry.polarity = irq_polarity(idx);
1301 1306
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 966fa1062491..1c2c7bf6a9d3 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -875,6 +875,10 @@ static void __init setup_IO_APIC_irqs(void)
875 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin); 875 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
876 continue; 876 continue;
877 } 877 }
878 if (!first_notcon) {
879 apic_printk(APIC_VERBOSE, " not connected.\n");
880 first_notcon = 1;
881 }
878 882
879 irq = pin_2_irq(idx, apic, pin); 883 irq = pin_2_irq(idx, apic, pin);
880 add_pin_to_irq(irq, apic, pin); 884 add_pin_to_irq(irq, apic, pin);
@@ -885,7 +889,7 @@ static void __init setup_IO_APIC_irqs(void)
885 } 889 }
886 890
887 if (!first_notcon) 891 if (!first_notcon)
888 apic_printk(APIC_VERBOSE," not connected.\n"); 892 apic_printk(APIC_VERBOSE, " not connected.\n");
889} 893}
890 894
891/* 895/*
@@ -1845,7 +1849,7 @@ static struct sysdev_class ioapic_sysdev_class = {
1845static int __init ioapic_init_sysfs(void) 1849static int __init ioapic_init_sysfs(void)
1846{ 1850{
1847 struct sys_device * dev; 1851 struct sys_device * dev;
1848 int i, size, error = 0; 1852 int i, size, error;
1849 1853
1850 error = sysdev_class_register(&ioapic_sysdev_class); 1854 error = sysdev_class_register(&ioapic_sysdev_class);
1851 if (error) 1855 if (error)
@@ -1854,12 +1858,11 @@ static int __init ioapic_init_sysfs(void)
1854 for (i = 0; i < nr_ioapics; i++ ) { 1858 for (i = 0; i < nr_ioapics; i++ ) {
1855 size = sizeof(struct sys_device) + nr_ioapic_registers[i] 1859 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
1856 * sizeof(struct IO_APIC_route_entry); 1860 * sizeof(struct IO_APIC_route_entry);
1857 mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL); 1861 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
1858 if (!mp_ioapic_data[i]) { 1862 if (!mp_ioapic_data[i]) {
1859 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); 1863 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
1860 continue; 1864 continue;
1861 } 1865 }
1862 memset(mp_ioapic_data[i], 0, size);
1863 dev = &mp_ioapic_data[i]->dev; 1866 dev = &mp_ioapic_data[i]->dev;
1864 dev->id = i; 1867 dev->id = i;
1865 dev->cls = &ioapic_sysdev_class; 1868 dev->cls = &ioapic_sysdev_class;
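
The ioapic_init_sysfs() hunk drops the explicit memset() by allocating with kzalloc(), which hands back already-zeroed memory. The change in isolation, as a hedged sketch with hypothetical helper names:

#include <linux/slab.h>
#include <linux/string.h>

/* Before: allocate, then clear by hand. */
static void *example_alloc_cleared_old(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (p)
		memset(p, 0, size);
	return p;
}

/* After: kzalloc() combines the allocation and the clearing. */
static void *example_alloc_cleared_new(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}
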
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index e173b763f148..d3fde94f7345 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -255,9 +255,17 @@ int show_interrupts(struct seq_file *p, void *v)
255 } 255 }
256 256
257 if (i < NR_IRQS) { 257 if (i < NR_IRQS) {
258 unsigned any_count = 0;
259
258 spin_lock_irqsave(&irq_desc[i].lock, flags); 260 spin_lock_irqsave(&irq_desc[i].lock, flags);
261#ifndef CONFIG_SMP
262 any_count = kstat_irqs(i);
263#else
264 for_each_online_cpu(j)
265 any_count |= kstat_cpu(j).irqs[i];
266#endif
259 action = irq_desc[i].action; 267 action = irq_desc[i].action;
260 if (!action) 268 if (!action && !any_count)
261 goto skip; 269 goto skip;
262 seq_printf(p, "%3d: ",i); 270 seq_printf(p, "%3d: ",i);
263#ifndef CONFIG_SMP 271#ifndef CONFIG_SMP
@@ -268,10 +276,12 @@ int show_interrupts(struct seq_file *p, void *v)
268#endif 276#endif
269 seq_printf(p, " %8s", irq_desc[i].chip->name); 277 seq_printf(p, " %8s", irq_desc[i].chip->name);
270 seq_printf(p, "-%-8s", irq_desc[i].name); 278 seq_printf(p, "-%-8s", irq_desc[i].name);
271 seq_printf(p, " %s", action->name);
272 279
273 for (action=action->next; action; action = action->next) 280 if (action) {
274 seq_printf(p, ", %s", action->name); 281 seq_printf(p, " %s", action->name);
282 while ((action = action->next) != NULL)
283 seq_printf(p, ", %s", action->name);
284 }
275 285
276 seq_putc(p, '\n'); 286 seq_putc(p, '\n');
277skip: 287skip:
@@ -280,14 +290,41 @@ skip:
280 seq_printf(p, "NMI: "); 290 seq_printf(p, "NMI: ");
281 for_each_online_cpu(j) 291 for_each_online_cpu(j)
282 seq_printf(p, "%10u ", nmi_count(j)); 292 seq_printf(p, "%10u ", nmi_count(j));
283 seq_putc(p, '\n'); 293 seq_printf(p, " Non-maskable interrupts\n");
284#ifdef CONFIG_X86_LOCAL_APIC 294#ifdef CONFIG_X86_LOCAL_APIC
285 seq_printf(p, "LOC: "); 295 seq_printf(p, "LOC: ");
286 for_each_online_cpu(j) 296 for_each_online_cpu(j)
287 seq_printf(p, "%10u ", 297 seq_printf(p, "%10u ",
288 per_cpu(irq_stat,j).apic_timer_irqs); 298 per_cpu(irq_stat,j).apic_timer_irqs);
289 seq_putc(p, '\n'); 299 seq_printf(p, " Local timer interrupts\n");
290#endif 300#endif
301#ifdef CONFIG_SMP
302 seq_printf(p, "RES: ");
303 for_each_online_cpu(j)
304 seq_printf(p, "%10u ",
305 per_cpu(irq_stat,j).irq_resched_count);
306 seq_printf(p, " Rescheduling interrupts\n");
307 seq_printf(p, "CAL: ");
308 for_each_online_cpu(j)
309 seq_printf(p, "%10u ",
310 per_cpu(irq_stat,j).irq_call_count);
311 seq_printf(p, " function call interrupts\n");
312 seq_printf(p, "TLB: ");
313 for_each_online_cpu(j)
314 seq_printf(p, "%10u ",
315 per_cpu(irq_stat,j).irq_tlb_count);
316 seq_printf(p, " TLB shootdowns\n");
317#endif
318 seq_printf(p, "TRM: ");
319 for_each_online_cpu(j)
320 seq_printf(p, "%10u ",
321 per_cpu(irq_stat,j).irq_thermal_count);
322 seq_printf(p, " Thermal event interrupts\n");
323 seq_printf(p, "SPU: ");
324 for_each_online_cpu(j)
325 seq_printf(p, "%10u ",
326 per_cpu(irq_stat,j).irq_spurious_count);
327 seq_printf(p, " Spurious interrupts\n");
291 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 328 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
292#if defined(CONFIG_X86_IO_APIC) 329#if defined(CONFIG_X86_IO_APIC)
293 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); 330 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 865669efc540..6b5c730d67b9 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -62,9 +62,17 @@ int show_interrupts(struct seq_file *p, void *v)
62 } 62 }
63 63
64 if (i < NR_IRQS) { 64 if (i < NR_IRQS) {
65 unsigned any_count = 0;
66
65 spin_lock_irqsave(&irq_desc[i].lock, flags); 67 spin_lock_irqsave(&irq_desc[i].lock, flags);
68#ifndef CONFIG_SMP
69 any_count = kstat_irqs(i);
70#else
71 for_each_online_cpu(j)
72 any_count |= kstat_cpu(j).irqs[i];
73#endif
66 action = irq_desc[i].action; 74 action = irq_desc[i].action;
67 if (!action) 75 if (!action && !any_count)
68 goto skip; 76 goto skip;
69 seq_printf(p, "%3d: ",i); 77 seq_printf(p, "%3d: ",i);
70#ifndef CONFIG_SMP 78#ifndef CONFIG_SMP
@@ -76,9 +84,11 @@ int show_interrupts(struct seq_file *p, void *v)
76 seq_printf(p, " %8s", irq_desc[i].chip->name); 84 seq_printf(p, " %8s", irq_desc[i].chip->name);
77 seq_printf(p, "-%-8s", irq_desc[i].name); 85 seq_printf(p, "-%-8s", irq_desc[i].name);
78 86
79 seq_printf(p, " %s", action->name); 87 if (action) {
80 for (action=action->next; action; action = action->next) 88 seq_printf(p, " %s", action->name);
81 seq_printf(p, ", %s", action->name); 89 while ((action = action->next) != NULL)
90 seq_printf(p, ", %s", action->name);
91 }
82 seq_putc(p, '\n'); 92 seq_putc(p, '\n');
83skip: 93skip:
84 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 94 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
@@ -86,11 +96,37 @@ skip:
86 seq_printf(p, "NMI: "); 96 seq_printf(p, "NMI: ");
87 for_each_online_cpu(j) 97 for_each_online_cpu(j)
88 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); 98 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
89 seq_putc(p, '\n'); 99 seq_printf(p, " Non-maskable interrupts\n");
90 seq_printf(p, "LOC: "); 100 seq_printf(p, "LOC: ");
91 for_each_online_cpu(j) 101 for_each_online_cpu(j)
92 seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); 102 seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
93 seq_putc(p, '\n'); 103 seq_printf(p, " Local timer interrupts\n");
104#ifdef CONFIG_SMP
105 seq_printf(p, "RES: ");
106 for_each_online_cpu(j)
107 seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
108 seq_printf(p, " Rescheduling interrupts\n");
109 seq_printf(p, "CAL: ");
110 for_each_online_cpu(j)
111 seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
112 seq_printf(p, " function call interrupts\n");
113 seq_printf(p, "TLB: ");
114 for_each_online_cpu(j)
115 seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
116 seq_printf(p, " TLB shootdowns\n");
117#endif
118 seq_printf(p, "TRM: ");
119 for_each_online_cpu(j)
120 seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
121 seq_printf(p, " Thermal event interrupts\n");
122 seq_printf(p, "THR: ");
123 for_each_online_cpu(j)
124 seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
125 seq_printf(p, " Threshold APIC interrupts\n");
126 seq_printf(p, "SPU: ");
127 for_each_online_cpu(j)
128 seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
129 seq_printf(p, " Spurious interrupts\n");
94 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 130 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
95 } 131 }
96 return 0; 132 return 0;
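
Both show_interrupts() rewrites above stop hiding IRQ lines that have a non-zero count but no registered handler, and they append a readable label to every per-CPU statistics row in /proc/interrupts. The core of the first part is the any_count accumulation; a simplified sketch using kstat_cpu()/kstat_irqs() as in this kernel era (irq_has_count() is a hypothetical helper name):

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

/*
 * Non-zero if IRQ i has fired on any online CPU, so its line is
 * printed even when no action is currently attached.
 */
static unsigned int irq_has_count(int i)
{
	unsigned int any_count = 0;
#ifndef CONFIG_SMP
	any_count = kstat_irqs(i);
#else
	int j;

	for_each_online_cpu(j)
		any_count |= kstat_cpu(j).irqs[i];
#endif
	return any_count;
}
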
diff --git a/arch/x86/kernel/ldt_32.c b/arch/x86/kernel/ldt_32.c
index a8b18421863a..9ff90a27c45f 100644
--- a/arch/x86/kernel/ldt_32.c
+++ b/arch/x86/kernel/ldt_32.c
@@ -92,13 +92,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
92 struct mm_struct * old_mm; 92 struct mm_struct * old_mm;
93 int retval = 0; 93 int retval = 0;
94 94
95 init_MUTEX(&mm->context.sem); 95 mutex_init(&mm->context.lock);
96 mm->context.size = 0; 96 mm->context.size = 0;
97 old_mm = current->mm; 97 old_mm = current->mm;
98 if (old_mm && old_mm->context.size > 0) { 98 if (old_mm && old_mm->context.size > 0) {
99 down(&old_mm->context.sem); 99 mutex_lock(&old_mm->context.lock);
100 retval = copy_ldt(&mm->context, &old_mm->context); 100 retval = copy_ldt(&mm->context, &old_mm->context);
101 up(&old_mm->context.sem); 101 mutex_unlock(&old_mm->context.lock);
102 } 102 }
103 return retval; 103 return retval;
104} 104}
@@ -130,7 +130,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
130 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) 130 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
131 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; 131 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
132 132
133 down(&mm->context.sem); 133 mutex_lock(&mm->context.lock);
134 size = mm->context.size*LDT_ENTRY_SIZE; 134 size = mm->context.size*LDT_ENTRY_SIZE;
135 if (size > bytecount) 135 if (size > bytecount)
136 size = bytecount; 136 size = bytecount;
@@ -138,7 +138,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
138 err = 0; 138 err = 0;
139 if (copy_to_user(ptr, mm->context.ldt, size)) 139 if (copy_to_user(ptr, mm->context.ldt, size))
140 err = -EFAULT; 140 err = -EFAULT;
141 up(&mm->context.sem); 141 mutex_unlock(&mm->context.lock);
142 if (err < 0) 142 if (err < 0)
143 goto error_return; 143 goto error_return;
144 if (size != bytecount) { 144 if (size != bytecount) {
@@ -194,7 +194,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
194 goto out; 194 goto out;
195 } 195 }
196 196
197 down(&mm->context.sem); 197 mutex_lock(&mm->context.lock);
198 if (ldt_info.entry_number >= mm->context.size) { 198 if (ldt_info.entry_number >= mm->context.size) {
199 error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1); 199 error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
200 if (error < 0) 200 if (error < 0)
@@ -221,7 +221,7 @@ install:
221 error = 0; 221 error = 0;
222 222
223out_unlock: 223out_unlock:
224 up(&mm->context.sem); 224 mutex_unlock(&mm->context.lock);
225out: 225out:
226 return error; 226 return error;
227} 227}
diff --git a/arch/x86/kernel/ldt_64.c b/arch/x86/kernel/ldt_64.c
index 3796523d616a..60e57abb8e90 100644
--- a/arch/x86/kernel/ldt_64.c
+++ b/arch/x86/kernel/ldt_64.c
@@ -96,13 +96,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
96 struct mm_struct * old_mm; 96 struct mm_struct * old_mm;
97 int retval = 0; 97 int retval = 0;
98 98
99 init_MUTEX(&mm->context.sem); 99 mutex_init(&mm->context.lock);
100 mm->context.size = 0; 100 mm->context.size = 0;
101 old_mm = current->mm; 101 old_mm = current->mm;
102 if (old_mm && old_mm->context.size > 0) { 102 if (old_mm && old_mm->context.size > 0) {
103 down(&old_mm->context.sem); 103 mutex_lock(&old_mm->context.lock);
104 retval = copy_ldt(&mm->context, &old_mm->context); 104 retval = copy_ldt(&mm->context, &old_mm->context);
105 up(&old_mm->context.sem); 105 mutex_unlock(&old_mm->context.lock);
106 } 106 }
107 return retval; 107 return retval;
108} 108}
@@ -133,7 +133,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
133 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) 133 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
134 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; 134 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
135 135
136 down(&mm->context.sem); 136 mutex_lock(&mm->context.lock);
137 size = mm->context.size*LDT_ENTRY_SIZE; 137 size = mm->context.size*LDT_ENTRY_SIZE;
138 if (size > bytecount) 138 if (size > bytecount)
139 size = bytecount; 139 size = bytecount;
@@ -141,7 +141,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
141 err = 0; 141 err = 0;
142 if (copy_to_user(ptr, mm->context.ldt, size)) 142 if (copy_to_user(ptr, mm->context.ldt, size))
143 err = -EFAULT; 143 err = -EFAULT;
144 up(&mm->context.sem); 144 mutex_unlock(&mm->context.lock);
145 if (err < 0) 145 if (err < 0)
146 goto error_return; 146 goto error_return;
147 if (size != bytecount) { 147 if (size != bytecount) {
@@ -193,7 +193,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
193 goto out; 193 goto out;
194 } 194 }
195 195
196 down(&mm->context.sem); 196 mutex_lock(&mm->context.lock);
197 if (ldt_info.entry_number >= (unsigned)mm->context.size) { 197 if (ldt_info.entry_number >= (unsigned)mm->context.size) {
198 error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1); 198 error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
199 if (error < 0) 199 if (error < 0)
@@ -223,7 +223,7 @@ install:
223 error = 0; 223 error = 0;
224 224
225out_unlock: 225out_unlock:
226 up(&mm->context.sem); 226 mutex_unlock(&mm->context.lock);
227out: 227out:
228 return error; 228 return error;
229} 229}
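
The ldt_32.c and ldt_64.c hunks (and the ptrace ones further down) are a mechanical semaphore-to-mutex conversion: mm->context.sem becomes mm->context.lock, and init_MUTEX()/down()/up() become mutex_init()/mutex_lock()/mutex_unlock(). The pattern in isolation, with a stand-in structure instead of the real mm context:

#include <linux/mutex.h>

/* Stand-in for the mm context; the real field is mm->context.lock. */
struct example_ctx {
	struct mutex lock;
	int size;
};

static void example_ctx_init(struct example_ctx *ctx)
{
	mutex_init(&ctx->lock);		/* was: init_MUTEX(&ctx->sem) */
	ctx->size = 0;
}

static int example_ctx_copy(struct example_ctx *dst, struct example_ctx *src)
{
	int ret;

	mutex_lock(&src->lock);		/* was: down(&src->sem) */
	ret = dst->size = src->size;	/* ... copy under the lock ... */
	mutex_unlock(&src->lock);	/* was: up(&src->sem) */
	return ret;
}
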
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 97d2b757d6bd..8ca8f8648969 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -695,8 +695,6 @@ static int __init mcheck_disable(char *str)
695 mce=nobootlog Don't log MCEs from before booting. */ 695 mce=nobootlog Don't log MCEs from before booting. */
696static int __init mcheck_enable(char *str) 696static int __init mcheck_enable(char *str)
697{ 697{
698 if (*str == '=')
699 str++;
700 if (!strcmp(str, "off")) 698 if (!strcmp(str, "off"))
701 mce_dont_init = 1; 699 mce_dont_init = 1;
702 else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog")) 700 else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog"))
@@ -709,7 +707,7 @@ static int __init mcheck_enable(char *str)
709} 707}
710 708
711__setup("nomce", mcheck_disable); 709__setup("nomce", mcheck_disable);
712__setup("mce", mcheck_enable); 710__setup("mce=", mcheck_enable);
713 711
714/* 712/*
715 * Sysfs support 713 * Sysfs support
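
mcheck_enable() no longer strips a leading '=' itself because the registration string changed from __setup("mce", ...) to __setup("mce=", ...): with the trailing '=', the boot-parameter parser matches "mce=..." and passes only the value after the '=' to the handler. A sketch of that convention with placeholder names:

#include <linux/init.h>
#include <linux/string.h>

static int example_mode = 1;

/*
 * Registered as "example=": the parser strips "example=" and passes
 * only the value, so booting with example=off yields str == "off".
 */
static int __init example_setup(char *str)
{
	if (!strcmp(str, "off"))
		example_mode = 0;
	else if (!strcmp(str, "on"))
		example_mode = 1;
	return 1;	/* parameter handled */
}
__setup("example=", example_setup);
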
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 805b62b1e0df..0d2afd96aca4 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -237,6 +237,7 @@ asmlinkage void mce_threshold_interrupt(void)
237 } 237 }
238 } 238 }
239out: 239out:
240 add_pda(irq_threshold_count, 1);
240 irq_exit(); 241 irq_exit();
241} 242}
242 243
diff --git a/arch/x86/kernel/mce_intel_64.c b/arch/x86/kernel/mce_intel_64.c
index 6551505d8a2c..c17eaf5dd6dd 100644
--- a/arch/x86/kernel/mce_intel_64.c
+++ b/arch/x86/kernel/mce_intel_64.c
@@ -26,6 +26,7 @@ asmlinkage void smp_thermal_interrupt(void)
26 if (therm_throt_process(msr_val & 1)) 26 if (therm_throt_process(msr_val & 1))
27 mce_log_therm_throt_event(smp_processor_id(), msr_val); 27 mce_log_therm_throt_event(smp_processor_id(), msr_val);
28 28
29 add_pda(irq_thermal_count, 1);
29 irq_exit(); 30 irq_exit();
30} 31}
31 32
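
The mce_amd_64.c and mce_intel_64.c hunks (together with the smp_*.c ones later in this diff) add per-CPU counters for threshold, thermal, reschedule, function-call and TLB-flush interrupts; the new /proc/interrupts rows above read them back. On 64-bit the counter lives in the per-CPU PDA and is bumped with add_pda(), on 32-bit it is a field of the per-CPU irq_stat. A generic sketch of the same bookkeeping using the portable per-CPU API of this era (names prefixed example_ are hypothetical):

#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned int, example_irq_count);

/* Called from the interrupt path on the CPU that took the interrupt. */
static void example_count_irq(void)
{
	__get_cpu_var(example_irq_count)++;
}

/* Called from the /proc show routine to print one labelled row. */
static void example_show_counts(struct seq_file *p)
{
	int j;

	seq_printf(p, "EXA: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(example_irq_count, j));
	seq_printf(p, "  Example interrupts\n");
}
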
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index c044de310b69..df85c9c13601 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -133,7 +133,7 @@ static const struct file_operations msr_fops = {
133 .open = msr_open, 133 .open = msr_open,
134}; 134};
135 135
136static int msr_device_create(int i) 136static int __cpuinit msr_device_create(int i)
137{ 137{
138 int err = 0; 138 int err = 0;
139 struct device *dev; 139 struct device *dev;
@@ -144,7 +144,7 @@ static int msr_device_create(int i)
144 return err; 144 return err;
145} 145}
146 146
147static int msr_class_cpu_callback(struct notifier_block *nfb, 147static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
148 unsigned long action, void *hcpu) 148 unsigned long action, void *hcpu)
149{ 149{
150 unsigned int cpu = (unsigned long)hcpu; 150 unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
index 739cfb207dd7..6a80d67c2121 100644
--- a/arch/x86/kernel/paravirt_32.c
+++ b/arch/x86/kernel/paravirt_32.c
@@ -42,32 +42,33 @@ void _paravirt_nop(void)
42static void __init default_banner(void) 42static void __init default_banner(void)
43{ 43{
44 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 44 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
45 paravirt_ops.name); 45 pv_info.name);
46} 46}
47 47
48char *memory_setup(void) 48char *memory_setup(void)
49{ 49{
50 return paravirt_ops.memory_setup(); 50 return pv_init_ops.memory_setup();
51} 51}
52 52
53/* Simple instruction patching code. */ 53/* Simple instruction patching code. */
54#define DEF_NATIVE(name, code) \ 54#define DEF_NATIVE(ops, name, code) \
55 extern const char start_##name[], end_##name[]; \ 55 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
56 asm("start_" #name ": " code "; end_" #name ":") 56 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
57 57
58DEF_NATIVE(irq_disable, "cli"); 58DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
59DEF_NATIVE(irq_enable, "sti"); 59DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
60DEF_NATIVE(restore_fl, "push %eax; popf"); 60DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
61DEF_NATIVE(save_fl, "pushf; pop %eax"); 61DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
62DEF_NATIVE(iret, "iret"); 62DEF_NATIVE(pv_cpu_ops, iret, "iret");
63DEF_NATIVE(irq_enable_sysexit, "sti; sysexit"); 63DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
64DEF_NATIVE(read_cr2, "mov %cr2, %eax"); 64DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
65DEF_NATIVE(write_cr3, "mov %eax, %cr3"); 65DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
66DEF_NATIVE(read_cr3, "mov %cr3, %eax"); 66DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
67DEF_NATIVE(clts, "clts"); 67DEF_NATIVE(pv_cpu_ops, clts, "clts");
68DEF_NATIVE(read_tsc, "rdtsc"); 68DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
69 69
70DEF_NATIVE(ud2a, "ud2a"); 70/* Undefined instruction for dealing with missing ops pointers. */
71static const unsigned char ud2a[] = { 0x0f, 0x0b };
71 72
72static unsigned native_patch(u8 type, u16 clobbers, void *ibuf, 73static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
73 unsigned long addr, unsigned len) 74 unsigned long addr, unsigned len)
@@ -76,37 +77,29 @@ static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
76 unsigned ret; 77 unsigned ret;
77 78
78 switch(type) { 79 switch(type) {
79#define SITE(x) case PARAVIRT_PATCH(x): start = start_##x; end = end_##x; goto patch_site 80#define SITE(ops, x) \
80 SITE(irq_disable); 81 case PARAVIRT_PATCH(ops.x): \
81 SITE(irq_enable); 82 start = start_##ops##_##x; \
82 SITE(restore_fl); 83 end = end_##ops##_##x; \
83 SITE(save_fl); 84 goto patch_site
84 SITE(iret); 85
85 SITE(irq_enable_sysexit); 86 SITE(pv_irq_ops, irq_disable);
86 SITE(read_cr2); 87 SITE(pv_irq_ops, irq_enable);
87 SITE(read_cr3); 88 SITE(pv_irq_ops, restore_fl);
88 SITE(write_cr3); 89 SITE(pv_irq_ops, save_fl);
89 SITE(clts); 90 SITE(pv_cpu_ops, iret);
90 SITE(read_tsc); 91 SITE(pv_cpu_ops, irq_enable_sysexit);
92 SITE(pv_mmu_ops, read_cr2);
93 SITE(pv_mmu_ops, read_cr3);
94 SITE(pv_mmu_ops, write_cr3);
95 SITE(pv_cpu_ops, clts);
96 SITE(pv_cpu_ops, read_tsc);
91#undef SITE 97#undef SITE
92 98
93 patch_site: 99 patch_site:
94 ret = paravirt_patch_insns(ibuf, len, start, end); 100 ret = paravirt_patch_insns(ibuf, len, start, end);
95 break; 101 break;
96 102
97 case PARAVIRT_PATCH(make_pgd):
98 case PARAVIRT_PATCH(make_pte):
99 case PARAVIRT_PATCH(pgd_val):
100 case PARAVIRT_PATCH(pte_val):
101#ifdef CONFIG_X86_PAE
102 case PARAVIRT_PATCH(make_pmd):
103 case PARAVIRT_PATCH(pmd_val):
104#endif
105 /* These functions end up returning exactly what
106 they're passed, in the same registers. */
107 ret = paravirt_patch_nop();
108 break;
109
110 default: 103 default:
111 ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); 104 ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
112 break; 105 break;
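
The hunk above only renames the patch-site symbols so they are namespaced per ops structure; the underlying DEF_NATIVE() technique is unchanged: emit the native instruction sequence between start_/end_ labels via top-level inline asm, then let the patcher copy those bytes over the indirect call site. A simplified, 32-bit-x86-only sketch of that bracketing idiom (patch_one() is a hypothetical stand-in for paravirt_patch_insns()):

#include <linux/string.h>

#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[]; \
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

DEF_NATIVE(pv_irq_ops, irq_disable, "cli");

/*
 * Copy the native sequence into the instruction buffer if it fits;
 * otherwise leave the indirect call in place (the real code pads any
 * leftover bytes with NOPs).
 */
static unsigned patch_one(void *insnbuf, unsigned len)
{
	unsigned insn_len = end_pv_irq_ops_irq_disable -
			    start_pv_irq_ops_irq_disable;

	if (insn_len > len)
		return 0;
	memcpy(insnbuf, start_pv_irq_ops_irq_disable, insn_len);
	return insn_len;
}
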
@@ -150,7 +143,7 @@ unsigned paravirt_patch_call(void *insnbuf,
150 return 5; 143 return 5;
151} 144}
152 145
153unsigned paravirt_patch_jmp(const void *target, void *insnbuf, 146unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
154 unsigned long addr, unsigned len) 147 unsigned long addr, unsigned len)
155{ 148{
156 struct branch *b = insnbuf; 149 struct branch *b = insnbuf;
@@ -165,22 +158,37 @@ unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
165 return 5; 158 return 5;
166} 159}
167 160
161/* Neat trick to map patch type back to the call within the
162 * corresponding structure. */
163static void *get_call_destination(u8 type)
164{
165 struct paravirt_patch_template tmpl = {
166 .pv_init_ops = pv_init_ops,
167 .pv_time_ops = pv_time_ops,
168 .pv_cpu_ops = pv_cpu_ops,
169 .pv_irq_ops = pv_irq_ops,
170 .pv_apic_ops = pv_apic_ops,
171 .pv_mmu_ops = pv_mmu_ops,
172 };
173 return *((void **)&tmpl + type);
174}
175
168unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, 176unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
169 unsigned long addr, unsigned len) 177 unsigned long addr, unsigned len)
170{ 178{
171 void *opfunc = *((void **)&paravirt_ops + type); 179 void *opfunc = get_call_destination(type);
172 unsigned ret; 180 unsigned ret;
173 181
174 if (opfunc == NULL) 182 if (opfunc == NULL)
175 /* If there's no function, patch it with a ud2a (BUG) */ 183 /* If there's no function, patch it with a ud2a (BUG) */
176 ret = paravirt_patch_insns(insnbuf, len, start_ud2a, end_ud2a); 184 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
177 else if (opfunc == paravirt_nop) 185 else if (opfunc == paravirt_nop)
178 /* If the operation is a nop, then nop the callsite */ 186 /* If the operation is a nop, then nop the callsite */
179 ret = paravirt_patch_nop(); 187 ret = paravirt_patch_nop();
180 else if (type == PARAVIRT_PATCH(iret) || 188 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
181 type == PARAVIRT_PATCH(irq_enable_sysexit)) 189 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
182 /* If operation requires a jmp, then jmp */ 190 /* If operation requires a jmp, then jmp */
183 ret = paravirt_patch_jmp(opfunc, insnbuf, addr, len); 191 ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
184 else 192 else
185 /* Otherwise call the function; assume target could 193 /* Otherwise call the function; assume target could
186 clobber any caller-save reg */ 194 clobber any caller-save reg */
@@ -205,7 +213,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
205 213
206void init_IRQ(void) 214void init_IRQ(void)
207{ 215{
208 paravirt_ops.init_IRQ(); 216 pv_irq_ops.init_IRQ();
209} 217}
210 218
211static void native_flush_tlb(void) 219static void native_flush_tlb(void)
@@ -233,7 +241,7 @@ extern void native_irq_enable_sysexit(void);
233 241
234static int __init print_banner(void) 242static int __init print_banner(void)
235{ 243{
236 paravirt_ops.banner(); 244 pv_init_ops.banner();
237 return 0; 245 return 0;
238} 246}
239core_initcall(print_banner); 247core_initcall(print_banner);
@@ -273,47 +281,96 @@ int paravirt_disable_iospace(void)
273 return ret; 281 return ret;
274} 282}
275 283
276struct paravirt_ops paravirt_ops = { 284static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
285
286static inline void enter_lazy(enum paravirt_lazy_mode mode)
287{
288 BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
289 BUG_ON(preemptible());
290
291 x86_write_percpu(paravirt_lazy_mode, mode);
292}
293
294void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
295{
296 BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
297 BUG_ON(preemptible());
298
299 x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
300}
301
302void paravirt_enter_lazy_mmu(void)
303{
304 enter_lazy(PARAVIRT_LAZY_MMU);
305}
306
307void paravirt_leave_lazy_mmu(void)
308{
309 paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
310}
311
312void paravirt_enter_lazy_cpu(void)
313{
314 enter_lazy(PARAVIRT_LAZY_CPU);
315}
316
317void paravirt_leave_lazy_cpu(void)
318{
319 paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
320}
321
322enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
323{
324 return x86_read_percpu(paravirt_lazy_mode);
325}
326
327struct pv_info pv_info = {
277 .name = "bare hardware", 328 .name = "bare hardware",
278 .paravirt_enabled = 0, 329 .paravirt_enabled = 0,
279 .kernel_rpl = 0, 330 .kernel_rpl = 0,
280 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ 331 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
332};
281 333
282 .patch = native_patch, 334struct pv_init_ops pv_init_ops = {
335 .patch = native_patch,
283 .banner = default_banner, 336 .banner = default_banner,
284 .arch_setup = paravirt_nop, 337 .arch_setup = paravirt_nop,
285 .memory_setup = machine_specific_memory_setup, 338 .memory_setup = machine_specific_memory_setup,
339};
340
341struct pv_time_ops pv_time_ops = {
342 .time_init = hpet_time_init,
286 .get_wallclock = native_get_wallclock, 343 .get_wallclock = native_get_wallclock,
287 .set_wallclock = native_set_wallclock, 344 .set_wallclock = native_set_wallclock,
288 .time_init = hpet_time_init, 345 .sched_clock = native_sched_clock,
346 .get_cpu_khz = native_calculate_cpu_khz,
347};
348
349struct pv_irq_ops pv_irq_ops = {
289 .init_IRQ = native_init_IRQ, 350 .init_IRQ = native_init_IRQ,
351 .save_fl = native_save_fl,
352 .restore_fl = native_restore_fl,
353 .irq_disable = native_irq_disable,
354 .irq_enable = native_irq_enable,
355 .safe_halt = native_safe_halt,
356 .halt = native_halt,
357};
290 358
359struct pv_cpu_ops pv_cpu_ops = {
291 .cpuid = native_cpuid, 360 .cpuid = native_cpuid,
292 .get_debugreg = native_get_debugreg, 361 .get_debugreg = native_get_debugreg,
293 .set_debugreg = native_set_debugreg, 362 .set_debugreg = native_set_debugreg,
294 .clts = native_clts, 363 .clts = native_clts,
295 .read_cr0 = native_read_cr0, 364 .read_cr0 = native_read_cr0,
296 .write_cr0 = native_write_cr0, 365 .write_cr0 = native_write_cr0,
297 .read_cr2 = native_read_cr2,
298 .write_cr2 = native_write_cr2,
299 .read_cr3 = native_read_cr3,
300 .write_cr3 = native_write_cr3,
301 .read_cr4 = native_read_cr4, 366 .read_cr4 = native_read_cr4,
302 .read_cr4_safe = native_read_cr4_safe, 367 .read_cr4_safe = native_read_cr4_safe,
303 .write_cr4 = native_write_cr4, 368 .write_cr4 = native_write_cr4,
304 .save_fl = native_save_fl,
305 .restore_fl = native_restore_fl,
306 .irq_disable = native_irq_disable,
307 .irq_enable = native_irq_enable,
308 .safe_halt = native_safe_halt,
309 .halt = native_halt,
310 .wbinvd = native_wbinvd, 369 .wbinvd = native_wbinvd,
311 .read_msr = native_read_msr_safe, 370 .read_msr = native_read_msr_safe,
312 .write_msr = native_write_msr_safe, 371 .write_msr = native_write_msr_safe,
313 .read_tsc = native_read_tsc, 372 .read_tsc = native_read_tsc,
314 .read_pmc = native_read_pmc, 373 .read_pmc = native_read_pmc,
315 .sched_clock = native_sched_clock,
316 .get_cpu_khz = native_calculate_cpu_khz,
317 .load_tr_desc = native_load_tr_desc, 374 .load_tr_desc = native_load_tr_desc,
318 .set_ldt = native_set_ldt, 375 .set_ldt = native_set_ldt,
319 .load_gdt = native_load_gdt, 376 .load_gdt = native_load_gdt,
@@ -327,9 +384,19 @@ struct paravirt_ops paravirt_ops = {
327 .write_idt_entry = write_dt_entry, 384 .write_idt_entry = write_dt_entry,
328 .load_esp0 = native_load_esp0, 385 .load_esp0 = native_load_esp0,
329 386
387 .irq_enable_sysexit = native_irq_enable_sysexit,
388 .iret = native_iret,
389
330 .set_iopl_mask = native_set_iopl_mask, 390 .set_iopl_mask = native_set_iopl_mask,
331 .io_delay = native_io_delay, 391 .io_delay = native_io_delay,
332 392
393 .lazy_mode = {
394 .enter = paravirt_nop,
395 .leave = paravirt_nop,
396 },
397};
398
399struct pv_apic_ops pv_apic_ops = {
333#ifdef CONFIG_X86_LOCAL_APIC 400#ifdef CONFIG_X86_LOCAL_APIC
334 .apic_write = native_apic_write, 401 .apic_write = native_apic_write,
335 .apic_write_atomic = native_apic_write_atomic, 402 .apic_write_atomic = native_apic_write_atomic,
@@ -338,11 +405,17 @@ struct paravirt_ops paravirt_ops = {
338 .setup_secondary_clock = setup_secondary_APIC_clock, 405 .setup_secondary_clock = setup_secondary_APIC_clock,
339 .startup_ipi_hook = paravirt_nop, 406 .startup_ipi_hook = paravirt_nop,
340#endif 407#endif
341 .set_lazy_mode = paravirt_nop, 408};
342 409
410struct pv_mmu_ops pv_mmu_ops = {
343 .pagetable_setup_start = native_pagetable_setup_start, 411 .pagetable_setup_start = native_pagetable_setup_start,
344 .pagetable_setup_done = native_pagetable_setup_done, 412 .pagetable_setup_done = native_pagetable_setup_done,
345 413
414 .read_cr2 = native_read_cr2,
415 .write_cr2 = native_write_cr2,
416 .read_cr3 = native_read_cr3,
417 .write_cr3 = native_write_cr3,
418
346 .flush_tlb_user = native_flush_tlb, 419 .flush_tlb_user = native_flush_tlb,
347 .flush_tlb_kernel = native_flush_tlb_global, 420 .flush_tlb_kernel = native_flush_tlb_global,
348 .flush_tlb_single = native_flush_tlb_single, 421 .flush_tlb_single = native_flush_tlb_single,
@@ -381,12 +454,19 @@ struct paravirt_ops paravirt_ops = {
381 .make_pte = native_make_pte, 454 .make_pte = native_make_pte,
382 .make_pgd = native_make_pgd, 455 .make_pgd = native_make_pgd,
383 456
384 .irq_enable_sysexit = native_irq_enable_sysexit,
385 .iret = native_iret,
386
387 .dup_mmap = paravirt_nop, 457 .dup_mmap = paravirt_nop,
388 .exit_mmap = paravirt_nop, 458 .exit_mmap = paravirt_nop,
389 .activate_mm = paravirt_nop, 459 .activate_mm = paravirt_nop,
460
461 .lazy_mode = {
462 .enter = paravirt_nop,
463 .leave = paravirt_nop,
464 },
390}; 465};
391 466
392EXPORT_SYMBOL(paravirt_ops); 467EXPORT_SYMBOL_GPL(pv_time_ops);
468EXPORT_SYMBOL_GPL(pv_cpu_ops);
469EXPORT_SYMBOL_GPL(pv_mmu_ops);
470EXPORT_SYMBOL_GPL(pv_apic_ops);
471EXPORT_SYMBOL_GPL(pv_info);
472EXPORT_SYMBOL (pv_irq_ops);
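
The paravirt_32.c rework above splits the single paravirt_ops structure into pv_info, pv_init_ops, pv_time_ops, pv_cpu_ops, pv_irq_ops, pv_apic_ops and pv_mmu_ops. Patch sites are still identified by a single integer offset, so get_call_destination() rebuilds a struct paravirt_patch_template on the stack from the live sub-structures and indexes it as an array of pointers. The trick in isolation, as a standalone userspace C illustration; it assumes, as the kernel does, that all members are pointers of the same size, and function-pointer/void* interchange is a GCC/x86 assumption rather than strict ISO C:

#include <stdio.h>

/* Two groups of operations, analogous to pv_irq_ops / pv_cpu_ops. */
struct irq_ops { void (*disable)(void); void (*enable)(void); };
struct cpu_ops { void (*halt)(void); };

/*
 * Template concatenating every group; an offset into it, counted in
 * pointer units, identifies one operation across all groups.
 */
struct patch_template { struct irq_ops irq; struct cpu_ops cpu; };

static void my_disable(void) { puts("disable"); }
static void my_enable(void)  { puts("enable"); }
static void my_halt(void)    { puts("halt"); }

static struct irq_ops irq_ops = { my_disable, my_enable };
static struct cpu_ops cpu_ops = { my_halt };

/* Equivalent of get_call_destination(): copy the live structures into
 * a template and treat it as an array of void pointers. */
static void *call_destination(unsigned type)
{
	struct patch_template tmpl = { .irq = irq_ops, .cpu = cpu_ops };

	return *((void **)&tmpl + type);
}

int main(void)
{
	/* offset 2 (in pointer units) lands on cpu.halt */
	printf("%s\n", call_destination(2) == (void *)my_halt ? "halt" : "?");
	return 0;
}
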
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index a50b787b3bfa..5098f58063a5 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -222,10 +222,10 @@ static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
222 return npages; 222 return npages;
223} 223}
224 224
225static inline int translate_phb(struct pci_dev* dev) 225static inline int translation_enabled(struct iommu_table *tbl)
226{ 226{
227 int disabled = bus_info[dev->bus->number].translation_disabled; 227 /* only PHBs with translation enabled have an IOMMU table */
228 return !disabled; 228 return (tbl != NULL);
229} 229}
230 230
231static void iommu_range_reserve(struct iommu_table *tbl, 231static void iommu_range_reserve(struct iommu_table *tbl,
@@ -388,7 +388,7 @@ static void calgary_unmap_sg(struct device *dev,
388 struct scatterlist *s; 388 struct scatterlist *s;
389 int i; 389 int i;
390 390
391 if (!translate_phb(to_pci_dev(dev))) 391 if (!translation_enabled(tbl))
392 return; 392 return;
393 393
394 for_each_sg(sglist, s, nelems, i) { 394 for_each_sg(sglist, s, nelems, i) {
@@ -428,7 +428,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
428 unsigned long entry; 428 unsigned long entry;
429 int i; 429 int i;
430 430
431 if (!translate_phb(to_pci_dev(dev))) 431 if (!translation_enabled(tbl))
432 return calgary_nontranslate_map_sg(dev, sg, nelems, direction); 432 return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
433 433
434 for_each_sg(sg, s, nelems, i) { 434 for_each_sg(sg, s, nelems, i) {
@@ -474,7 +474,7 @@ static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
474 uaddr = (unsigned long)vaddr; 474 uaddr = (unsigned long)vaddr;
475 npages = num_dma_pages(uaddr, size); 475 npages = num_dma_pages(uaddr, size);
476 476
477 if (translate_phb(to_pci_dev(dev))) 477 if (translation_enabled(tbl))
478 dma_handle = iommu_alloc(tbl, vaddr, npages, direction); 478 dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
479 else 479 else
480 dma_handle = virt_to_bus(vaddr); 480 dma_handle = virt_to_bus(vaddr);
@@ -488,7 +488,7 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
488 struct iommu_table *tbl = find_iommu_table(dev); 488 struct iommu_table *tbl = find_iommu_table(dev);
489 unsigned int npages; 489 unsigned int npages;
490 490
491 if (!translate_phb(to_pci_dev(dev))) 491 if (!translation_enabled(tbl))
492 return; 492 return;
493 493
494 npages = num_dma_pages(dma_handle, size); 494 npages = num_dma_pages(dma_handle, size);
@@ -513,7 +513,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
513 goto error; 513 goto error;
514 memset(ret, 0, size); 514 memset(ret, 0, size);
515 515
516 if (translate_phb(to_pci_dev(dev))) { 516 if (translation_enabled(tbl)) {
517 /* set up tces to cover the allocated range */ 517 /* set up tces to cover the allocated range */
518 mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL); 518 mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
519 if (mapping == bad_dma_address) 519 if (mapping == bad_dma_address)
@@ -1194,7 +1194,7 @@ static int __init calgary_init(void)
1194{ 1194{
1195 int ret; 1195 int ret;
1196 struct pci_dev *dev = NULL; 1196 struct pci_dev *dev = NULL;
1197 void *tce_space; 1197 struct calgary_bus_info *info;
1198 1198
1199 ret = calgary_locate_bbars(); 1199 ret = calgary_locate_bbars();
1200 if (ret) 1200 if (ret)
@@ -1206,12 +1206,14 @@ static int __init calgary_init(void)
1206 break; 1206 break;
1207 if (!is_cal_pci_dev(dev->device)) 1207 if (!is_cal_pci_dev(dev->device))
1208 continue; 1208 continue;
1209 if (!translate_phb(dev)) { 1209
1210 info = &bus_info[dev->bus->number];
1211 if (info->translation_disabled) {
1210 calgary_init_one_nontraslated(dev); 1212 calgary_init_one_nontraslated(dev);
1211 continue; 1213 continue;
1212 } 1214 }
1213 tce_space = bus_info[dev->bus->number].tce_space; 1215
1214 if (!tce_space && !translate_empty_slots) 1216 if (!info->tce_space && !translate_empty_slots)
1215 continue; 1217 continue;
1216 1218
1217 ret = calgary_init_one(dev); 1219 ret = calgary_init_one(dev);
@@ -1229,11 +1231,13 @@ error:
1229 break; 1231 break;
1230 if (!is_cal_pci_dev(dev->device)) 1232 if (!is_cal_pci_dev(dev->device))
1231 continue; 1233 continue;
1232 if (!translate_phb(dev)) { 1234
1235 info = &bus_info[dev->bus->number];
1236 if (info->translation_disabled) {
1233 pci_dev_put(dev); 1237 pci_dev_put(dev);
1234 continue; 1238 continue;
1235 } 1239 }
1236 if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots) 1240 if (!info->tce_space && !translate_empty_slots)
1237 continue; 1241 continue;
1238 1242
1239 calgary_disable_translation(dev); 1243 calgary_disable_translation(dev);
@@ -1546,7 +1550,7 @@ static void __init calgary_fixup_one_tce_space(struct pci_dev *dev)
1546static int __init calgary_fixup_tce_spaces(void) 1550static int __init calgary_fixup_tce_spaces(void)
1547{ 1551{
1548 struct pci_dev *dev = NULL; 1552 struct pci_dev *dev = NULL;
1549 void *tce_space; 1553 struct calgary_bus_info *info;
1550 1554
1551 if (no_iommu || swiotlb || !calgary_detected) 1555 if (no_iommu || swiotlb || !calgary_detected)
1552 return -ENODEV; 1556 return -ENODEV;
@@ -1559,11 +1563,12 @@ static int __init calgary_fixup_tce_spaces(void)
1559 break; 1563 break;
1560 if (!is_cal_pci_dev(dev->device)) 1564 if (!is_cal_pci_dev(dev->device))
1561 continue; 1565 continue;
1562 if (!translate_phb(dev)) 1566
1567 info = &bus_info[dev->bus->number];
1568 if (info->translation_disabled)
1563 continue; 1569 continue;
1564 1570
1565 tce_space = bus_info[dev->bus->number].tce_space; 1571 if (!info->tce_space)
1566 if (!tce_space)
1567 continue; 1572 continue;
1568 1573
1569 calgary_fixup_one_tce_space(dev); 1574 calgary_fixup_one_tce_space(dev);
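
The Calgary IOMMU hunks replace translate_phb(pci_dev), which consulted a separate per-bus translation_disabled flag, with translation_enabled(tbl): only PHBs with translation active are ever given an IOMMU table, so a NULL test on the table the mapping path already holds (via find_iommu_table(), as in calgary_map_single() above) is enough, and calgary_init() just caches a bus_info pointer in a local. A sketch of the check, with the table type left opaque:

#include <linux/types.h>

struct iommu_table;	/* per-PHB translation table, opaque here */

/*
 * Only buses with translation enabled get an IOMMU table assigned, so
 * the table pointer doubles as the "is translation on?" flag and no
 * separate per-bus lookup by PCI bus number is needed.
 */
static inline int translation_enabled(struct iommu_table *tbl)
{
	return tbl != NULL;
}
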
diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
index 0aae2f3847a5..51330321a5d3 100644
--- a/arch/x86/kernel/pci-dma_32.c
+++ b/arch/x86/kernel/pci-dma_32.c
@@ -12,7 +12,6 @@
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/pci.h>
16#include <asm/io.h> 15#include <asm/io.h>
17 16
18struct dma_coherent_mem { 17struct dma_coherent_mem {
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index 9576a2eb375e..b2b42bdb0a15 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -51,11 +51,9 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
51{ 51{
52 struct page *page; 52 struct page *page;
53 int node; 53 int node;
54#ifdef CONFIG_PCI 54
55 if (dev->bus == &pci_bus_type) 55 node = dev_to_node(dev);
56 node = pcibus_to_node(to_pci_dev(dev)->bus); 56 if (node == -1)
57 else
58#endif
59 node = numa_node_id(); 57 node = numa_node_id();
60 58
61 if (node < first_node(node_online_map)) 59 if (node < first_node(node_online_map))
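
dma_alloc_pages() now asks the driver core for the device's NUMA node via dev_to_node() instead of special-casing PCI devices, and falls back to the current CPU's node when no affinity information is recorded (-1). Roughly, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/topology.h>

/* Pick the NUMA node to allocate DMA memory on for this device. */
static int example_dma_node(struct device *dev)
{
	int node = dev_to_node(dev);

	if (node == -1)		/* no affinity information recorded */
		node = numa_node_id();
	return node;
}
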
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index cfcc84e6c350..5cdfab65e93f 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -8,6 +8,7 @@
8 * See Documentation/DMA-mapping.txt for the interface specification. 8 * See Documentation/DMA-mapping.txt for the interface specification.
9 * 9 *
10 * Copyright 2002 Andi Kleen, SuSE Labs. 10 * Copyright 2002 Andi Kleen, SuSE Labs.
11 * Subject to the GNU General Public License v2 only.
11 */ 12 */
12 13
13#include <linux/types.h> 14#include <linux/types.h>
@@ -375,7 +376,8 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems,
375 * DMA map all entries in a scatterlist. 376 * DMA map all entries in a scatterlist.
376 * Merge chunks that have page aligned sizes into a continuous mapping. 377 * Merge chunks that have page aligned sizes into a continuous mapping.
377 */ 378 */
378int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) 379static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
380 int dir)
379{ 381{
380 int i; 382 int i;
381 int out; 383 int out;
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
index 8622b9cd3e38..99102ec5fade 100644
--- a/arch/x86/kernel/ptrace_32.c
+++ b/arch/x86/kernel/ptrace_32.c
@@ -165,7 +165,7 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_
165 165
166 seg &= ~7UL; 166 seg &= ~7UL;
167 167
168 down(&child->mm->context.sem); 168 mutex_lock(&child->mm->context.lock);
169 if (unlikely((seg >> 3) >= child->mm->context.size)) 169 if (unlikely((seg >> 3) >= child->mm->context.size))
170 addr = -1L; /* bogus selector, access would fault */ 170 addr = -1L; /* bogus selector, access would fault */
171 else { 171 else {
@@ -179,7 +179,7 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_
179 addr &= 0xffff; 179 addr &= 0xffff;
180 addr += base; 180 addr += base;
181 } 181 }
182 up(&child->mm->context.sem); 182 mutex_unlock(&child->mm->context.lock);
183 } 183 }
184 return addr; 184 return addr;
185} 185}
diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
index 86321ee6da93..607085f3f08a 100644
--- a/arch/x86/kernel/ptrace_64.c
+++ b/arch/x86/kernel/ptrace_64.c
@@ -103,7 +103,7 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r
103 103
104 seg &= ~7UL; 104 seg &= ~7UL;
105 105
106 down(&child->mm->context.sem); 106 mutex_lock(&child->mm->context.lock);
107 if (unlikely((seg >> 3) >= child->mm->context.size)) 107 if (unlikely((seg >> 3) >= child->mm->context.size))
108 addr = -1L; /* bogus selector, access would fault */ 108 addr = -1L; /* bogus selector, access would fault */
109 else { 109 else {
@@ -117,7 +117,7 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r
117 addr &= 0xffff; 117 addr &= 0xffff;
118 addr += base; 118 addr += base;
119 } 119 }
120 up(&child->mm->context.sem); 120 mutex_unlock(&child->mm->context.lock);
121 } 121 }
122 122
123 return addr; 123 return addr;
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 8159bf0be17a..5a19f0cc5b67 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -604,7 +604,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
604 level = cpuid_eax(1); 604 level = cpuid_eax(1);
605 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) 605 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
606 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); 606 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
607 if (c->x86 == 0x10) 607 if (c->x86 == 0x10 || c->x86 == 0x11)
608 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); 608 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
609 609
610 /* Enable workaround for FXSAVE leak */ 610 /* Enable workaround for FXSAVE leak */
@@ -968,7 +968,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
968 * applications want to get the raw CPUID data, they should access 968 * applications want to get the raw CPUID data, they should access
969 * /dev/cpu/<cpu_nr>/cpuid instead. 969 * /dev/cpu/<cpu_nr>/cpuid instead.
970 */ 970 */
971 static char *x86_cap_flags[] = { 971 static const char *const x86_cap_flags[] = {
972 /* Intel-defined */ 972 /* Intel-defined */
973 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 973 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
974 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 974 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
@@ -1022,7 +1022,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1022 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1022 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1023 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1023 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1024 }; 1024 };
1025 static char *x86_power_flags[] = { 1025 static const char *const x86_power_flags[] = {
1026 "ts", /* temperature sensor */ 1026 "ts", /* temperature sensor */
1027 "fid", /* frequency id control */ 1027 "fid", /* frequency id control */
1028 "vid", /* voltage id control */ 1028 "vid", /* voltage id control */
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index d01d51fcce2a..0d79df3c5631 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -385,7 +385,6 @@ static int setup_frame(int sig, struct k_sigaction *ka,
385 regs->edx = (unsigned long) 0; 385 regs->edx = (unsigned long) 0;
386 regs->ecx = (unsigned long) 0; 386 regs->ecx = (unsigned long) 0;
387 387
388 set_fs(USER_DS);
389 regs->xds = __USER_DS; 388 regs->xds = __USER_DS;
390 regs->xes = __USER_DS; 389 regs->xes = __USER_DS;
391 regs->xss = __USER_DS; 390 regs->xss = __USER_DS;
@@ -479,7 +478,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
479 regs->edx = (unsigned long) &frame->info; 478 regs->edx = (unsigned long) &frame->info;
480 regs->ecx = (unsigned long) &frame->uc; 479 regs->ecx = (unsigned long) &frame->uc;
481 480
482 set_fs(USER_DS);
483 regs->xds = __USER_DS; 481 regs->xds = __USER_DS;
484 regs->xes = __USER_DS; 482 regs->xes = __USER_DS;
485 regs->xss = __USER_DS; 483 regs->xss = __USER_DS;
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 2d35d8502029..791d9f8036ae 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -342,6 +342,7 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
342 smp_mb__after_clear_bit(); 342 smp_mb__after_clear_bit();
343out: 343out:
344 put_cpu_no_resched(); 344 put_cpu_no_resched();
345 __get_cpu_var(irq_stat).irq_tlb_count++;
345} 346}
346 347
347void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, 348void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
@@ -640,6 +641,7 @@ static void native_smp_send_stop(void)
640fastcall void smp_reschedule_interrupt(struct pt_regs *regs) 641fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
641{ 642{
642 ack_APIC_irq(); 643 ack_APIC_irq();
644 __get_cpu_var(irq_stat).irq_resched_count++;
643} 645}
644 646
645fastcall void smp_call_function_interrupt(struct pt_regs *regs) 647fastcall void smp_call_function_interrupt(struct pt_regs *regs)
@@ -660,6 +662,7 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
660 */ 662 */
661 irq_enter(); 663 irq_enter();
662 (*func)(info); 664 (*func)(info);
665 __get_cpu_var(irq_stat).irq_call_count++;
663 irq_exit(); 666 irq_exit();
664 667
665 if (wait) { 668 if (wait) {
@@ -705,3 +708,10 @@ struct smp_ops smp_ops = {
705 .smp_send_reschedule = native_smp_send_reschedule, 708 .smp_send_reschedule = native_smp_send_reschedule,
706 .smp_call_function_mask = native_smp_call_function_mask, 709 .smp_call_function_mask = native_smp_call_function_mask,
707}; 710};
711
712int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
713 void *info, int wait)
714{
715 return smp_ops.smp_call_function_mask(mask, func, info, wait);
716}
717EXPORT_SYMBOL(smp_call_function_mask);
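
smp_32.c gains an exported smp_call_function_mask() that simply forwards to the smp_ops hook, so generic callers and paravirt backends share one entry point, while each IPI handler now bumps its per-CPU statistic. The forwarding-wrapper pattern, sketched with hypothetical example_ names:

#include <linux/cpumask.h>
#include <linux/module.h>

/* Table of SMP primitives a paravirt backend may override. */
struct example_smp_ops {
	int (*call_function_mask)(cpumask_t mask, void (*func)(void *),
				  void *info, int wait);
};

static int example_native_call_function_mask(cpumask_t mask,
					     void (*func)(void *),
					     void *info, int wait)
{
	/* a native implementation would send the IPIs here */
	return 0;
}

struct example_smp_ops example_smp_ops = {
	.call_function_mask = example_native_call_function_mask,
};

/*
 * Thin exported wrapper: generic code calls this, and the currently
 * installed backend decides how the cross-CPU call is delivered.
 */
int example_call_function_mask(cpumask_t mask, void (*func)(void *),
			       void *info, int wait)
{
	return example_smp_ops.call_function_mask(mask, func, info, wait);
}
EXPORT_SYMBOL(example_call_function_mask);
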
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index df4a82812adb..5c2964727d19 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -163,6 +163,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
163out: 163out:
164 ack_APIC_irq(); 164 ack_APIC_irq();
165 cpu_clear(cpu, f->flush_cpumask); 165 cpu_clear(cpu, f->flush_cpumask);
166 add_pda(irq_tlb_count, 1);
166} 167}
167 168
168static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, 169static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
@@ -493,6 +494,7 @@ void smp_send_stop(void)
493asmlinkage void smp_reschedule_interrupt(void) 494asmlinkage void smp_reschedule_interrupt(void)
494{ 495{
495 ack_APIC_irq(); 496 ack_APIC_irq();
497 add_pda(irq_resched_count, 1);
496} 498}
497 499
498asmlinkage void smp_call_function_interrupt(void) 500asmlinkage void smp_call_function_interrupt(void)
@@ -514,6 +516,7 @@ asmlinkage void smp_call_function_interrupt(void)
514 exit_idle(); 516 exit_idle();
515 irq_enter(); 517 irq_enter();
516 (*func)(info); 518 (*func)(info);
519 add_pda(irq_call_count, 1);
517 irq_exit(); 520 irq_exit();
518 if (wait) { 521 if (wait) {
519 mb(); 522 mb();
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 31fc08bd15ef..be3faac04719 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -102,8 +102,8 @@ u8 apicid_2_node[MAX_APICID];
102 * Trampoline 80x86 program as an array. 102 * Trampoline 80x86 program as an array.
103 */ 103 */
104 104
105extern unsigned char trampoline_data []; 105extern const unsigned char trampoline_data [];
106extern unsigned char trampoline_end []; 106extern const unsigned char trampoline_end [];
107static unsigned char *trampoline_base; 107static unsigned char *trampoline_base;
108static int trampoline_exec; 108static int trampoline_exec;
109 109
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
118 * has made sure it's suitably aligned. 118 * has made sure it's suitably aligned.
119 */ 119 */
120 120
121static unsigned long __devinit setup_trampoline(void) 121static unsigned long __cpuinit setup_trampoline(void)
122{ 122{
123 memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); 123 memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
124 return virt_to_phys(trampoline_base); 124 return virt_to_phys(trampoline_base);
@@ -1021,6 +1021,12 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1021 if (!max_cpus) { 1021 if (!max_cpus) {
1022 smp_found_config = 0; 1022 smp_found_config = 0;
1023 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); 1023 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
1024
1025 if (nmi_watchdog == NMI_LOCAL_APIC) {
1026 printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n");
1027 connect_bsp_APIC();
1028 setup_local_APIC();
1029 }
1024 smpboot_clear_io_apic_irqs(); 1030 smpboot_clear_io_apic_irqs();
1025 phys_cpu_present_map = physid_mask_of_physid(0); 1031 phys_cpu_present_map = physid_mask_of_physid(0);
1026 cpu_set(0, per_cpu(cpu_sibling_map, 0)); 1032 cpu_set(0, per_cpu(cpu_sibling_map, 0));
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 0faa0a0af272..e351ac4ab5b1 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -102,8 +102,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
102 * Trampoline 80x86 program as an array. 102 * Trampoline 80x86 program as an array.
103 */ 103 */
104 104
105extern unsigned char trampoline_data[]; 105extern const unsigned char trampoline_data[];
106extern unsigned char trampoline_end[]; 106extern const unsigned char trampoline_end[];
107 107
108/* State of each CPU */ 108/* State of each CPU */
109DEFINE_PER_CPU(int, cpu_state) = { 0 }; 109DEFINE_PER_CPU(int, cpu_state) = { 0 };
@@ -695,7 +695,6 @@ do_rest:
695 cpu_clear(cpu, cpu_present_map); 695 cpu_clear(cpu, cpu_present_map);
696 cpu_clear(cpu, cpu_possible_map); 696 cpu_clear(cpu, cpu_possible_map);
697 x86_cpu_to_apicid[cpu] = BAD_APICID; 697 x86_cpu_to_apicid[cpu] = BAD_APICID;
698 x86_cpu_to_log_apicid[cpu] = BAD_APICID;
699 return -EIO; 698 return -EIO;
700 } 699 }
701 700
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 413e527cdeb9..6fa6cf036c70 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -33,7 +33,7 @@ static void save_stack_address(void *data, unsigned long addr)
33 trace->entries[trace->nr_entries++] = addr; 33 trace->entries[trace->nr_entries++] = addr;
34} 34}
35 35
36static struct stacktrace_ops save_stack_ops = { 36static const struct stacktrace_ops save_stack_ops = {
37 .warning = save_stack_warning, 37 .warning = save_stack_warning,
38 .warning_symbol = save_stack_warning_symbol, 38 .warning_symbol = save_stack_warning_symbol,
39 .stack = save_stack_stack, 39 .stack = save_stack_stack,
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
index e3f2569b2c44..9e540fee7009 100644
--- a/arch/x86/kernel/tce_64.c
+++ b/arch/x86/kernel/tce_64.c
@@ -40,9 +40,9 @@ static inline void flush_tce(void* tceaddr)
40{ 40{
41 /* a single tce can't cross a cache line */ 41 /* a single tce can't cross a cache line */
42 if (cpu_has_clflush) 42 if (cpu_has_clflush)
43 asm volatile("clflush (%0)" :: "r" (tceaddr)); 43 clflush(tceaddr);
44 else 44 else
45 asm volatile("wbinvd":::"memory"); 45 wbinvd();
46} 46}
47 47
48void tce_build(struct iommu_table *tbl, unsigned long index, 48void tce_build(struct iommu_table *tbl, unsigned long index,
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index c25f23eb397c..8caa0b777466 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -44,15 +44,15 @@ int arch_register_cpu(int num)
44 * Also certain PCI quirks require not to enable hotplug control 44 * Also certain PCI quirks require not to enable hotplug control
45 * for all CPU's. 45 * for all CPU's.
46 */ 46 */
47 if (num && enable_cpu_hotplug) 47#ifdef CONFIG_HOTPLUG_CPU
48 if (num)
48 cpu_devices[num].cpu.hotpluggable = 1; 49 cpu_devices[num].cpu.hotpluggable = 1;
50#endif
49 51
50 return register_cpu(&cpu_devices[num].cpu, num); 52 return register_cpu(&cpu_devices[num].cpu, num);
51} 53}
52 54
53#ifdef CONFIG_HOTPLUG_CPU 55#ifdef CONFIG_HOTPLUG_CPU
54int enable_cpu_hotplug = 1;
55
56void arch_unregister_cpu(int num) { 56void arch_unregister_cpu(int num) {
57 return unregister_cpu(&cpu_devices[num].cpu); 57 return unregister_cpu(&cpu_devices[num].cpu);
58} 58}
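
topology.c drops the runtime enable_cpu_hotplug variable; whether secondary CPUs are marked hotpluggable is now decided at compile time by CONFIG_HOTPLUG_CPU. The resulting shape of the registration path, sketched with a placeholder device array:

#include <linux/cpu.h>
#include <linux/threads.h>

static struct cpu example_cpu_devices[NR_CPUS];

int example_register_cpu(int num)
{
#ifdef CONFIG_HOTPLUG_CPU
	/* CPU 0 stays non-hotpluggable; every other CPU may be removed. */
	if (num)
		example_cpu_devices[num].hotpluggable = 1;
#endif
	return register_cpu(&example_cpu_devices[num], num);
}
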
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index f62815f8d06a..9bcc1c6aca3d 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -36,11 +36,11 @@
36#include <asm/segment.h> 36#include <asm/segment.h>
37#include <asm/page.h> 37#include <asm/page.h>
38 38
39.data
40
41/* We can free up trampoline after bootup if cpu hotplug is not supported. */ 39/* We can free up trampoline after bootup if cpu hotplug is not supported. */
42#ifndef CONFIG_HOTPLUG_CPU 40#ifndef CONFIG_HOTPLUG_CPU
43.section ".init.data","aw",@progbits 41.section ".init.data","aw",@progbits
42#else
43.section .rodata,"a",@progbits
44#endif 44#endif
45 45
46.code16 46.code16
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 607983b0d27b..e30b67c6a9f5 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -33,7 +33,12 @@
33#include <asm/msr.h> 33#include <asm/msr.h>
34#include <asm/segment.h> 34#include <asm/segment.h>
35 35
36.data 36/* We can free up trampoline after bootup if cpu hotplug is not supported. */
37#ifndef CONFIG_HOTPLUG_CPU
38.section .init.data, "aw", @progbits
39#else
40.section .rodata, "a", @progbits
41#endif
37 42
38.code16 43.code16
39 44
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 05c27ecaf2a7..b132d3957dfc 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -112,7 +112,7 @@ struct stack_frame {
112 112
113static inline unsigned long print_context_stack(struct thread_info *tinfo, 113static inline unsigned long print_context_stack(struct thread_info *tinfo,
114 unsigned long *stack, unsigned long ebp, 114 unsigned long *stack, unsigned long ebp,
115 struct stacktrace_ops *ops, void *data) 115 const struct stacktrace_ops *ops, void *data)
116{ 116{
117#ifdef CONFIG_FRAME_POINTER 117#ifdef CONFIG_FRAME_POINTER
118 struct stack_frame *frame = (struct stack_frame *)ebp; 118 struct stack_frame *frame = (struct stack_frame *)ebp;
@@ -149,7 +149,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
149 149
150void dump_trace(struct task_struct *task, struct pt_regs *regs, 150void dump_trace(struct task_struct *task, struct pt_regs *regs,
151 unsigned long *stack, 151 unsigned long *stack,
152 struct stacktrace_ops *ops, void *data) 152 const struct stacktrace_ops *ops, void *data)
153{ 153{
154 unsigned long ebp = 0; 154 unsigned long ebp = 0;
155 155
@@ -221,7 +221,7 @@ static void print_trace_address(void *data, unsigned long addr)
221 touch_nmi_watchdog(); 221 touch_nmi_watchdog();
222} 222}
223 223
224static struct stacktrace_ops print_trace_ops = { 224static const struct stacktrace_ops print_trace_ops = {
225 .warning = print_trace_warning, 225 .warning = print_trace_warning,
226 .warning_symbol = print_trace_warning_symbol, 226 .warning_symbol = print_trace_warning_symbol,
227 .stack = print_trace_stack, 227 .stack = print_trace_stack,
@@ -398,31 +398,24 @@ void die(const char * str, struct pt_regs * regs, long err)
398 local_save_flags(flags); 398 local_save_flags(flags);
399 399
400 if (++die.lock_owner_depth < 3) { 400 if (++die.lock_owner_depth < 3) {
401 int nl = 0;
402 unsigned long esp; 401 unsigned long esp;
403 unsigned short ss; 402 unsigned short ss;
404 403
405 report_bug(regs->eip, regs); 404 report_bug(regs->eip, regs);
406 405
407 printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); 406 printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
407 ++die_counter);
408#ifdef CONFIG_PREEMPT 408#ifdef CONFIG_PREEMPT
409 printk(KERN_EMERG "PREEMPT "); 409 printk("PREEMPT ");
410 nl = 1;
411#endif 410#endif
412#ifdef CONFIG_SMP 411#ifdef CONFIG_SMP
413 if (!nl)
414 printk(KERN_EMERG);
415 printk("SMP "); 412 printk("SMP ");
416 nl = 1;
417#endif 413#endif
418#ifdef CONFIG_DEBUG_PAGEALLOC 414#ifdef CONFIG_DEBUG_PAGEALLOC
419 if (!nl)
420 printk(KERN_EMERG);
421 printk("DEBUG_PAGEALLOC"); 415 printk("DEBUG_PAGEALLOC");
422 nl = 1;
423#endif 416#endif
424 if (nl) 417 printk("\n");
425 printk("\n"); 418
426 if (notify_die(DIE_OOPS, str, regs, err, 419 if (notify_die(DIE_OOPS, str, regs, err,
427 current->thread.trap_no, SIGSEGV) != 420 current->thread.trap_no, SIGSEGV) !=
428 NOTIFY_STOP) { 421 NOTIFY_STOP) {
@@ -1112,20 +1105,6 @@ asmlinkage void math_emulate(long arg)
1112 1105
1113#endif /* CONFIG_MATH_EMULATION */ 1106#endif /* CONFIG_MATH_EMULATION */
1114 1107
1115#ifdef CONFIG_X86_F00F_BUG
1116void __init trap_init_f00f_bug(void)
1117{
1118 __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
1119
1120 /*
1121 * Update the IDT descriptor and reload the IDT so that
1122 * it uses the read-only mapped virtual address.
1123 */
1124 idt_descr.address = fix_to_virt(FIX_F00F_IDT);
1125 load_idt(&idt_descr);
1126}
1127#endif
1128
1129/* 1108/*
1130 * This needs to use 'idt_table' rather than 'idt', and 1109 * This needs to use 'idt_table' rather than 'idt', and
1131 * thus use the _nonmapped_ version of the IDT, as the 1110 * thus use the _nonmapped_ version of the IDT, as the
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index bc7116acf8ff..b4a9b3db1994 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -215,7 +215,7 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
215 215
216void dump_trace(struct task_struct *tsk, struct pt_regs *regs, 216void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
217 unsigned long *stack, 217 unsigned long *stack,
218 struct stacktrace_ops *ops, void *data) 218 const struct stacktrace_ops *ops, void *data)
219{ 219{
220 const unsigned cpu = get_cpu(); 220 const unsigned cpu = get_cpu();
221 unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; 221 unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
@@ -336,7 +336,7 @@ static void print_trace_address(void *data, unsigned long addr)
336 printk_address(addr); 336 printk_address(addr);
337} 337}
338 338
339static struct stacktrace_ops print_trace_ops = { 339static const struct stacktrace_ops print_trace_ops = {
340 .warning = print_trace_warning, 340 .warning = print_trace_warning,
341 .warning_symbol = print_trace_warning_symbol, 341 .warning_symbol = print_trace_warning_symbol,
342 .stack = print_trace_stack, 342 .stack = print_trace_stack,
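The two hunks above only add const to the callback table and to the dump_trace() prototype; behaviour is unchanged, the table can simply live in rodata now. A minimal caller-side sketch under the new signature, reusing the print_trace_* helpers visible in traps_32.c/traps_64.c (the oops_printer and oops_print_address names are hypothetical):

	#include <linux/kernel.h>
	#include <asm/stacktrace.h>

	/* Sketch: a read-only ops table passed to the constified dump_trace(). */
	static void oops_print_address(void *data, unsigned long addr)
	{
		printk(" [<%08lx>]\n", addr);
	}

	static const struct stacktrace_ops oops_printer = {
		.warning	= print_trace_warning,		/* helpers from these files */
		.warning_symbol	= print_trace_warning_symbol,
		.stack		= print_trace_stack,
		.address	= oops_print_address,
	};

	/* usage: dump_trace(current, NULL, NULL, &oops_printer, NULL); */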
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index b85ad754f70e..e87a3939ed40 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -349,10 +349,10 @@ __cpuinit int unsynchronized_tsc(void)
349 349
350static void __init check_geode_tsc_reliable(void) 350static void __init check_geode_tsc_reliable(void)
351{ 351{
352 unsigned long val; 352 unsigned long res_low, res_high;
353 353
354 rdmsrl(MSR_GEODE_BUSCONT_CONF0, val); 354 rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
355 if ((val & RTSC_SUSP)) 355 if (res_low & RTSC_SUSP)
356 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; 356 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
357} 357}
358#else 358#else
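The point of the hunk above is that rdmsrl() on MSR_GEODE_BUSCONT_CONF0 would fault on CPUs that do not implement that MSR, while rdmsr_safe() traps the #GP and reports it through its return value. A hedged sketch of the same probe-an-optional-MSR pattern, with the return value checked explicitly (the helper name is hypothetical; the MSR and flag names are the ones used in the hunk):

	#include <linux/kernel.h>
	#include <asm/msr.h>

	/* Sketch: read a model-specific MSR without oopsing on CPUs that lack it. */
	static void __init probe_geode_rtsc(void)
	{
		u32 lo, hi;

		if (rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &lo, &hi))
			return;		/* RDMSR faulted: MSR not present here */

		if (lo & RTSC_SUSP)
			printk(KERN_INFO "TSC keeps counting across suspend\n");
	}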
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 18673e0f193b..f02bad68abaa 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -134,21 +134,21 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
134 unsigned long eip, unsigned len) 134 unsigned long eip, unsigned len)
135{ 135{
136 switch (type) { 136 switch (type) {
137 case PARAVIRT_PATCH(irq_disable): 137 case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
138 return patch_internal(VMI_CALL_DisableInterrupts, len, 138 return patch_internal(VMI_CALL_DisableInterrupts, len,
139 insns, eip); 139 insns, eip);
140 case PARAVIRT_PATCH(irq_enable): 140 case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
141 return patch_internal(VMI_CALL_EnableInterrupts, len, 141 return patch_internal(VMI_CALL_EnableInterrupts, len,
142 insns, eip); 142 insns, eip);
143 case PARAVIRT_PATCH(restore_fl): 143 case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
144 return patch_internal(VMI_CALL_SetInterruptMask, len, 144 return patch_internal(VMI_CALL_SetInterruptMask, len,
145 insns, eip); 145 insns, eip);
146 case PARAVIRT_PATCH(save_fl): 146 case PARAVIRT_PATCH(pv_irq_ops.save_fl):
147 return patch_internal(VMI_CALL_GetInterruptMask, len, 147 return patch_internal(VMI_CALL_GetInterruptMask, len,
148 insns, eip); 148 insns, eip);
149 case PARAVIRT_PATCH(iret): 149 case PARAVIRT_PATCH(pv_cpu_ops.iret):
150 return patch_internal(VMI_CALL_IRET, len, insns, eip); 150 return patch_internal(VMI_CALL_IRET, len, insns, eip);
151 case PARAVIRT_PATCH(irq_enable_sysexit): 151 case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
152 return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip); 152 return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
153 default: 153 default:
154 break; 154 break;
@@ -552,24 +552,22 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
552} 552}
553#endif 553#endif
554 554
555static void vmi_set_lazy_mode(enum paravirt_lazy_mode mode) 555static void vmi_enter_lazy_cpu(void)
556{ 556{
557 static DEFINE_PER_CPU(enum paravirt_lazy_mode, lazy_mode); 557 paravirt_enter_lazy_cpu();
558 558 vmi_ops.set_lazy_mode(2);
559 if (!vmi_ops.set_lazy_mode) 559}
560 return;
561 560
562 /* Modes should never nest or overlap */ 561static void vmi_enter_lazy_mmu(void)
563 BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE || 562{
564 mode == PARAVIRT_LAZY_FLUSH)); 563 paravirt_enter_lazy_mmu();
564 vmi_ops.set_lazy_mode(1);
565}
565 566
566 if (mode == PARAVIRT_LAZY_FLUSH) { 567static void vmi_leave_lazy(void)
567 vmi_ops.set_lazy_mode(0); 568{
568 vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode)); 569 paravirt_leave_lazy(paravirt_get_lazy_mode());
569 } else { 570 vmi_ops.set_lazy_mode(0);
570 vmi_ops.set_lazy_mode(mode);
571 __get_cpu_var(lazy_mode) = mode;
572 }
573} 571}
574 572
575static inline int __init check_vmi_rom(struct vrom_header *rom) 573static inline int __init check_vmi_rom(struct vrom_header *rom)
@@ -690,9 +688,9 @@ do { \
690 reloc = call_vrom_long_func(vmi_rom, get_reloc, \ 688 reloc = call_vrom_long_func(vmi_rom, get_reloc, \
691 VMI_CALL_##vmicall); \ 689 VMI_CALL_##vmicall); \
692 if (rel->type == VMI_RELOCATION_CALL_REL) \ 690 if (rel->type == VMI_RELOCATION_CALL_REL) \
693 paravirt_ops.opname = (void *)rel->eip; \ 691 opname = (void *)rel->eip; \
694 else if (rel->type == VMI_RELOCATION_NOP) \ 692 else if (rel->type == VMI_RELOCATION_NOP) \
695 paravirt_ops.opname = (void *)vmi_nop; \ 693 opname = (void *)vmi_nop; \
696 else if (rel->type != VMI_RELOCATION_NONE) \ 694 else if (rel->type != VMI_RELOCATION_NONE) \
697 printk(KERN_WARNING "VMI: Unknown relocation " \ 695 printk(KERN_WARNING "VMI: Unknown relocation " \
698 "type %d for " #vmicall"\n",\ 696 "type %d for " #vmicall"\n",\
@@ -712,7 +710,7 @@ do { \
712 VMI_CALL_##vmicall); \ 710 VMI_CALL_##vmicall); \
713 BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \ 711 BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \
714 if (rel->type == VMI_RELOCATION_CALL_REL) { \ 712 if (rel->type == VMI_RELOCATION_CALL_REL) { \
715 paravirt_ops.opname = wrapper; \ 713 opname = wrapper; \
716 vmi_ops.cache = (void *)rel->eip; \ 714 vmi_ops.cache = (void *)rel->eip; \
717 } \ 715 } \
718} while (0) 716} while (0)
@@ -732,11 +730,11 @@ static inline int __init activate_vmi(void)
732 } 730 }
733 savesegment(cs, kernel_cs); 731 savesegment(cs, kernel_cs);
734 732
735 paravirt_ops.paravirt_enabled = 1; 733 pv_info.paravirt_enabled = 1;
736 paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; 734 pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
735 pv_info.name = "vmi";
737 736
738 paravirt_ops.patch = vmi_patch; 737 pv_init_ops.patch = vmi_patch;
739 paravirt_ops.name = "vmi";
740 738
741 /* 739 /*
742 * Many of these operations are ABI compatible with VMI. 740 * Many of these operations are ABI compatible with VMI.
@@ -754,26 +752,26 @@ static inline int __init activate_vmi(void)
754 */ 752 */
755 753
756 /* CPUID is special, so very special it gets wrapped like a present */ 754 /* CPUID is special, so very special it gets wrapped like a present */
757 para_wrap(cpuid, vmi_cpuid, cpuid, CPUID); 755 para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);
758 756
759 para_fill(clts, CLTS); 757 para_fill(pv_cpu_ops.clts, CLTS);
760 para_fill(get_debugreg, GetDR); 758 para_fill(pv_cpu_ops.get_debugreg, GetDR);
761 para_fill(set_debugreg, SetDR); 759 para_fill(pv_cpu_ops.set_debugreg, SetDR);
762 para_fill(read_cr0, GetCR0); 760 para_fill(pv_cpu_ops.read_cr0, GetCR0);
763 para_fill(read_cr2, GetCR2); 761 para_fill(pv_mmu_ops.read_cr2, GetCR2);
764 para_fill(read_cr3, GetCR3); 762 para_fill(pv_mmu_ops.read_cr3, GetCR3);
765 para_fill(read_cr4, GetCR4); 763 para_fill(pv_cpu_ops.read_cr4, GetCR4);
766 para_fill(write_cr0, SetCR0); 764 para_fill(pv_cpu_ops.write_cr0, SetCR0);
767 para_fill(write_cr2, SetCR2); 765 para_fill(pv_mmu_ops.write_cr2, SetCR2);
768 para_fill(write_cr3, SetCR3); 766 para_fill(pv_mmu_ops.write_cr3, SetCR3);
769 para_fill(write_cr4, SetCR4); 767 para_fill(pv_cpu_ops.write_cr4, SetCR4);
770 para_fill(save_fl, GetInterruptMask); 768 para_fill(pv_irq_ops.save_fl, GetInterruptMask);
771 para_fill(restore_fl, SetInterruptMask); 769 para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
772 para_fill(irq_disable, DisableInterrupts); 770 para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
773 para_fill(irq_enable, EnableInterrupts); 771 para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
774 772
775 para_fill(wbinvd, WBINVD); 773 para_fill(pv_cpu_ops.wbinvd, WBINVD);
776 para_fill(read_tsc, RDTSC); 774 para_fill(pv_cpu_ops.read_tsc, RDTSC);
777 775
778 /* The following we emulate with trap and emulate for now */ 776 /* The following we emulate with trap and emulate for now */
779 /* paravirt_ops.read_msr = vmi_rdmsr */ 777 /* paravirt_ops.read_msr = vmi_rdmsr */
@@ -781,29 +779,38 @@ static inline int __init activate_vmi(void)
781 /* paravirt_ops.rdpmc = vmi_rdpmc */ 779 /* paravirt_ops.rdpmc = vmi_rdpmc */
782 780
783 /* TR interface doesn't pass TR value, wrap */ 781 /* TR interface doesn't pass TR value, wrap */
784 para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR); 782 para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);
785 783
786 /* LDT is special, too */ 784 /* LDT is special, too */
787 para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT); 785 para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
788 786
789 para_fill(load_gdt, SetGDT); 787 para_fill(pv_cpu_ops.load_gdt, SetGDT);
790 para_fill(load_idt, SetIDT); 788 para_fill(pv_cpu_ops.load_idt, SetIDT);
791 para_fill(store_gdt, GetGDT); 789 para_fill(pv_cpu_ops.store_gdt, GetGDT);
792 para_fill(store_idt, GetIDT); 790 para_fill(pv_cpu_ops.store_idt, GetIDT);
793 para_fill(store_tr, GetTR); 791 para_fill(pv_cpu_ops.store_tr, GetTR);
794 paravirt_ops.load_tls = vmi_load_tls; 792 pv_cpu_ops.load_tls = vmi_load_tls;
795 para_fill(write_ldt_entry, WriteLDTEntry); 793 para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
796 para_fill(write_gdt_entry, WriteGDTEntry); 794 para_fill(pv_cpu_ops.write_gdt_entry, WriteGDTEntry);
797 para_fill(write_idt_entry, WriteIDTEntry); 795 para_fill(pv_cpu_ops.write_idt_entry, WriteIDTEntry);
798 para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack); 796 para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
799 para_fill(set_iopl_mask, SetIOPLMask); 797 para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
800 para_fill(io_delay, IODelay); 798 para_fill(pv_cpu_ops.io_delay, IODelay);
801 para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode); 799
800 para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
801 set_lazy_mode, SetLazyMode);
802 para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
803 set_lazy_mode, SetLazyMode);
804
805 para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
806 set_lazy_mode, SetLazyMode);
807 para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
808 set_lazy_mode, SetLazyMode);
802 809
803 /* user and kernel flush are just handled with different flags to FlushTLB */ 810 /* user and kernel flush are just handled with different flags to FlushTLB */
804 para_wrap(flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB); 811 para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
805 para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB); 812 para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
806 para_fill(flush_tlb_single, InvalPage); 813 para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);
807 814
808 /* 815 /*
809 * Until a standard flag format can be agreed on, we need to 816 * Until a standard flag format can be agreed on, we need to
@@ -819,41 +826,41 @@ static inline int __init activate_vmi(void)
819#endif 826#endif
820 827
821 if (vmi_ops.set_pte) { 828 if (vmi_ops.set_pte) {
822 paravirt_ops.set_pte = vmi_set_pte; 829 pv_mmu_ops.set_pte = vmi_set_pte;
823 paravirt_ops.set_pte_at = vmi_set_pte_at; 830 pv_mmu_ops.set_pte_at = vmi_set_pte_at;
824 paravirt_ops.set_pmd = vmi_set_pmd; 831 pv_mmu_ops.set_pmd = vmi_set_pmd;
825#ifdef CONFIG_X86_PAE 832#ifdef CONFIG_X86_PAE
826 paravirt_ops.set_pte_atomic = vmi_set_pte_atomic; 833 pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
827 paravirt_ops.set_pte_present = vmi_set_pte_present; 834 pv_mmu_ops.set_pte_present = vmi_set_pte_present;
828 paravirt_ops.set_pud = vmi_set_pud; 835 pv_mmu_ops.set_pud = vmi_set_pud;
829 paravirt_ops.pte_clear = vmi_pte_clear; 836 pv_mmu_ops.pte_clear = vmi_pte_clear;
830 paravirt_ops.pmd_clear = vmi_pmd_clear; 837 pv_mmu_ops.pmd_clear = vmi_pmd_clear;
831#endif 838#endif
832 } 839 }
833 840
834 if (vmi_ops.update_pte) { 841 if (vmi_ops.update_pte) {
835 paravirt_ops.pte_update = vmi_update_pte; 842 pv_mmu_ops.pte_update = vmi_update_pte;
836 paravirt_ops.pte_update_defer = vmi_update_pte_defer; 843 pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
837 } 844 }
838 845
839 vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage); 846 vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
840 if (vmi_ops.allocate_page) { 847 if (vmi_ops.allocate_page) {
841 paravirt_ops.alloc_pt = vmi_allocate_pt; 848 pv_mmu_ops.alloc_pt = vmi_allocate_pt;
842 paravirt_ops.alloc_pd = vmi_allocate_pd; 849 pv_mmu_ops.alloc_pd = vmi_allocate_pd;
843 paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone; 850 pv_mmu_ops.alloc_pd_clone = vmi_allocate_pd_clone;
844 } 851 }
845 852
846 vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage); 853 vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
847 if (vmi_ops.release_page) { 854 if (vmi_ops.release_page) {
848 paravirt_ops.release_pt = vmi_release_pt; 855 pv_mmu_ops.release_pt = vmi_release_pt;
849 paravirt_ops.release_pd = vmi_release_pd; 856 pv_mmu_ops.release_pd = vmi_release_pd;
850 } 857 }
851 858
852 /* Set linear is needed in all cases */ 859 /* Set linear is needed in all cases */
853 vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping); 860 vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
854#ifdef CONFIG_HIGHPTE 861#ifdef CONFIG_HIGHPTE
855 if (vmi_ops.set_linear_mapping) 862 if (vmi_ops.set_linear_mapping)
856 paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte; 863 pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
857#endif 864#endif
858 865
859 /* 866 /*
@@ -863,17 +870,17 @@ static inline int __init activate_vmi(void)
863 * the backend. They are performance critical anyway, so requiring 870 * the backend. They are performance critical anyway, so requiring
864 * a patch is not a big problem. 871 * a patch is not a big problem.
865 */ 872 */
866 paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0; 873 pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
867 paravirt_ops.iret = (void *)0xbadbab0; 874 pv_cpu_ops.iret = (void *)0xbadbab0;
868 875
869#ifdef CONFIG_SMP 876#ifdef CONFIG_SMP
870 para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState); 877 para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
871#endif 878#endif
872 879
873#ifdef CONFIG_X86_LOCAL_APIC 880#ifdef CONFIG_X86_LOCAL_APIC
874 para_fill(apic_read, APICRead); 881 para_fill(pv_apic_ops.apic_read, APICRead);
875 para_fill(apic_write, APICWrite); 882 para_fill(pv_apic_ops.apic_write, APICWrite);
876 para_fill(apic_write_atomic, APICWrite); 883 para_fill(pv_apic_ops.apic_write_atomic, APICWrite);
877#endif 884#endif
878 885
879 /* 886 /*
@@ -891,15 +898,15 @@ static inline int __init activate_vmi(void)
891 vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); 898 vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
892 vmi_timer_ops.cancel_alarm = 899 vmi_timer_ops.cancel_alarm =
893 vmi_get_function(VMI_CALL_CancelAlarm); 900 vmi_get_function(VMI_CALL_CancelAlarm);
894 paravirt_ops.time_init = vmi_time_init; 901 pv_time_ops.time_init = vmi_time_init;
895 paravirt_ops.get_wallclock = vmi_get_wallclock; 902 pv_time_ops.get_wallclock = vmi_get_wallclock;
896 paravirt_ops.set_wallclock = vmi_set_wallclock; 903 pv_time_ops.set_wallclock = vmi_set_wallclock;
897#ifdef CONFIG_X86_LOCAL_APIC 904#ifdef CONFIG_X86_LOCAL_APIC
898 paravirt_ops.setup_boot_clock = vmi_time_bsp_init; 905 pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
899 paravirt_ops.setup_secondary_clock = vmi_time_ap_init; 906 pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
900#endif 907#endif
901 paravirt_ops.sched_clock = vmi_sched_clock; 908 pv_time_ops.sched_clock = vmi_sched_clock;
902 paravirt_ops.get_cpu_khz = vmi_cpu_khz; 909 pv_time_ops.get_cpu_khz = vmi_cpu_khz;
903 910
904 /* We have true wallclock functions; disable CMOS clock sync */ 911 /* We have true wallclock functions; disable CMOS clock sync */
905 no_sync_cmos_clock = 1; 912 no_sync_cmos_clock = 1;
@@ -908,7 +915,7 @@ static inline int __init activate_vmi(void)
908 disable_vmi_timer = 1; 915 disable_vmi_timer = 1;
909 } 916 }
910 917
911 para_fill(safe_halt, Halt); 918 para_fill(pv_irq_ops.safe_halt, Halt);
912 919
913 /* 920 /*
914 * Alternative instruction rewriting doesn't happen soon enough 921 * Alternative instruction rewriting doesn't happen soon enough
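Throughout this file the monolithic paravirt_ops is being split into the grouped pv_info, pv_init_ops, pv_cpu_ops, pv_irq_ops, pv_apic_ops and pv_mmu_ops structures, and the single set_lazy_mode hook becomes separate enter/leave pairs per area. A minimal sketch of how a backend wires this up under the new layout, assuming only the helpers named in the hunks above (the mybackend identifiers are hypothetical):

	#include <asm/paravirt.h>

	/* Sketch: grouped paravirt hooks with explicit lazy-mode enter/leave. */
	static void mybackend_enter_lazy_mmu(void)
	{
		paravirt_enter_lazy_mmu();	/* generic bookkeeping, as in the hunk */
		/* ... start batching MMU operations for the hypervisor ... */
	}

	static void mybackend_leave_lazy(void)
	{
		paravirt_leave_lazy(paravirt_get_lazy_mode());
		/* ... flush whatever was batched ... */
	}

	static void __init mybackend_init(void)
	{
		pv_info.name = "mybackend";
		pv_info.paravirt_enabled = 1;

		pv_mmu_ops.lazy_mode.enter = mybackend_enter_lazy_mmu;
		pv_mmu_ops.lazy_mode.leave = mybackend_leave_lazy;
	}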
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 93847d848157..8a67e282cb5e 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -78,7 +78,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
78 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; 78 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
79 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; 79 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
80 vsyscall_gtod_data.sys_tz = sys_tz; 80 vsyscall_gtod_data.sys_tz = sys_tz;
81 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
82 vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; 81 vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
83 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); 82 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
84} 83}
@@ -289,7 +288,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
289 unsigned long *d; 288 unsigned long *d;
290 unsigned long node = 0; 289 unsigned long node = 0;
291#ifdef CONFIG_NUMA 290#ifdef CONFIG_NUMA
292 node = cpu_to_node[cpu]; 291 node = cpu_to_node(cpu);
293#endif 292#endif
294 if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) 293 if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
295 write_rdtscp_aux((node << 12) | cpu); 294 write_rdtscp_aux((node << 12) | cpu);
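write_rdtscp_aux() above programs TSC_AUX with the node number in the high bits and the CPU number in the low 12 bits, so a single RDTSCP can recover both. A small sketch of the matching decode, assuming that 12-bit split (the helper name is hypothetical):

	/* Sketch: split a TSC_AUX value written as (node << 12) | cpu. */
	static inline void decode_tsc_aux(unsigned int aux,
					  unsigned int *cpu, unsigned int *node)
	{
		*cpu  = aux & 0xfff;	/* low 12 bits: CPU number */
		*node = aux >> 12;	/* remaining bits: NUMA node */
	}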
diff --git a/arch/x86/lib/bitstr_64.c b/arch/x86/lib/bitstr_64.c
index 24676609a6ac..7445caf1b5de 100644
--- a/arch/x86/lib/bitstr_64.c
+++ b/arch/x86/lib/bitstr_64.c
@@ -14,7 +14,7 @@ find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len)
14 14
15 /* could test bitsliced, but it's hardly worth it */ 15 /* could test bitsliced, but it's hardly worth it */
16 end = n+len; 16 end = n+len;
17 if (end >= nbits) 17 if (end > nbits)
18 return -1; 18 return -1;
19 for (i = n+1; i < end; i++) { 19 for (i = n+1; i < end; i++) {
20 if (test_bit(i, bitmap)) { 20 if (test_bit(i, bitmap)) {
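The one-character change above fixes an off-by-one: a run of len zero bits starting at bit n occupies bits n .. n+len-1, so it fits whenever n+len <= nbits, but the old test rejected end == nbits and therefore any string ending exactly at the last bit (for example nbits = 64, n = 60, len = 4). A tiny sketch of the corrected bound check:

	/* Sketch: does a run of 'len' bits starting at bit 'n' fit in 'nbits'? */
	static inline int bit_run_fits(long n, int len, long nbits)
	{
		long end = n + len;	/* one past the last bit of the run */

		return end <= nbits;	/* old code effectively required end < nbits */
	}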
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 7767962f25d3..57d043fa893e 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -26,27 +26,18 @@ static void __rdmsr_safe_on_cpu(void *info)
26static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) 26static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
27{ 27{
28 int err = 0; 28 int err = 0;
29 preempt_disable(); 29 struct msr_info rv;
30 if (smp_processor_id() == cpu) 30
31 if (safe) 31 rv.msr_no = msr_no;
32 err = rdmsr_safe(msr_no, l, h); 32 if (safe) {
33 else 33 smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
34 rdmsr(msr_no, *l, *h); 34 err = rv.err;
35 else { 35 } else {
36 struct msr_info rv; 36 smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
37
38 rv.msr_no = msr_no;
39 if (safe) {
40 smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
41 &rv, 0, 1);
42 err = rv.err;
43 } else {
44 smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
45 }
46 *l = rv.l;
47 *h = rv.h;
48 } 37 }
49 preempt_enable(); 38 *l = rv.l;
39 *h = rv.h;
40
50 return err; 41 return err;
51} 42}
52 43
@@ -67,27 +58,18 @@ static void __wrmsr_safe_on_cpu(void *info)
67static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) 58static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
68{ 59{
69 int err = 0; 60 int err = 0;
70 preempt_disable(); 61 struct msr_info rv;
71 if (smp_processor_id() == cpu) 62
72 if (safe) 63 rv.msr_no = msr_no;
73 err = wrmsr_safe(msr_no, l, h); 64 rv.l = l;
74 else 65 rv.h = h;
75 wrmsr(msr_no, l, h); 66 if (safe) {
76 else { 67 smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
77 struct msr_info rv; 68 err = rv.err;
78 69 } else {
79 rv.msr_no = msr_no; 70 smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
80 rv.l = l;
81 rv.h = h;
82 if (safe) {
83 smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
84 &rv, 0, 1);
85 err = rv.err;
86 } else {
87 smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
88 }
89 } 71 }
90 preempt_enable(); 72
91 return err; 73 return err;
92} 74}
93 75
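The rewrite above drops the preempt_disable()/smp_processor_id() special case because smp_call_function_single() also handles the call when cpu is the current CPU; every request now goes through one msr_info argument block. A sketch of the remote side of that pattern (the struct layout is assumed from the way the callers above use it):

	#include <linux/smp.h>
	#include <asm/msr.h>

	struct msr_info {		/* field set assumed from the callers above */
		u32 msr_no;
		u32 l, h;
		int err;
	};

	/* Sketch: runs on the target CPU via smp_call_function_single(). */
	static void __rdmsr_on_cpu(void *info)
	{
		struct msr_info *rv = info;

		rdmsr(rv->msr_no, rv->l, rv->h);
	}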
diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
index 0cde1f807314..05ea55f71405 100644
--- a/arch/x86/lib/rwlock_64.S
+++ b/arch/x86/lib/rwlock_64.S
@@ -2,7 +2,7 @@
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/rwlock.h> 4#include <asm/rwlock.h>
5#include <asm/alternative-asm.i> 5#include <asm/alternative-asm.h>
6#include <asm/dwarf2.h> 6#include <asm/dwarf2.h>
7 7
8/* rdi: pointer to rwlock_t */ 8/* rdi: pointer to rwlock_t */
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
index c01eb39c0b43..444fba400983 100644
--- a/arch/x86/lib/semaphore_32.S
+++ b/arch/x86/lib/semaphore_32.S
@@ -15,8 +15,8 @@
15 15
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <asm/rwlock.h> 17#include <asm/rwlock.h>
18#include <asm/alternative-asm.i> 18#include <asm/alternative-asm.h>
19#include <asm/frame.i> 19#include <asm/frame.h>
20#include <asm/dwarf2.h> 20#include <asm/dwarf2.h>
21 21
22/* 22/*
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 2c773fefa3dd..c2c0504a3071 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -160,26 +160,6 @@ char *strchr(const char * s, int c)
160EXPORT_SYMBOL(strchr); 160EXPORT_SYMBOL(strchr);
161#endif 161#endif
162 162
163#ifdef __HAVE_ARCH_STRRCHR
164char *strrchr(const char * s, int c)
165{
166 int d0, d1;
167 char * res;
168 asm volatile( "movb %%al,%%ah\n"
169 "1:\tlodsb\n\t"
170 "cmpb %%ah,%%al\n\t"
171 "jne 2f\n\t"
172 "leal -1(%%esi),%0\n"
173 "2:\ttestb %%al,%%al\n\t"
174 "jne 1b"
175 :"=g" (res), "=&S" (d0), "=&a" (d1)
176 :"0" (0),"1" (s),"2" (c)
177 :"memory");
178 return res;
179}
180EXPORT_SYMBOL(strrchr);
181#endif
182
183#ifdef __HAVE_ARCH_STRLEN 163#ifdef __HAVE_ARCH_STRLEN
184size_t strlen(const char * s) 164size_t strlen(const char * s)
185{ 165{
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index 1bd82983986d..3f08010f3517 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -35,7 +35,11 @@ void __init pre_intr_init_hook(void)
35/* 35/*
36 * IRQ2 is cascade interrupt to second interrupt controller 36 * IRQ2 is cascade interrupt to second interrupt controller
37 */ 37 */
38static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; 38static struct irqaction irq2 = {
39 .handler = no_action,
40 .mask = CPU_MASK_NONE,
41 .name = "cascade",
42};
39 43
40/** 44/**
41 * intr_init_hook - post gate setup interrupt initialisation 45 * intr_init_hook - post gate setup interrupt initialisation
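The irqaction change above (and the identical one in mach-voyager further down) is the usual move from positional to designated initializers: only the fields that matter are named, everything else is zero-filled, and the initializer keeps working if the structure grows or is reordered. A generic sketch of the pattern with a hypothetical structure:

	struct demo_action {			/* hypothetical, for illustration only */
		void		(*handler)(void);
		unsigned long	flags;
		const char	*name;
		void		*dev_id;
	};

	static void demo_noop(void) { }

	/* Positional: every field must appear, in declaration order. */
	static struct demo_action old_style = { demo_noop, 0, "cascade", NULL };

	/* Designated: name what you set, the rest defaults to zero. */
	static struct demo_action new_style = {
		.handler = demo_noop,
		.name	 = "cascade",
	};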
diff --git a/arch/x86/mach-es7000/es7000plat.c b/arch/x86/mach-es7000/es7000plat.c
index ab99072d3f9a..f5d6f7d8b86e 100644
--- a/arch/x86/mach-es7000/es7000plat.c
+++ b/arch/x86/mach-es7000/es7000plat.c
@@ -46,11 +46,11 @@
46 * ES7000 Globals 46 * ES7000 Globals
47 */ 47 */
48 48
49volatile unsigned long *psai = NULL; 49static volatile unsigned long *psai = NULL;
50struct mip_reg *mip_reg; 50static struct mip_reg *mip_reg;
51struct mip_reg *host_reg; 51static struct mip_reg *host_reg;
52int mip_port; 52static int mip_port;
53unsigned long mip_addr, host_addr; 53static unsigned long mip_addr, host_addr;
54 54
55/* 55/*
56 * GSI override for ES7000 platforms. 56 * GSI override for ES7000 platforms.
@@ -288,28 +288,8 @@ es7000_start_cpu(int cpu, unsigned long eip)
288 288
289} 289}
290 290
291int
292es7000_stop_cpu(int cpu)
293{
294 int startup;
295
296 if (psai == NULL)
297 return -1;
298
299 startup= (0x1000000 | cpu);
300
301 while ((*psai & 0xff00ffff) != startup)
302 ;
303
304 startup = (*psai & 0xff0000) >> 16;
305 *psai &= 0xffffff;
306
307 return 0;
308
309}
310
311void __init 291void __init
312es7000_sw_apic() 292es7000_sw_apic(void)
313{ 293{
314 if (es7000_plat) { 294 if (es7000_plat) {
315 int mip_status; 295 int mip_status;
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
index 74f3da634423..4121d1551800 100644
--- a/arch/x86/mach-generic/probe.c
+++ b/arch/x86/mach-generic/probe.c
@@ -22,7 +22,7 @@ extern struct genapic apic_default;
22 22
23struct genapic *genapic = &apic_default; 23struct genapic *genapic = &apic_default;
24 24
25struct genapic *apic_probe[] __initdata = { 25static struct genapic *apic_probe[] __initdata = {
26 &apic_summit, 26 &apic_summit,
27 &apic_bigsmp, 27 &apic_bigsmp,
28 &apic_es7000, 28 &apic_es7000,
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index a0ab4002abcd..3bef977cb29b 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -18,7 +18,11 @@ void __init pre_intr_init_hook(void)
18/* 18/*
19 * IRQ2 is cascade interrupt to second interrupt controller 19 * IRQ2 is cascade interrupt to second interrupt controller
20 */ 20 */
21static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; 21static struct irqaction irq2 = {
22 .handler = no_action,
23 .mask = CPU_MASK_NONE,
24 .name = "cascade",
25};
22 26
23void __init intr_init_hook(void) 27void __init intr_init_hook(void)
24{ 28{
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index b87f8548e75a..e4928aa6bdfb 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -442,8 +442,8 @@ static __u32 __init
442setup_trampoline(void) 442setup_trampoline(void)
443{ 443{
444 /* these two are global symbols in trampoline.S */ 444 /* these two are global symbols in trampoline.S */
445 extern __u8 trampoline_end[]; 445 extern const __u8 trampoline_end[];
446 extern __u8 trampoline_data[]; 446 extern const __u8 trampoline_data[];
447 447
448 memcpy((__u8 *)trampoline_base, trampoline_data, 448 memcpy((__u8 *)trampoline_base, trampoline_data,
449 trampoline_end - trampoline_data); 449 trampoline_end - trampoline_data);
@@ -1037,6 +1037,7 @@ smp_call_function_interrupt(void)
1037 */ 1037 */
1038 irq_enter(); 1038 irq_enter();
1039 (*func)(info); 1039 (*func)(info);
1040 __get_cpu_var(irq_stat).irq_call_count++;
1040 irq_exit(); 1041 irq_exit();
1041 if (wait) { 1042 if (wait) {
1042 mb(); 1043 mb();
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index b1e45457d4ef..13893772cc48 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -103,14 +103,14 @@ extern unsigned long highend_pfn, highstart_pfn;
103 103
104#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) 104#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
105 105
106unsigned long node_remap_start_pfn[MAX_NUMNODES]; 106static unsigned long node_remap_start_pfn[MAX_NUMNODES];
107unsigned long node_remap_size[MAX_NUMNODES]; 107unsigned long node_remap_size[MAX_NUMNODES];
108unsigned long node_remap_offset[MAX_NUMNODES]; 108static unsigned long node_remap_offset[MAX_NUMNODES];
109void *node_remap_start_vaddr[MAX_NUMNODES]; 109static void *node_remap_start_vaddr[MAX_NUMNODES];
110void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 110void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
111 111
112void *node_remap_end_vaddr[MAX_NUMNODES]; 112static void *node_remap_end_vaddr[MAX_NUMNODES];
113void *node_remap_alloc_vaddr[MAX_NUMNODES]; 113static void *node_remap_alloc_vaddr[MAX_NUMNODES];
114static unsigned long kva_start_pfn; 114static unsigned long kva_start_pfn;
115static unsigned long kva_pages; 115static unsigned long kva_pages;
116/* 116/*
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index c686ae20fd6b..6555c3d14371 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -105,7 +105,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
105 LDT and other horrors are only used in user space. */ 105 LDT and other horrors are only used in user space. */
106 if (seg & (1<<2)) { 106 if (seg & (1<<2)) {
107 /* Must lock the LDT while reading it. */ 107 /* Must lock the LDT while reading it. */
108 down(&current->mm->context.sem); 108 mutex_lock(&current->mm->context.lock);
109 desc = current->mm->context.ldt; 109 desc = current->mm->context.ldt;
110 desc = (void *)desc + (seg & ~7); 110 desc = (void *)desc + (seg & ~7);
111 } else { 111 } else {
@@ -118,7 +118,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
118 base = get_desc_base((unsigned long *)desc); 118 base = get_desc_base((unsigned long *)desc);
119 119
120 if (seg & (1<<2)) { 120 if (seg & (1<<2)) {
121 up(&current->mm->context.sem); 121 mutex_unlock(&current->mm->context.lock);
122 } else 122 } else
123 put_cpu(); 123 put_cpu();
124 124
@@ -539,23 +539,22 @@ no_context:
539 printk(KERN_ALERT "BUG: unable to handle kernel paging" 539 printk(KERN_ALERT "BUG: unable to handle kernel paging"
540 " request"); 540 " request");
541 printk(" at virtual address %08lx\n",address); 541 printk(" at virtual address %08lx\n",address);
542 printk(KERN_ALERT " printing eip:\n"); 542 printk(KERN_ALERT "printing eip: %08lx ", regs->eip);
543 printk("%08lx\n", regs->eip);
544 543
545 page = read_cr3(); 544 page = read_cr3();
546 page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; 545 page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
547#ifdef CONFIG_X86_PAE 546#ifdef CONFIG_X86_PAE
548 printk(KERN_ALERT "*pdpt = %016Lx\n", page); 547 printk("*pdpt = %016Lx ", page);
549 if ((page >> PAGE_SHIFT) < max_low_pfn 548 if ((page >> PAGE_SHIFT) < max_low_pfn
550 && page & _PAGE_PRESENT) { 549 && page & _PAGE_PRESENT) {
551 page &= PAGE_MASK; 550 page &= PAGE_MASK;
552 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) 551 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
553 & (PTRS_PER_PMD - 1)]; 552 & (PTRS_PER_PMD - 1)];
554 printk(KERN_ALERT "*pde = %016Lx\n", page); 553 printk(KERN_ALERT "*pde = %016Lx ", page);
555 page &= ~_PAGE_NX; 554 page &= ~_PAGE_NX;
556 } 555 }
557#else 556#else
558 printk(KERN_ALERT "*pde = %08lx\n", page); 557 printk("*pde = %08lx ", page);
559#endif 558#endif
560 559
561 /* 560 /*
@@ -569,8 +568,10 @@ no_context:
569 page &= PAGE_MASK; 568 page &= PAGE_MASK;
570 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) 569 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
571 & (PTRS_PER_PTE - 1)]; 570 & (PTRS_PER_PTE - 1)];
572 printk(KERN_ALERT "*pte = %0*Lx\n", sizeof(page)*2, (u64)page); 571 printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
573 } 572 }
573
574 printk("\n");
574 } 575 }
575 576
576 tsk->thread.cr2 = address; 577 tsk->thread.cr2 = address;
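The context.sem to context.lock switch above is the standard semaphore-to-mutex conversion for a lock that is only ever taken and released by the same task. A minimal sketch of the access pattern under the new lock (the helper name is hypothetical):

	#include <linux/sched.h>
	#include <linux/mutex.h>

	/* Sketch: serialise readers of mm->context.ldt with the new mutex. */
	static void inspect_ldt(struct mm_struct *mm)
	{
		mutex_lock(&mm->context.lock);
		/* ... safely dereference mm->context.ldt here ... */
		mutex_unlock(&mm->context.lock);
	}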
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index dda4e83649a0..c7d19471261d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -85,13 +85,20 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
85static pte_t * __init one_page_table_init(pmd_t *pmd) 85static pte_t * __init one_page_table_init(pmd_t *pmd)
86{ 86{
87 if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { 87 if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
88 pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); 88 pte_t *page_table = NULL;
89
90#ifdef CONFIG_DEBUG_PAGEALLOC
91 page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
92#endif
93 if (!page_table)
94 page_table =
95 (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
89 96
90 paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT); 97 paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
91 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); 98 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
92 BUG_ON(page_table != pte_offset_kernel(pmd, 0)); 99 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
93 } 100 }
94 101
95 return pte_offset_kernel(pmd, 0); 102 return pte_offset_kernel(pmd, 0);
96} 103}
97 104
@@ -741,24 +748,12 @@ struct kmem_cache *pmd_cache;
741 748
742void __init pgtable_cache_init(void) 749void __init pgtable_cache_init(void)
743{ 750{
744 size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t); 751 if (PTRS_PER_PMD > 1)
745
746 if (PTRS_PER_PMD > 1) {
747 pmd_cache = kmem_cache_create("pmd", 752 pmd_cache = kmem_cache_create("pmd",
748 PTRS_PER_PMD*sizeof(pmd_t), 753 PTRS_PER_PMD*sizeof(pmd_t),
749 PTRS_PER_PMD*sizeof(pmd_t), 754 PTRS_PER_PMD*sizeof(pmd_t),
750 SLAB_PANIC, 755 SLAB_PANIC,
751 pmd_ctor); 756 pmd_ctor);
752 if (!SHARED_KERNEL_PMD) {
753 /* If we're in PAE mode and have a non-shared
754 kernel pmd, then the pgd size must be a
755 page size. This is because the pgd_list
756 links through the page structure, so there
757 can only be one pgd per page for this to
758 work. */
759 pgd_size = PAGE_SIZE;
760 }
761 }
762} 757}
763 758
764/* 759/*
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 6da235522269..5eec5e56d07f 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -166,7 +166,7 @@ early_node_mem(int nodeid, unsigned long start, unsigned long end,
166 return __va(mem); 166 return __va(mem);
167 ptr = __alloc_bootmem_nopanic(size, 167 ptr = __alloc_bootmem_nopanic(size,
168 SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)); 168 SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
169 if (ptr == 0) { 169 if (ptr == NULL) {
170 printk(KERN_ERR "Cannot find %lu bytes in node %d\n", 170 printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
171 size, nodeid); 171 size, nodeid);
172 return NULL; 172 return NULL;
@@ -261,7 +261,7 @@ void __init numa_init_array(void)
261 We round robin the existing nodes. */ 261 We round robin the existing nodes. */
262 rr = first_node(node_online_map); 262 rr = first_node(node_online_map);
263 for (i = 0; i < NR_CPUS; i++) { 263 for (i = 0; i < NR_CPUS; i++) {
264 if (cpu_to_node[i] != NUMA_NO_NODE) 264 if (cpu_to_node(i) != NUMA_NO_NODE)
265 continue; 265 continue;
266 numa_set_node(i, rr); 266 numa_set_node(i, rr);
267 rr = next_node(rr, node_online_map); 267 rr = next_node(rr, node_online_map);
@@ -543,7 +543,7 @@ __cpuinit void numa_add_cpu(int cpu)
543void __cpuinit numa_set_node(int cpu, int node) 543void __cpuinit numa_set_node(int cpu, int node)
544{ 544{
545 cpu_pda(cpu)->nodenumber = node; 545 cpu_pda(cpu)->nodenumber = node;
546 cpu_to_node[cpu] = node; 546 cpu_to_node(cpu) = node;
547} 547}
548 548
549unsigned long __init numa_free_all_bootmem(void) 549unsigned long __init numa_free_all_bootmem(void)
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 4241a74d16c8..260073c07600 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -70,10 +70,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
70 70
71static void cache_flush_page(struct page *p) 71static void cache_flush_page(struct page *p)
72{ 72{
73 unsigned long adr = (unsigned long)page_address(p); 73 void *adr = page_address(p);
74 int i; 74 int i;
75 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 75 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
76 asm volatile("clflush (%0)" :: "r" (adr + i)); 76 clflush(adr+i);
77} 77}
78 78
79static void flush_kernel_map(void *arg) 79static void flush_kernel_map(void *arg)
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 10b9809ce821..8a4f65bf956e 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -65,7 +65,7 @@ static void cache_flush_page(void *adr)
65{ 65{
66 int i; 66 int i;
67 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 67 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
68 asm volatile("clflush (%0)" :: "r" (adr + i)); 68 clflush(adr+i);
69} 69}
70 70
71static void flush_kernel_map(void *arg) 71static void flush_kernel_map(void *arg)
@@ -148,6 +148,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
148 split = split_large_page(address, prot, ref_prot2); 148 split = split_large_page(address, prot, ref_prot2);
149 if (!split) 149 if (!split)
150 return -ENOMEM; 150 return -ENOMEM;
151 pgprot_val(ref_prot2) &= ~_PAGE_NX;
151 set_pte(kpte, mk_pte(split, ref_prot2)); 152 set_pte(kpte, mk_pte(split, ref_prot2));
152 kpte_page = split; 153 kpte_page = split;
153 } 154 }
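Both pageattr files replace the open-coded "clflush (%0)" asm with the clflush() helper introduced elsewhere in this series; the loop still walks the page one cache line at a time using x86_clflush_size. A sketch of the resulting idiom (the exact header providing clflush() at this point in the series is assumed):

	#include <asm/page.h>
	#include <asm/processor.h>

	/* Sketch: evict one page from the caches, a cache line at a time. */
	static void flush_page_from_cache(void *addr)
	{
		int i;

		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			clflush(addr + i);
	}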
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index ef1f6cd3ea66..be61a1d845a4 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -6,6 +6,7 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/errno.h> 7#include <linux/errno.h>
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/nmi.h>
9#include <linux/swap.h> 10#include <linux/swap.h>
10#include <linux/smp.h> 11#include <linux/smp.h>
11#include <linux/highmem.h> 12#include <linux/highmem.h>
@@ -39,6 +40,8 @@ void show_mem(void)
39 for_each_online_pgdat(pgdat) { 40 for_each_online_pgdat(pgdat) {
40 pgdat_resize_lock(pgdat, &flags); 41 pgdat_resize_lock(pgdat, &flags);
41 for (i = 0; i < pgdat->node_spanned_pages; ++i) { 42 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
43 if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
44 touch_nmi_watchdog();
42 page = pgdat_page_nr(pgdat, i); 45 page = pgdat_page_nr(pgdat, i);
43 total++; 46 total++;
44 if (PageHighMem(page)) 47 if (PageHighMem(page))
@@ -97,8 +100,7 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
97 } 100 }
98 pte = pte_offset_kernel(pmd, vaddr); 101 pte = pte_offset_kernel(pmd, vaddr);
99 if (pgprot_val(flags)) 102 if (pgprot_val(flags))
100 /* <pfn,flags> stored as-is, to permit clearing entries */ 103 set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
101 set_pte(pte, pfn_pte(pfn, flags));
102 else 104 else
103 pte_clear(&init_mm, vaddr, pte); 105 pte_clear(&init_mm, vaddr, pte);
104 106
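show_mem() above now touches the NMI watchdog every MAX_ORDER_NR_PAGES iterations so the per-page walk over a large node cannot trigger a false lockup report. The same idiom applies to any very long kernel loop; as a sketch:

	#include <linux/mm.h>
	#include <linux/nmi.h>

	/* Sketch: pet the NMI watchdog periodically inside a long-running loop. */
	static void scan_many_pages(unsigned long nr_pages)
	{
		unsigned long i;

		for (i = 0; i < nr_pages; i++) {
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			/* ... per-page work ... */
		}
	}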
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index acdf03e19146..56089ccc3949 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -431,9 +431,9 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
431 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 431 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
432 432
433 for (i = 0; i < NR_CPUS; i++) { 433 for (i = 0; i < NR_CPUS; i++) {
434 if (cpu_to_node[i] == NUMA_NO_NODE) 434 if (cpu_to_node(i) == NUMA_NO_NODE)
435 continue; 435 continue;
436 if (!node_isset(cpu_to_node[i], node_possible_map)) 436 if (!node_isset(cpu_to_node(i), node_possible_map))
437 numa_set_node(i, NUMA_NO_NODE); 437 numa_set_node(i, NUMA_NO_NODE);
438 } 438 }
439 numa_init_array(); 439 numa_init_array();
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 11b7a51566a8..2d0eeac7251f 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -269,7 +269,6 @@ static void nmi_cpu_shutdown(void * dummy)
269 apic_write(APIC_LVTPC, saved_lvtpc[cpu]); 269 apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
270 apic_write(APIC_LVTERR, v); 270 apic_write(APIC_LVTERR, v);
271 nmi_restore_registers(msrs); 271 nmi_restore_registers(msrs);
272 model->shutdown(msrs);
273} 272}
274 273
275 274
@@ -278,6 +277,7 @@ static void nmi_shutdown(void)
278 nmi_enabled = 0; 277 nmi_enabled = 0;
279 on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); 278 on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
280 unregister_die_notifier(&profile_exceptions_nb); 279 unregister_die_notifier(&profile_exceptions_nb);
280 model->shutdown(cpu_msrs);
281 free_msrs(); 281 free_msrs();
282} 282}
283 283
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2d71bbc411d2..f4386990b150 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -289,6 +289,22 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
289 DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"), 289 DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
290 }, 290 },
291 }, 291 },
292 {
293 .callback = set_bf_sort,
294 .ident = "HP ProLiant DL385 G2",
295 .matches = {
296 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
297 DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
298 },
299 },
300 {
301 .callback = set_bf_sort,
302 .ident = "HP ProLiant DL585 G2",
303 .matches = {
304 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
305 DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
306 },
307 },
292#ifdef __i386__ 308#ifdef __i386__
293 { 309 {
294 .callback = assign_all_busses, 310 .callback = assign_all_busses,
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index dcd6bb9e0bb3..7a2ba4583939 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -13,7 +13,7 @@ vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
13 13
14$(obj)/vdso.o: $(obj)/vdso.so 14$(obj)/vdso.o: $(obj)/vdso.so
15 15
16targets += vdso.so vdso.lds $(vobjs-y) vdso-syms.o 16targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) vdso-syms.o
17 17
18# The DSO images are built using a special linker script. 18# The DSO images are built using a special linker script.
19quiet_cmd_syscall = SYSCALL $@ 19quiet_cmd_syscall = SYSCALL $@
@@ -26,12 +26,19 @@ vdso-flags = -fPIC -shared -Wl,-soname=linux-vdso.so.1 \
26 $(call ld-option, -Wl$(comma)--hash-style=sysv) \ 26 $(call ld-option, -Wl$(comma)--hash-style=sysv) \
27 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 27 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
28SYSCFLAGS_vdso.so = $(vdso-flags) 28SYSCFLAGS_vdso.so = $(vdso-flags)
29SYSCFLAGS_vdso.so.dbg = $(vdso-flags)
29 30
30$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so 31$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
31 32
32$(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE 33$(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE
34
35$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
33 $(call if_changed,syscall) 36 $(call if_changed,syscall)
34 37
38$(obj)/%.so: OBJCOPYFLAGS := -S
39$(obj)/%.so: $(obj)/%.so.dbg FORCE
40 $(call if_changed,objcopy)
41
35CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64 42CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
36 43
37$(obj)/vclock_gettime.o: KBUILD_CFLAGS = $(CFL) 44$(obj)/vclock_gettime.o: KBUILD_CFLAGS = $(CFL)
@@ -47,3 +54,11 @@ $(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
47SYSCFLAGS_vdso-syms.o = -r -d 54SYSCFLAGS_vdso-syms.o = -r -d
48$(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE 55$(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE
49 $(call if_changed,syscall) 56 $(call if_changed,syscall)
57
58quiet_cmd_vdso_install = INSTALL $@
59 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
60vdso.so:
61 @mkdir -p $(MODLIB)/vdso
62 $(call cmd,vdso_install)
63
64vdso_install: vdso.so
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
index b9a60e665d08..667d3245d972 100644
--- a/arch/x86/vdso/vdso.lds.S
+++ b/arch/x86/vdso/vdso.lds.S
@@ -26,13 +26,16 @@ SECTIONS
26 is insufficient, ld -shared will barf. Just increase it here. */ 26 is insufficient, ld -shared will barf. Just increase it here. */
27 . = VDSO_PRELINK + VDSO_TEXT_OFFSET; 27 . = VDSO_PRELINK + VDSO_TEXT_OFFSET;
28 28
29 .text : { *(.text) } :text 29 .text : { *(.text*) } :text
30 .text.ptr : { *(.text.ptr) } :text 30 .rodata : { *(.rodata*) } :text
31 . = VDSO_PRELINK + 0x900; 31 .data : {
32 .data : { *(.data) } :text 32 *(.data*)
33 .bss : { *(.bss) } :text 33 *(.sdata*)
34 *(.bss*)
35 *(.dynbss*)
36 } :text
34 37
35 .altinstructions : { *(.altinstructions) } :text 38 .altinstructions : { *(.altinstructions) } :text
36 .altinstr_replacement : { *(.altinstr_replacement) } :text 39 .altinstr_replacement : { *(.altinstr_replacement) } :text
37 40
38 .note : { *(.note.*) } :text :note 41 .note : { *(.note.*) } :text :note
@@ -42,7 +45,6 @@ SECTIONS
42 .useless : { 45 .useless : {
43 *(.got.plt) *(.got) 46 *(.got.plt) *(.got)
44 *(.gnu.linkonce.d.*) 47 *(.gnu.linkonce.d.*)
45 *(.dynbss)
46 *(.gnu.linkonce.b.*) 48 *(.gnu.linkonce.b.*)
47 } :text 49 } :text
48} 50}
diff --git a/arch/x86/vdso/vvar.c b/arch/x86/vdso/vvar.c
index 6fc22219a472..1b7e703684f9 100644
--- a/arch/x86/vdso/vvar.c
+++ b/arch/x86/vdso/vvar.c
@@ -8,5 +8,5 @@
8#include <asm/timex.h> 8#include <asm/timex.h>
9#include <asm/vgtod.h> 9#include <asm/vgtod.h>
10 10
11#define VEXTERN(x) typeof (__ ## x) *vdso_ ## x = (void *)VMAGIC; 11#define VEXTERN(x) typeof (__ ## x) *const vdso_ ## x = (void *)VMAGIC;
12#include "vextern.h" 12#include "vextern.h"
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 493a083f6886..94c39aaf695f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -25,7 +25,6 @@
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <linux/page-flags.h> 26#include <linux/page-flags.h>
27#include <linux/highmem.h> 27#include <linux/highmem.h>
28#include <linux/smp.h>
29 28
30#include <xen/interface/xen.h> 29#include <xen/interface/xen.h>
31#include <xen/interface/physdev.h> 30#include <xen/interface/physdev.h>
@@ -52,11 +51,25 @@
52 51
53EXPORT_SYMBOL_GPL(hypercall_page); 52EXPORT_SYMBOL_GPL(hypercall_page);
54 53
55DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
56
57DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 54DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
58DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); 55DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
59DEFINE_PER_CPU(unsigned long, xen_cr3); 56
57/*
58 * Note about cr3 (pagetable base) values:
59 *
60 * xen_cr3 contains the current logical cr3 value; it contains the
61 * last set cr3. This may not be the current effective cr3, because
62 * its update may be being lazily deferred. However, a vcpu looking
 63 * at its own cr3 can use this value knowing that everything will
64 * be self-consistent.
65 *
66 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
67 * hypercall to set the vcpu cr3 is complete (so it may be a little
68 * out of date, but it will never be set early). If one vcpu is
69 * looking at another vcpu's cr3 value, it should use this variable.
70 */
71DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
72DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
60 73
61struct start_info *xen_start_info; 74struct start_info *xen_start_info;
62EXPORT_SYMBOL_GPL(xen_start_info); 75EXPORT_SYMBOL_GPL(xen_start_info);
@@ -100,7 +113,7 @@ static void __init xen_vcpu_setup(int cpu)
100 info.mfn = virt_to_mfn(vcpup); 113 info.mfn = virt_to_mfn(vcpup);
101 info.offset = offset_in_page(vcpup); 114 info.offset = offset_in_page(vcpup);
102 115
103 printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %x, offset %d\n", 116 printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
104 cpu, vcpup, info.mfn, info.offset); 117 cpu, vcpup, info.mfn, info.offset);
105 118
106 /* Check to see if the hypervisor will put the vcpu_info 119 /* Check to see if the hypervisor will put the vcpu_info
@@ -124,7 +137,7 @@ static void __init xen_vcpu_setup(int cpu)
124static void __init xen_banner(void) 137static void __init xen_banner(void)
125{ 138{
126 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 139 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
127 paravirt_ops.name); 140 pv_info.name);
128 printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic); 141 printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
129} 142}
130 143
@@ -249,29 +262,10 @@ static void xen_halt(void)
249 xen_safe_halt(); 262 xen_safe_halt();
250} 263}
251 264
252static void xen_set_lazy_mode(enum paravirt_lazy_mode mode) 265static void xen_leave_lazy(void)
253{ 266{
254 BUG_ON(preemptible()); 267 paravirt_leave_lazy(paravirt_get_lazy_mode());
255
256 switch (mode) {
257 case PARAVIRT_LAZY_NONE:
258 BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
259 break;
260
261 case PARAVIRT_LAZY_MMU:
262 case PARAVIRT_LAZY_CPU:
263 BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
264 break;
265
266 case PARAVIRT_LAZY_FLUSH:
267 /* flush if necessary, but don't change state */
268 if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
269 xen_mc_flush();
270 return;
271 }
272
273 xen_mc_flush(); 268 xen_mc_flush();
274 x86_write_percpu(xen_lazy_mode, mode);
275} 269}
276 270
277static unsigned long xen_store_tr(void) 271static unsigned long xen_store_tr(void)
@@ -358,7 +352,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
358 * loaded properly. This will go away as soon as Xen has been 352 * loaded properly. This will go away as soon as Xen has been
359 * modified to not save/restore %gs for normal hypercalls. 353 * modified to not save/restore %gs for normal hypercalls.
360 */ 354 */
361 if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU) 355 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
362 loadsegment(gs, 0); 356 loadsegment(gs, 0);
363} 357}
364 358
@@ -632,32 +626,36 @@ static unsigned long xen_read_cr3(void)
632 return x86_read_percpu(xen_cr3); 626 return x86_read_percpu(xen_cr3);
633} 627}
634 628
629static void set_current_cr3(void *v)
630{
631 x86_write_percpu(xen_current_cr3, (unsigned long)v);
632}
633
635static void xen_write_cr3(unsigned long cr3) 634static void xen_write_cr3(unsigned long cr3)
636{ 635{
636 struct mmuext_op *op;
637 struct multicall_space mcs;
638 unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));
639
637 BUG_ON(preemptible()); 640 BUG_ON(preemptible());
638 641
639 if (cr3 == x86_read_percpu(xen_cr3)) { 642 mcs = xen_mc_entry(sizeof(*op)); /* disables interrupts */
640 /* just a simple tlb flush */
641 xen_flush_tlb();
642 return;
643 }
644 643
644 /* Update while interrupts are disabled, so its atomic with
645 respect to ipis */
645 x86_write_percpu(xen_cr3, cr3); 646 x86_write_percpu(xen_cr3, cr3);
646 647
648 op = mcs.args;
649 op->cmd = MMUEXT_NEW_BASEPTR;
650 op->arg1.mfn = mfn;
647 651
648 { 652 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
649 struct mmuext_op *op;
650 struct multicall_space mcs = xen_mc_entry(sizeof(*op));
651 unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));
652
653 op = mcs.args;
654 op->cmd = MMUEXT_NEW_BASEPTR;
655 op->arg1.mfn = mfn;
656 653
 657 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 654 /* Update xen_current_cr3 once the batch has actually
655 been submitted. */
656 xen_mc_callback(set_current_cr3, (void *)cr3);
658 657
659 xen_mc_issue(PARAVIRT_LAZY_CPU); 658 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
660 }
661} 659}
662 660
663/* Early in boot, while setting up the initial pagetable, assume 661/* Early in boot, while setting up the initial pagetable, assume
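The new xen_write_cr3() above no longer short-circuits when the same cr3 is written again; it always queues an MMUEXT_NEW_BASEPTR through the multicall batch and lets xen_mc_callback() record xen_current_cr3 only once the batch is actually submitted. A condensed sketch of that queue-and-callback pattern, using only the xen_mc_* helpers named in the hunk (the wrapper name is hypothetical):

	/* Sketch: batch one mmuext op and note completion via a callback. */
	static void queue_new_baseptr(unsigned long cr3, void (*done)(void *))
	{
		struct multicall_space mcs;
		struct mmuext_op *op;

		mcs = xen_mc_entry(sizeof(*op));	/* disables interrupts while batching */
		op = mcs.args;
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = pfn_to_mfn(PFN_DOWN(cr3));

		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
		xen_mc_callback(done, (void *)cr3);	/* fires once the batch is flushed */
		xen_mc_issue(PARAVIRT_LAZY_CPU);	/* flush now unless lazy CPU mode is active */
	}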
@@ -668,6 +666,15 @@ static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
668 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 666 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
669} 667}
670 668
669static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
670{
671 struct mmuext_op op;
672 op.cmd = level;
673 op.arg1.mfn = pfn_to_mfn(pfn);
674 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
675 BUG();
676}
677
 671/* This needs to make sure the new pte page is pinned iff it's being 678
672 attached to a pinned pagetable. */ 679 attached to a pinned pagetable. */
673static void xen_alloc_pt(struct mm_struct *mm, u32 pfn) 680static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
@@ -677,9 +684,10 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
677 if (PagePinned(virt_to_page(mm->pgd))) { 684 if (PagePinned(virt_to_page(mm->pgd))) {
678 SetPagePinned(page); 685 SetPagePinned(page);
679 686
680 if (!PageHighMem(page)) 687 if (!PageHighMem(page)) {
681 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 688 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
682 else 689 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
690 } else
683 /* make sure there are no stray mappings of 691 /* make sure there are no stray mappings of
684 this page */ 692 this page */
685 kmap_flush_unused(); 693 kmap_flush_unused();
@@ -692,8 +700,10 @@ static void xen_release_pt(u32 pfn)
692 struct page *page = pfn_to_page(pfn); 700 struct page *page = pfn_to_page(pfn);
693 701
694 if (PagePinned(page)) { 702 if (PagePinned(page)) {
695 if (!PageHighMem(page)) 703 if (!PageHighMem(page)) {
704 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
696 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 705 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
706 }
697 } 707 }
698} 708}
699 709
@@ -738,7 +748,7 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
738 pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base; 748 pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
739 749
740 /* special set_pte for pagetable initialization */ 750 /* special set_pte for pagetable initialization */
741 paravirt_ops.set_pte = xen_set_pte_init; 751 pv_mmu_ops.set_pte = xen_set_pte_init;
742 752
743 init_mm.pgd = base; 753 init_mm.pgd = base;
744 /* 754 /*
@@ -785,8 +795,8 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
785{ 795{
786 /* This will work as long as patching hasn't happened yet 796 /* This will work as long as patching hasn't happened yet
787 (which it hasn't) */ 797 (which it hasn't) */
788 paravirt_ops.alloc_pt = xen_alloc_pt; 798 pv_mmu_ops.alloc_pt = xen_alloc_pt;
789 paravirt_ops.set_pte = xen_set_pte; 799 pv_mmu_ops.set_pte = xen_set_pte;
790 800
791 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 801 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
792 /* 802 /*
@@ -808,15 +818,15 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
808 /* Actually pin the pagetable down, but we can't set PG_pinned 818 /* Actually pin the pagetable down, but we can't set PG_pinned
809 yet because the page structures don't exist yet. */ 819 yet because the page structures don't exist yet. */
810 { 820 {
811 struct mmuext_op op; 821 unsigned level;
822
812#ifdef CONFIG_X86_PAE 823#ifdef CONFIG_X86_PAE
813 op.cmd = MMUEXT_PIN_L3_TABLE; 824 level = MMUEXT_PIN_L3_TABLE;
814#else 825#else
815 op.cmd = MMUEXT_PIN_L3_TABLE; 826 level = MMUEXT_PIN_L2_TABLE;
816#endif 827#endif
817 op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(base))); 828
818 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) 829 pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
819 BUG();
820 } 830 }
821} 831}
822 832
@@ -833,12 +843,12 @@ void __init xen_setup_vcpu_info_placement(void)
833 if (have_vcpu_info_placement) { 843 if (have_vcpu_info_placement) {
834 printk(KERN_INFO "Xen: using vcpu_info placement\n"); 844 printk(KERN_INFO "Xen: using vcpu_info placement\n");
835 845
836 paravirt_ops.save_fl = xen_save_fl_direct; 846 pv_irq_ops.save_fl = xen_save_fl_direct;
837 paravirt_ops.restore_fl = xen_restore_fl_direct; 847 pv_irq_ops.restore_fl = xen_restore_fl_direct;
838 paravirt_ops.irq_disable = xen_irq_disable_direct; 848 pv_irq_ops.irq_disable = xen_irq_disable_direct;
839 paravirt_ops.irq_enable = xen_irq_enable_direct; 849 pv_irq_ops.irq_enable = xen_irq_enable_direct;
840 paravirt_ops.read_cr2 = xen_read_cr2_direct; 850 pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
841 paravirt_ops.iret = xen_iret_direct; 851 pv_cpu_ops.iret = xen_iret_direct;
842 } 852 }
843} 853}
844 854
@@ -850,8 +860,8 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
850 860
851 start = end = reloc = NULL; 861 start = end = reloc = NULL;
852 862
853#define SITE(x) \ 863#define SITE(op, x) \
854 case PARAVIRT_PATCH(x): \ 864 case PARAVIRT_PATCH(op.x): \
855 if (have_vcpu_info_placement) { \ 865 if (have_vcpu_info_placement) { \
856 start = (char *)xen_##x##_direct; \ 866 start = (char *)xen_##x##_direct; \
857 end = xen_##x##_direct_end; \ 867 end = xen_##x##_direct_end; \
@@ -860,10 +870,10 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
860 goto patch_site 870 goto patch_site
861 871
862 switch (type) { 872 switch (type) {
863 SITE(irq_enable); 873 SITE(pv_irq_ops, irq_enable);
864 SITE(irq_disable); 874 SITE(pv_irq_ops, irq_disable);
865 SITE(save_fl); 875 SITE(pv_irq_ops, save_fl);
866 SITE(restore_fl); 876 SITE(pv_irq_ops, restore_fl);
867#undef SITE 877#undef SITE
868 878
869 patch_site: 879 patch_site:
@@ -895,26 +905,32 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
895 return ret; 905 return ret;
896} 906}
897 907
898static const struct paravirt_ops xen_paravirt_ops __initdata = { 908static const struct pv_info xen_info __initdata = {
899 .paravirt_enabled = 1, 909 .paravirt_enabled = 1,
900 .shared_kernel_pmd = 0, 910 .shared_kernel_pmd = 0,
901 911
902 .name = "Xen", 912 .name = "Xen",
903 .banner = xen_banner, 913};
904 914
915static const struct pv_init_ops xen_init_ops __initdata = {
905 .patch = xen_patch, 916 .patch = xen_patch,
906 917
918 .banner = xen_banner,
907 .memory_setup = xen_memory_setup, 919 .memory_setup = xen_memory_setup,
908 .arch_setup = xen_arch_setup, 920 .arch_setup = xen_arch_setup,
909 .init_IRQ = xen_init_IRQ,
910 .post_allocator_init = xen_mark_init_mm_pinned, 921 .post_allocator_init = xen_mark_init_mm_pinned,
922};
911 923
924static const struct pv_time_ops xen_time_ops __initdata = {
912 .time_init = xen_time_init, 925 .time_init = xen_time_init,
926
913 .set_wallclock = xen_set_wallclock, 927 .set_wallclock = xen_set_wallclock,
914 .get_wallclock = xen_get_wallclock, 928 .get_wallclock = xen_get_wallclock,
915 .get_cpu_khz = xen_cpu_khz, 929 .get_cpu_khz = xen_cpu_khz,
916 .sched_clock = xen_sched_clock, 930 .sched_clock = xen_sched_clock,
931};
917 932
933static const struct pv_cpu_ops xen_cpu_ops __initdata = {
918 .cpuid = xen_cpuid, 934 .cpuid = xen_cpuid,
919 935
920 .set_debugreg = xen_set_debugreg, 936 .set_debugreg = xen_set_debugreg,
@@ -925,22 +941,10 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
925 .read_cr0 = native_read_cr0, 941 .read_cr0 = native_read_cr0,
926 .write_cr0 = native_write_cr0, 942 .write_cr0 = native_write_cr0,
927 943
928 .read_cr2 = xen_read_cr2,
929 .write_cr2 = xen_write_cr2,
930
931 .read_cr3 = xen_read_cr3,
932 .write_cr3 = xen_write_cr3,
933
934 .read_cr4 = native_read_cr4, 944 .read_cr4 = native_read_cr4,
935 .read_cr4_safe = native_read_cr4_safe, 945 .read_cr4_safe = native_read_cr4_safe,
936 .write_cr4 = xen_write_cr4, 946 .write_cr4 = xen_write_cr4,
937 947
938 .save_fl = xen_save_fl,
939 .restore_fl = xen_restore_fl,
940 .irq_disable = xen_irq_disable,
941 .irq_enable = xen_irq_enable,
942 .safe_halt = xen_safe_halt,
943 .halt = xen_halt,
944 .wbinvd = native_wbinvd, 948 .wbinvd = native_wbinvd,
945 949
946 .read_msr = native_read_msr_safe, 950 .read_msr = native_read_msr_safe,
@@ -969,6 +973,23 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
969 .set_iopl_mask = xen_set_iopl_mask, 973 .set_iopl_mask = xen_set_iopl_mask,
970 .io_delay = xen_io_delay, 974 .io_delay = xen_io_delay,
971 975
976 .lazy_mode = {
977 .enter = paravirt_enter_lazy_cpu,
978 .leave = xen_leave_lazy,
979 },
980};
981
982static const struct pv_irq_ops xen_irq_ops __initdata = {
983 .init_IRQ = xen_init_IRQ,
984 .save_fl = xen_save_fl,
985 .restore_fl = xen_restore_fl,
986 .irq_disable = xen_irq_disable,
987 .irq_enable = xen_irq_enable,
988 .safe_halt = xen_safe_halt,
989 .halt = xen_halt,
990};
991
992static const struct pv_apic_ops xen_apic_ops __initdata = {
972#ifdef CONFIG_X86_LOCAL_APIC 993#ifdef CONFIG_X86_LOCAL_APIC
973 .apic_write = xen_apic_write, 994 .apic_write = xen_apic_write,
974 .apic_write_atomic = xen_apic_write, 995 .apic_write_atomic = xen_apic_write,
@@ -977,6 +998,17 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
977 .setup_secondary_clock = paravirt_nop, 998 .setup_secondary_clock = paravirt_nop,
978 .startup_ipi_hook = paravirt_nop, 999 .startup_ipi_hook = paravirt_nop,
979#endif 1000#endif
1001};
1002
1003static const struct pv_mmu_ops xen_mmu_ops __initdata = {
1004 .pagetable_setup_start = xen_pagetable_setup_start,
1005 .pagetable_setup_done = xen_pagetable_setup_done,
1006
1007 .read_cr2 = xen_read_cr2,
1008 .write_cr2 = xen_write_cr2,
1009
1010 .read_cr3 = xen_read_cr3,
1011 .write_cr3 = xen_write_cr3,
980 1012
981 .flush_tlb_user = xen_flush_tlb, 1013 .flush_tlb_user = xen_flush_tlb,
982 .flush_tlb_kernel = xen_flush_tlb, 1014 .flush_tlb_kernel = xen_flush_tlb,
@@ -986,9 +1018,6 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
986 .pte_update = paravirt_nop, 1018 .pte_update = paravirt_nop,
987 .pte_update_defer = paravirt_nop, 1019 .pte_update_defer = paravirt_nop,
988 1020
989 .pagetable_setup_start = xen_pagetable_setup_start,
990 .pagetable_setup_done = xen_pagetable_setup_done,
991
992 .alloc_pt = xen_alloc_pt_init, 1021 .alloc_pt = xen_alloc_pt_init,
993 .release_pt = xen_release_pt, 1022 .release_pt = xen_release_pt,
994 .alloc_pd = paravirt_nop, 1023 .alloc_pd = paravirt_nop,
@@ -1024,7 +1053,10 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
1024 .dup_mmap = xen_dup_mmap, 1053 .dup_mmap = xen_dup_mmap,
1025 .exit_mmap = xen_exit_mmap, 1054 .exit_mmap = xen_exit_mmap,
1026 1055
1027 .set_lazy_mode = xen_set_lazy_mode, 1056 .lazy_mode = {
1057 .enter = paravirt_enter_lazy_mmu,
1058 .leave = xen_leave_lazy,
1059 },
1028}; 1060};
1029 1061
1030#ifdef CONFIG_SMP 1062#ifdef CONFIG_SMP
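With set_lazy_mode gone, each ops group carries an enter/leave pair and the mode bookkeeping itself moves into common code; the backend only supplies its leave hook. A minimal sketch of how the pieces fit together (the real helpers live in paravirt.c and enlighten.c; this is not copied from the patch):

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode);

void paravirt_enter_lazy_mmu(void)
{
	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);
	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

/* Xen's leave hook: drop the mode, then push out whatever was batched. */
static void xen_leave_lazy(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	xen_mc_flush();
}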
@@ -1080,6 +1112,17 @@ static const struct machine_ops __initdata xen_machine_ops = {
1080}; 1112};
1081 1113
1082 1114
1115static void __init xen_reserve_top(void)
1116{
1117 unsigned long top = HYPERVISOR_VIRT_START;
1118 struct xen_platform_parameters pp;
1119
1120 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1121 top = pp.virt_start;
1122
1123 reserve_top_address(-top + 2 * PAGE_SIZE);
1124}
1125
1083/* First C function to be called on Xen boot */ 1126/* First C function to be called on Xen boot */
1084asmlinkage void __init xen_start_kernel(void) 1127asmlinkage void __init xen_start_kernel(void)
1085{ 1128{
@@ -1091,7 +1134,14 @@ asmlinkage void __init xen_start_kernel(void)
1091 BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0); 1134 BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);
1092 1135
1093 /* Install Xen paravirt ops */ 1136 /* Install Xen paravirt ops */
1094 paravirt_ops = xen_paravirt_ops; 1137 pv_info = xen_info;
1138 pv_init_ops = xen_init_ops;
1139 pv_time_ops = xen_time_ops;
1140 pv_cpu_ops = xen_cpu_ops;
1141 pv_irq_ops = xen_irq_ops;
1142 pv_apic_ops = xen_apic_ops;
1143 pv_mmu_ops = xen_mmu_ops;
1144
1095 machine_ops = xen_machine_ops; 1145 machine_ops = xen_machine_ops;
1096 1146
1097#ifdef CONFIG_SMP 1147#ifdef CONFIG_SMP
@@ -1113,6 +1163,7 @@ asmlinkage void __init xen_start_kernel(void)
1113 /* keep using Xen gdt for now; no urgent need to change it */ 1163 /* keep using Xen gdt for now; no urgent need to change it */
1114 1164
1115 x86_write_percpu(xen_cr3, __pa(pgd)); 1165 x86_write_percpu(xen_cr3, __pa(pgd));
1166 x86_write_percpu(xen_current_cr3, __pa(pgd));
1116 1167
1117#ifdef CONFIG_SMP 1168#ifdef CONFIG_SMP
1118 /* Don't do the full vcpu_info placement stuff until we have a 1169 /* Don't do the full vcpu_info placement stuff until we have a
@@ -1124,12 +1175,12 @@ asmlinkage void __init xen_start_kernel(void)
1124 xen_setup_vcpu_info_placement(); 1175 xen_setup_vcpu_info_placement();
1125#endif 1176#endif
1126 1177
1127 paravirt_ops.kernel_rpl = 1; 1178 pv_info.kernel_rpl = 1;
1128 if (xen_feature(XENFEAT_supervisor_mode_kernel)) 1179 if (xen_feature(XENFEAT_supervisor_mode_kernel))
1129 paravirt_ops.kernel_rpl = 0; 1180 pv_info.kernel_rpl = 0;
1130 1181
1131 /* set the limit of our address space */ 1182 /* set the limit of our address space */
1132 reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE); 1183 xen_reserve_top();
1133 1184
1134 /* set up basic CPUID stuff */ 1185 /* set up basic CPUID stuff */
1135 cpu_detect(&new_cpu_data); 1186 cpu_detect(&new_cpu_data);
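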
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 874db0cd1d2a..b2e32f9d0071 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -41,7 +41,6 @@
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <linux/highmem.h> 42#include <linux/highmem.h>
43#include <linux/bug.h> 43#include <linux/bug.h>
44#include <linux/sched.h>
45 44
46#include <asm/pgtable.h> 45#include <asm/pgtable.h>
47#include <asm/tlbflush.h> 46#include <asm/tlbflush.h>
@@ -155,7 +154,7 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
155 pte_t *ptep, pte_t pteval) 154 pte_t *ptep, pte_t pteval)
156{ 155{
157 if (mm == current->mm || mm == &init_mm) { 156 if (mm == current->mm || mm == &init_mm) {
158 if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) { 157 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
159 struct multicall_space mcs; 158 struct multicall_space mcs;
160 mcs = xen_mc_entry(0); 159 mcs = xen_mc_entry(0);
161 160
@@ -304,7 +303,12 @@ pgd_t xen_make_pgd(unsigned long pgd)
304} 303}
305#endif /* CONFIG_X86_PAE */ 304#endif /* CONFIG_X86_PAE */
306 305
307 306enum pt_level {
307 PT_PGD,
308 PT_PUD,
309 PT_PMD,
310 PT_PTE
311};
308 312
309/* 313/*
310 (Yet another) pagetable walker. This one is intended for pinning a 314 (Yet another) pagetable walker. This one is intended for pinning a
@@ -316,7 +320,7 @@ pgd_t xen_make_pgd(unsigned long pgd)
316 FIXADDR_TOP. But the important bit is that we don't pin beyond 320 FIXADDR_TOP. But the important bit is that we don't pin beyond
317 there, because then we start getting into Xen's ptes. 321 there, because then we start getting into Xen's ptes.
318*/ 322*/
319static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned), 323static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
320 unsigned long limit) 324 unsigned long limit)
321{ 325{
322 pgd_t *pgd = pgd_base; 326 pgd_t *pgd = pgd_base;
@@ -341,7 +345,7 @@ static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
341 pud = pud_offset(pgd, 0); 345 pud = pud_offset(pgd, 0);
342 346
343 if (PTRS_PER_PUD > 1) /* not folded */ 347 if (PTRS_PER_PUD > 1) /* not folded */
344 flush |= (*func)(virt_to_page(pud), 0); 348 flush |= (*func)(virt_to_page(pud), PT_PUD);
345 349
346 for (; addr != pud_limit; pud++, addr = pud_next) { 350 for (; addr != pud_limit; pud++, addr = pud_next) {
347 pmd_t *pmd; 351 pmd_t *pmd;
@@ -360,7 +364,7 @@ static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
360 pmd = pmd_offset(pud, 0); 364 pmd = pmd_offset(pud, 0);
361 365
362 if (PTRS_PER_PMD > 1) /* not folded */ 366 if (PTRS_PER_PMD > 1) /* not folded */
363 flush |= (*func)(virt_to_page(pmd), 0); 367 flush |= (*func)(virt_to_page(pmd), PT_PMD);
364 368
365 for (; addr != pmd_limit; pmd++) { 369 for (; addr != pmd_limit; pmd++) {
366 addr += (PAGE_SIZE * PTRS_PER_PTE); 370 addr += (PAGE_SIZE * PTRS_PER_PTE);
@@ -372,17 +376,47 @@ static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
372 if (pmd_none(*pmd)) 376 if (pmd_none(*pmd))
373 continue; 377 continue;
374 378
375 flush |= (*func)(pmd_page(*pmd), 0); 379 flush |= (*func)(pmd_page(*pmd), PT_PTE);
376 } 380 }
377 } 381 }
378 } 382 }
379 383
380 flush |= (*func)(virt_to_page(pgd_base), UVMF_TLB_FLUSH); 384 flush |= (*func)(virt_to_page(pgd_base), PT_PGD);
381 385
382 return flush; 386 return flush;
383} 387}
384 388
385static int pin_page(struct page *page, unsigned flags) 389static spinlock_t *lock_pte(struct page *page)
390{
391 spinlock_t *ptl = NULL;
392
393#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
394 ptl = __pte_lockptr(page);
395 spin_lock(ptl);
396#endif
397
398 return ptl;
399}
400
401static void do_unlock(void *v)
402{
403 spinlock_t *ptl = v;
404 spin_unlock(ptl);
405}
406
407static void xen_do_pin(unsigned level, unsigned long pfn)
408{
409 struct mmuext_op *op;
410 struct multicall_space mcs;
411
412 mcs = __xen_mc_entry(sizeof(*op));
413 op = mcs.args;
414 op->cmd = level;
415 op->arg1.mfn = pfn_to_mfn(pfn);
416 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
417}
418
419static int pin_page(struct page *page, enum pt_level level)
386{ 420{
387 unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags); 421 unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
388 int flush; 422 int flush;
@@ -397,12 +431,26 @@ static int pin_page(struct page *page, unsigned flags)
397 void *pt = lowmem_page_address(page); 431 void *pt = lowmem_page_address(page);
398 unsigned long pfn = page_to_pfn(page); 432 unsigned long pfn = page_to_pfn(page);
399 struct multicall_space mcs = __xen_mc_entry(0); 433 struct multicall_space mcs = __xen_mc_entry(0);
434 spinlock_t *ptl;
400 435
401 flush = 0; 436 flush = 0;
402 437
438 ptl = NULL;
439 if (level == PT_PTE)
440 ptl = lock_pte(page);
441
403 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, 442 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
404 pfn_pte(pfn, PAGE_KERNEL_RO), 443 pfn_pte(pfn, PAGE_KERNEL_RO),
405 flags); 444 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
445
446 if (level == PT_PTE)
447 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
448
449 if (ptl) {
450 /* Queue a deferred unlock for when this batch
451 is completed. */
452 xen_mc_callback(do_unlock, ptl);
453 }
406 } 454 }
407 455
408 return flush; 456 return flush;
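pgd_walk() now tells its callback which level of the pagetable it is visiting, which is what lets pin_page() take the split pte lock only for PT_PTE pages and request a TLB flush only for the PT_PGD page. A hypothetical callback under the same contract, purely to show the interface:

/* Hypothetical walker callback: count pagetable pages per level.
   The return value follows pin_page(): non-zero means "flush the TLB
   once the walk is finished". */
static unsigned long level_count[4];

static int count_page(struct page *page, enum pt_level level)
{
	level_count[level]++;
	return 0;	/* nothing to flush */
}

/* used as: pgd_walk(init_mm.pgd, count_page, FIXADDR_TOP); */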
@@ -413,8 +461,7 @@ static int pin_page(struct page *page, unsigned flags)
413 read-only, and can be pinned. */ 461 read-only, and can be pinned. */
414void xen_pgd_pin(pgd_t *pgd) 462void xen_pgd_pin(pgd_t *pgd)
415{ 463{
416 struct multicall_space mcs; 464 unsigned level;
417 struct mmuext_op *op;
418 465
419 xen_mc_batch(); 466 xen_mc_batch();
420 467
@@ -425,16 +472,13 @@ void xen_pgd_pin(pgd_t *pgd)
425 xen_mc_batch(); 472 xen_mc_batch();
426 } 473 }
427 474
428 mcs = __xen_mc_entry(sizeof(*op));
429 op = mcs.args;
430
431#ifdef CONFIG_X86_PAE 475#ifdef CONFIG_X86_PAE
432 op->cmd = MMUEXT_PIN_L3_TABLE; 476 level = MMUEXT_PIN_L3_TABLE;
433#else 477#else
434 op->cmd = MMUEXT_PIN_L2_TABLE; 478 level = MMUEXT_PIN_L2_TABLE;
435#endif 479#endif
436 op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd))); 480
437 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 481 xen_do_pin(level, PFN_DOWN(__pa(pgd)));
438 482
439 xen_mc_issue(0); 483 xen_mc_issue(0);
440} 484}
@@ -442,7 +486,7 @@ void xen_pgd_pin(pgd_t *pgd)
442/* The init_mm pagetable is really pinned as soon as it's created, but 486 /* The init_mm pagetable is really pinned as soon as it's created, but
443 that's before we have page structures to store the bits. So do all 487 that's before we have page structures to store the bits. So do all
444 the book-keeping now. */ 488 the book-keeping now. */
445static __init int mark_pinned(struct page *page, unsigned flags) 489static __init int mark_pinned(struct page *page, enum pt_level level)
446{ 490{
447 SetPagePinned(page); 491 SetPagePinned(page);
448 return 0; 492 return 0;
@@ -453,18 +497,32 @@ void __init xen_mark_init_mm_pinned(void)
453 pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP); 497 pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
454} 498}
455 499
456static int unpin_page(struct page *page, unsigned flags) 500static int unpin_page(struct page *page, enum pt_level level)
457{ 501{
458 unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags); 502 unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);
459 503
460 if (pgfl && !PageHighMem(page)) { 504 if (pgfl && !PageHighMem(page)) {
461 void *pt = lowmem_page_address(page); 505 void *pt = lowmem_page_address(page);
462 unsigned long pfn = page_to_pfn(page); 506 unsigned long pfn = page_to_pfn(page);
463 struct multicall_space mcs = __xen_mc_entry(0); 507 spinlock_t *ptl = NULL;
508 struct multicall_space mcs;
509
510 if (level == PT_PTE) {
511 ptl = lock_pte(page);
512
513 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
514 }
515
516 mcs = __xen_mc_entry(0);
464 517
465 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, 518 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
466 pfn_pte(pfn, PAGE_KERNEL), 519 pfn_pte(pfn, PAGE_KERNEL),
467 flags); 520 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
521
522 if (ptl) {
523 /* unlock when batch completed */
524 xen_mc_callback(do_unlock, ptl);
525 }
468 } 526 }
469 527
470 return 0; /* never need to flush on unpin */ 528 return 0; /* never need to flush on unpin */
@@ -473,18 +531,9 @@ static int unpin_page(struct page *page, unsigned flags)
473/* Release a pagetable's pages back as normal RW */ 531 /* Release a pagetable's pages back as normal RW */
474static void xen_pgd_unpin(pgd_t *pgd) 532static void xen_pgd_unpin(pgd_t *pgd)
475{ 533{
476 struct mmuext_op *op;
477 struct multicall_space mcs;
478
479 xen_mc_batch(); 534 xen_mc_batch();
480 535
481 mcs = __xen_mc_entry(sizeof(*op)); 536 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
482
483 op = mcs.args;
484 op->cmd = MMUEXT_UNPIN_TABLE;
485 op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
486
487 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
488 537
489 pgd_walk(pgd, unpin_page, TASK_SIZE); 538 pgd_walk(pgd, unpin_page, TASK_SIZE);
490 539
@@ -515,20 +564,43 @@ static void drop_other_mm_ref(void *info)
515 564
516 if (__get_cpu_var(cpu_tlbstate).active_mm == mm) 565 if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
517 leave_mm(smp_processor_id()); 566 leave_mm(smp_processor_id());
567
568 /* If this cpu still has a stale cr3 reference, then make sure
569 it has been flushed. */
570 if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
571 load_cr3(swapper_pg_dir);
572 arch_flush_lazy_cpu_mode();
573 }
518} 574}
519 575
520static void drop_mm_ref(struct mm_struct *mm) 576static void drop_mm_ref(struct mm_struct *mm)
521{ 577{
578 cpumask_t mask;
579 unsigned cpu;
580
522 if (current->active_mm == mm) { 581 if (current->active_mm == mm) {
523 if (current->mm == mm) 582 if (current->mm == mm)
524 load_cr3(swapper_pg_dir); 583 load_cr3(swapper_pg_dir);
525 else 584 else
526 leave_mm(smp_processor_id()); 585 leave_mm(smp_processor_id());
586 arch_flush_lazy_cpu_mode();
527 } 587 }
528 588
529 if (!cpus_empty(mm->cpu_vm_mask)) 589 /* Get the "official" set of cpus referring to our pagetable. */
530 xen_smp_call_function_mask(mm->cpu_vm_mask, drop_other_mm_ref, 590 mask = mm->cpu_vm_mask;
531 mm, 1); 591
592 /* It's possible that a vcpu may have a stale reference to our
593 cr3, because it's in lazy mode, and it hasn't flushed
594 its set of pending hypercalls yet. In this case, we can
595 look at its actual current cr3 value, and force it to flush
596 if needed. */
597 for_each_online_cpu(cpu) {
598 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
599 cpu_set(cpu, mask);
600 }
601
602 if (!cpus_empty(mask))
603 xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
532} 604}
533#else 605#else
534static void drop_mm_ref(struct mm_struct *mm) 606static void drop_mm_ref(struct mm_struct *mm)
@@ -563,5 +635,6 @@ void xen_exit_mmap(struct mm_struct *mm)
563 /* pgd may not be pinned in the error exit path of execve */ 635 /* pgd may not be pinned in the error exit path of execve */
564 if (PagePinned(virt_to_page(mm->pgd))) 636 if (PagePinned(virt_to_page(mm->pgd)))
565 xen_pgd_unpin(mm->pgd); 637 xen_pgd_unpin(mm->pgd);
638
566 spin_unlock(&mm->page_table_lock); 639 spin_unlock(&mm->page_table_lock);
567} 640}
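The new per-cpu xen_current_cr3 tracks the cr3 value the hypervisor has actually been given, while xen_cr3 is updated as soon as a new value is queued; the two only differ while a lazy batch is pending, which is exactly the window drop_other_mm_ref() has to care about. The writer side (xen_write_cr3() in enlighten.c, not part of this hunk) presumably keeps them in sync along these lines, sketched here under that assumption:

/* Sketch only: queue the cr3 switch, record the intended value at once,
   and let a batch-completion callback publish it as "current". */
static void set_current_cr3(void *v)
{
	x86_write_percpu(xen_current_cr3, (unsigned long)v);
}

static void example_write_cr3(unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	xen_mc_batch();

	x86_write_percpu(xen_cr3, cr3);		/* intended value, visible now */

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_NEW_BASEPTR;
	op->arg1.mfn = pfn_to_mfn(PFN_DOWN(cr3));
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* only once the hypercall has really gone out does "current" change */
	xen_mc_callback(set_current_cr3, (void *)cr3);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}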
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index c837e8e463db..5e6f36f6d876 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -26,13 +26,22 @@
26 26
27#include "multicalls.h" 27#include "multicalls.h"
28 28
29#define MC_DEBUG 1
30
29#define MC_BATCH 32 31#define MC_BATCH 32
30#define MC_ARGS (MC_BATCH * 16 / sizeof(u64)) 32#define MC_ARGS (MC_BATCH * 16 / sizeof(u64))
31 33
32struct mc_buffer { 34struct mc_buffer {
33 struct multicall_entry entries[MC_BATCH]; 35 struct multicall_entry entries[MC_BATCH];
36#if MC_DEBUG
37 struct multicall_entry debug[MC_BATCH];
38#endif
34 u64 args[MC_ARGS]; 39 u64 args[MC_ARGS];
35 unsigned mcidx, argidx; 40 struct callback {
41 void (*fn)(void *);
42 void *data;
43 } callbacks[MC_BATCH];
44 unsigned mcidx, argidx, cbidx;
36}; 45};
37 46
38static DEFINE_PER_CPU(struct mc_buffer, mc_buffer); 47static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
@@ -43,6 +52,7 @@ void xen_mc_flush(void)
43 struct mc_buffer *b = &__get_cpu_var(mc_buffer); 52 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
44 int ret = 0; 53 int ret = 0;
45 unsigned long flags; 54 unsigned long flags;
55 int i;
46 56
47 BUG_ON(preemptible()); 57 BUG_ON(preemptible());
48 58
@@ -51,13 +61,31 @@ void xen_mc_flush(void)
51 local_irq_save(flags); 61 local_irq_save(flags);
52 62
53 if (b->mcidx) { 63 if (b->mcidx) {
54 int i; 64#if MC_DEBUG
65 memcpy(b->debug, b->entries,
66 b->mcidx * sizeof(struct multicall_entry));
67#endif
55 68
56 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0) 69 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
57 BUG(); 70 BUG();
58 for (i = 0; i < b->mcidx; i++) 71 for (i = 0; i < b->mcidx; i++)
59 if (b->entries[i].result < 0) 72 if (b->entries[i].result < 0)
60 ret++; 73 ret++;
74
75#if MC_DEBUG
76 if (ret) {
77 printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
78 ret, smp_processor_id());
79 for(i = 0; i < b->mcidx; i++) {
80 printk(" call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
81 i+1, b->mcidx,
82 b->debug[i].op,
83 b->debug[i].args[0],
84 b->entries[i].result);
85 }
86 }
87#endif
88
61 b->mcidx = 0; 89 b->mcidx = 0;
62 b->argidx = 0; 90 b->argidx = 0;
63 } else 91 } else
@@ -65,6 +93,13 @@ void xen_mc_flush(void)
65 93
66 local_irq_restore(flags); 94 local_irq_restore(flags);
67 95
96 for (i = 0; i < b->cbidx; i++) {
97 struct callback *cb = &b->callbacks[i];
98
99 (*cb->fn)(cb->data);
100 }
101 b->cbidx = 0;
102
68 BUG_ON(ret); 103 BUG_ON(ret);
69} 104}
70 105
@@ -88,3 +123,16 @@ struct multicall_space __xen_mc_entry(size_t args)
88 123
89 return ret; 124 return ret;
90} 125}
126
127void xen_mc_callback(void (*fn)(void *), void *data)
128{
129 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
130 struct callback *cb;
131
132 if (b->cbidx == MC_BATCH)
133 xen_mc_flush();
134
135 cb = &b->callbacks[b->cbidx++];
136 cb->fn = fn;
137 cb->data = data;
138}
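xen_mc_callback() attaches completion work to whatever batch is currently open; xen_mc_flush() runs the callbacks only after HYPERVISOR_multicall() has returned and interrupts are back on. The pinning path uses it so a split pte lock stays held across the batched remapping and is released only when the batch completes. Condensed into one self-contained sequence, assuming the same static helpers as in mmu.c:

/* Condensed from pin_page()/xen_pgd_pin(): lock, queue, pin, and defer
   the unlock to the batch-completion callback. */
static void example_pin_pte_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	void *pt = lowmem_page_address(page);
	struct multicall_space mcs;
	spinlock_t *ptl;

	xen_mc_batch();

	ptl = lock_pte(page);		/* NULL unless split pte locks are in use */

	mcs = __xen_mc_entry(0);
	MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
				pfn_pte(pfn, PAGE_KERNEL_RO), 0);
	xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

	if (ptl)
		xen_mc_callback(do_unlock, ptl);	/* runs after the flush */

	xen_mc_issue(0);
}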
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index e6f7530b156c..8bae996d99a3 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -35,11 +35,14 @@ void xen_mc_flush(void);
35/* Issue a multicall if we're not in a lazy mode */ 35/* Issue a multicall if we're not in a lazy mode */
36static inline void xen_mc_issue(unsigned mode) 36static inline void xen_mc_issue(unsigned mode)
37{ 37{
38 if ((xen_get_lazy_mode() & mode) == 0) 38 if ((paravirt_get_lazy_mode() & mode) == 0)
39 xen_mc_flush(); 39 xen_mc_flush();
40 40
41 /* restore flags saved in xen_mc_batch */ 41 /* restore flags saved in xen_mc_batch */
42 local_irq_restore(x86_read_percpu(xen_mc_irq_flags)); 42 local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
43} 43}
44 44
45/* Set up a callback to be called when the current batch is flushed */
46void xen_mc_callback(void (*fn)(void *), void *data);
47
45#endif /* _XEN_MULTICALLS_H */ 48#endif /* _XEN_MULTICALLS_H */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4fa33c27ccb6..c1b131bcdcbe 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -356,6 +356,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
356 */ 356 */
357 irq_enter(); 357 irq_enter();
358 (*func)(info); 358 (*func)(info);
359 __get_cpu_var(irq_stat).irq_call_count++;
359 irq_exit(); 360 irq_exit();
360 361
361 if (wait) { 362 if (wait) {
@@ -370,7 +371,8 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
370 void *info, int wait) 371 void *info, int wait)
371{ 372{
372 struct call_data_struct data; 373 struct call_data_struct data;
373 int cpus; 374 int cpus, cpu;
375 bool yield;
374 376
375 /* Holding any lock stops cpus from going down. */ 377 /* Holding any lock stops cpus from going down. */
376 spin_lock(&call_lock); 378 spin_lock(&call_lock);
@@ -399,9 +401,14 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
399 /* Send a message to other CPUs and wait for them to respond */ 401 /* Send a message to other CPUs and wait for them to respond */
400 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); 402 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
401 403
402 /* Make sure other vcpus get a chance to run. 404 /* Make sure other vcpus get a chance to run if they need to. */
403 XXX too severe? Maybe we should check the other CPU's states? */ 405 yield = false;
404 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 406 for_each_cpu_mask(cpu, mask)
407 if (xen_vcpu_stolen(cpu))
408 yield = true;
409
410 if (yield)
411 HYPERVISOR_sched_op(SCHEDOP_yield, 0);
405 412
406 /* Wait for response */ 413 /* Wait for response */
407 while (atomic_read(&data.started) != cpus || 414 while (atomic_read(&data.started) != cpus ||
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index dfd6db69ead5..d083ff5ef088 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -105,6 +105,12 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
105 } while (get64(&state->state_entry_time) != state_time); 105 } while (get64(&state->state_entry_time) != state_time);
106} 106}
107 107
108/* return true when a vcpu could run but has no real cpu to run on */
109bool xen_vcpu_stolen(int vcpu)
110{
111 return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
112}
113
108static void setup_runstate_info(int cpu) 114static void setup_runstate_info(int cpu)
109{ 115{
110 struct vcpu_register_runstate_memory_area area; 116 struct vcpu_register_runstate_memory_area area;
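xen_vcpu_stolen() works because of how Xen reports vcpu states through the registered runstate area: "runnable" is precisely the case where the vcpu wants a physical CPU but does not have one, so an IPI to it will sit until the hypervisor schedules it, whereas a blocked vcpu will be woken by the event channel anyway. For reference, the states as defined in the Xen interface headers are roughly:

/* Values of struct vcpu_runstate_info.state (reference sketch). */
enum {
	RUNSTATE_running,	/* currently on a physical cpu */
	RUNSTATE_runnable,	/* wants to run, no cpu free: "stolen" time */
	RUNSTATE_blocked,	/* idle, will be woken by an event */
	RUNSTATE_offline,	/* not runnable at all (paused, down, ...) */
};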
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index b9aaea45f07f..b02a909bfd4c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -11,6 +11,7 @@ void xen_copy_trap_info(struct trap_info *traps);
11 11
12DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); 12DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
13DECLARE_PER_CPU(unsigned long, xen_cr3); 13DECLARE_PER_CPU(unsigned long, xen_cr3);
14DECLARE_PER_CPU(unsigned long, xen_current_cr3);
14 15
15extern struct start_info *xen_start_info; 16extern struct start_info *xen_start_info;
16extern struct shared_info *HYPERVISOR_shared_info; 17extern struct shared_info *HYPERVISOR_shared_info;
@@ -27,14 +28,9 @@ unsigned long xen_get_wallclock(void);
27int xen_set_wallclock(unsigned long time); 28int xen_set_wallclock(unsigned long time);
28unsigned long long xen_sched_clock(void); 29unsigned long long xen_sched_clock(void);
29 30
30void xen_mark_init_mm_pinned(void); 31bool xen_vcpu_stolen(int vcpu);
31
32DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
33 32
34static inline unsigned xen_get_lazy_mode(void) 33void xen_mark_init_mm_pinned(void);
35{
36 return x86_read_percpu(xen_lazy_mode);
37}
38 34
39void __init xen_fill_possible_map(void); 35void __init xen_fill_possible_map(void);
40 36
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index d681be88ae5d..43fafe9e9c08 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -175,14 +175,12 @@ config MK8
175config MPSC 175config MPSC
176 bool "Intel P4 / older Netburst based Xeon" 176 bool "Intel P4 / older Netburst based Xeon"
177 help 177 help
178 Optimize for Intel Pentium 4 and older Nocona/Dempsey Xeon CPUs 178 Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
179 with Intel Extended Memory 64 Technology(EM64T). For details see 179 Xeon CPUs with Intel 64bit which is compatible with x86-64.
180 <http://www.intel.com/technology/64bitextensions/>.
181 Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the 180 Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
182 Netburst core and shouldn't use this option. You can distinguish them 181 Netburst core and shouldn't use this option. You can distinguish them
183 using the cpu family field 182 using the cpu family field
184 in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one 183 in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
185 (this rule only applies to systems that support EM64T)
186 184
187config MCORE2 185config MCORE2
188 bool "Intel Core2 / newer Xeon" 186 bool "Intel Core2 / newer Xeon"
@@ -190,8 +188,7 @@ config MCORE2
190 Optimize for Intel Core2 and newer Xeons (51xx) 188 Optimize for Intel Core2 and newer Xeons (51xx)
191 You can distinguish the newer Xeons from the older ones using 189 You can distinguish the newer Xeons from the older ones using
192 the cpu family field in /proc/cpuinfo. 15 is an older Xeon 190 the cpu family field in /proc/cpuinfo. 15 is an older Xeon
193 (use CONFIG_MPSC then), 6 is a newer one. This rule only 191 (use CONFIG_MPSC then), 6 is a newer one.
194 applies to CPUs that support EM64T.
195 192
196config GENERIC_CPU 193config GENERIC_CPU
197 bool "Generic-x86-64" 194 bool "Generic-x86-64"
@@ -476,8 +473,9 @@ config HPET_TIMER
476 <http://www.intel.com/hardwaredesign/hpetspec.htm>. 473 <http://www.intel.com/hardwaredesign/hpetspec.htm>.
477 474
478config HPET_EMULATE_RTC 475config HPET_EMULATE_RTC
479 bool "Provide RTC interrupt" 476 bool
480 depends on HPET_TIMER && RTC=y 477 depends on HPET_TIMER && RTC=y
478 default y
481 479
482# Mark as embedded because too many people got it wrong. 480# Mark as embedded because too many people got it wrong.
483# The code disables itself when not needed. 481# The code disables itself when not needed.
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 9daa32d1d2a1..03e1ede27b85 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -110,9 +110,15 @@ bzdisk: vmlinux
110fdimage fdimage144 fdimage288 isoimage: vmlinux 110fdimage fdimage144 fdimage288 isoimage: vmlinux
111 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ 111 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
112 112
113install: 113install: vdso_install
114 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ 114 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
115 115
116vdso_install:
117ifeq ($(CONFIG_IA32_EMULATION),y)
118 $(Q)$(MAKE) $(build)=arch/x86/ia32 $@
119endif
120 $(Q)$(MAKE) $(build)=arch/x86/vdso $@
121
116archclean: 122archclean:
117 $(Q)rm -rf $(objtree)/arch/x86_64/boot 123 $(Q)rm -rf $(objtree)/arch/x86_64/boot
118 $(Q)$(MAKE) $(clean)=$(boot) 124 $(Q)$(MAKE) $(clean)=$(boot)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 9e3f3cc85d0d..3935469e3662 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1322,8 +1322,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1322 struct scatterlist *sglist) 1322 struct scatterlist *sglist)
1323{ 1323{
1324 struct bio_vec *bvec, *bvprv; 1324 struct bio_vec *bvec, *bvprv;
1325 struct scatterlist *next_sg, *sg;
1326 struct req_iterator iter; 1325 struct req_iterator iter;
1326 struct scatterlist *sg;
1327 int nsegs, cluster; 1327 int nsegs, cluster;
1328 1328
1329 nsegs = 0; 1329 nsegs = 0;
@@ -1333,7 +1333,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1333 * for each bio in rq 1333 * for each bio in rq
1334 */ 1334 */
1335 bvprv = NULL; 1335 bvprv = NULL;
1336 sg = next_sg = &sglist[0]; 1336 sg = NULL;
1337 rq_for_each_segment(bvec, rq, iter) { 1337 rq_for_each_segment(bvec, rq, iter) {
1338 int nbytes = bvec->bv_len; 1338 int nbytes = bvec->bv_len;
1339 1339
@@ -1349,8 +1349,10 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1349 sg->length += nbytes; 1349 sg->length += nbytes;
1350 } else { 1350 } else {
1351new_segment: 1351new_segment:
1352 sg = next_sg; 1352 if (!sg)
1353 next_sg = sg_next(sg); 1353 sg = sglist;
1354 else
1355 sg = sg_next(sg);
1354 1356
1355 memset(sg, 0, sizeof(*sg)); 1357 memset(sg, 0, sizeof(*sg));
1356 sg->page = bvec->bv_page; 1358 sg->page = bvec->bv_page;
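Pre-computing next_sg is no longer safe with chained scatterlists, because sg_next() may only be applied to an entry that is actually in use; the new shape advances the cursor lazily, picking sglist[0] for the first segment and calling sg_next() after that. The same idiom in isolation (hypothetical helper, not from this patch):

/* Hypothetical: fill a (possibly chained) scatterlist from an array of
   lowmem buffers, advancing with sg_next() only when an entry is used. */
static int map_bufs(struct scatterlist *sglist,
		    void **bufs, unsigned int *lens, int nbufs)
{
	struct scatterlist *sg = NULL;
	int i;

	for (i = 0; i < nbufs; i++) {
		sg = sg ? sg_next(sg) : sglist;	/* first use picks sglist[0] */

		memset(sg, 0, sizeof(*sg));
		sg->page = virt_to_page(bufs[i]);
		sg->offset = offset_in_page(bufs[i]);
		sg->length = lens[i];
	}

	return nbufs;	/* number of segments used */
}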
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index d78cd09186aa..cac0009cebc1 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -221,7 +221,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
221 SetPageReserved(virt_to_page((char *)page)); 221 SetPageReserved(virt_to_page((char *)page));
222 222
223 for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk) 223 for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
224 asm volatile("clflush %0" : : "m" (*(char *)(page+offset))); 224 clflush((char *)page+offset);
225 225
226 efficeon_private.l1_table[index] = page; 226 efficeon_private.l1_table[index] = page;
227 227
@@ -268,15 +268,16 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t
268 *page = insert; 268 *page = insert;
269 269
270 /* clflush is slow, so don't clflush until we have to */ 270 /* clflush is slow, so don't clflush until we have to */
271 if ( last_page && 271 if (last_page &&
272 ((unsigned long)page^(unsigned long)last_page) & clflush_mask ) 272 (((unsigned long)page^(unsigned long)last_page) &
273 asm volatile("clflush %0" : : "m" (*last_page)); 273 clflush_mask))
274 clflush(last_page);
274 275
275 last_page = page; 276 last_page = page;
276 } 277 }
277 278
278 if ( last_page ) 279 if ( last_page )
279 asm volatile("clflush %0" : : "m" (*last_page)); 280 clflush(last_page);
280 281
281 agp_bridge->driver->tlb_flush(mem); 282 agp_bridge->driver->tlb_flush(mem);
282 return 0; 283 return 0;
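The open-coded "clflush %0" asm is replaced by the clflush() helper; the flushing strategy itself is unchanged: one flush per cache-line-sized chunk, and the previous GATT entry is only flushed once the write pointer has left its cache line. The chunked flush as a stand-alone sketch, assuming clflush(addr) flushes the line containing addr:

/* Sketch: write back/invalidate a buffer one cache line at a time. */
static void flush_range(void *buf, size_t len, unsigned int clflush_chunk)
{
	size_t offset;

	for (offset = 0; offset < len; offset += clflush_chunk)
		clflush((char *)buf + offset);
}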
diff --git a/drivers/char/hvc_lguest.c b/drivers/char/hvc_lguest.c
index 3d6bd0baa56d..efccb2155830 100644
--- a/drivers/char/hvc_lguest.c
+++ b/drivers/char/hvc_lguest.c
@@ -115,7 +115,7 @@ static struct hv_ops lguest_cons = {
115 * (0), and the struct hv_ops containing the put_chars() function. */ 115 * (0), and the struct hv_ops containing the put_chars() function. */
116static int __init cons_init(void) 116static int __init cons_init(void)
117{ 117{
118 if (strcmp(paravirt_ops.name, "lguest") != 0) 118 if (strcmp(pv_info.name, "lguest") != 0)
119 return 0; 119 return 0;
120 120
121 return hvc_instantiate(0, 0, &lguest_cons); 121 return hvc_instantiate(0, 0, &lguest_cons);
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 4a315f08a567..a0788c12b392 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -248,8 +248,8 @@ static void unmap_switcher(void)
248} 248}
249 249
250/*H:130 Our Guest is usually so well behaved; it never tries to do things it 250/*H:130 Our Guest is usually so well behaved; it never tries to do things it
251 * isn't allowed to. Unfortunately, "struct paravirt_ops" isn't quite 251 * isn't allowed to. Unfortunately, Linux's paravirtual infrastructure isn't
252 * complete, because it doesn't contain replacements for the Intel I/O 252 * quite complete, because it doesn't contain replacements for the Intel I/O
253 * instructions. As a result, the Guest sometimes fumbles across one during 253 * instructions. As a result, the Guest sometimes fumbles across one during
254 * the boot process as it probes for various things which are usually attached 254 * the boot process as it probes for various things which are usually attached
255 * to a PC. 255 * to a PC.
@@ -694,7 +694,7 @@ static int __init init(void)
694 694
695 /* Lguest can't run under Xen, VMI or itself. It does Tricky Stuff. */ 695 /* Lguest can't run under Xen, VMI or itself. It does Tricky Stuff. */
696 if (paravirt_enabled()) { 696 if (paravirt_enabled()) {
697 printk("lguest is afraid of %s\n", paravirt_ops.name); 697 printk("lguest is afraid of %s\n", pv_info.name);
698 return -EPERM; 698 return -EPERM;
699 } 699 }
700 700
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
index 4a579c840301..3ba337dde857 100644
--- a/drivers/lguest/lguest.c
+++ b/drivers/lguest/lguest.c
@@ -23,7 +23,7 @@
23 * 23 *
24 * So how does the kernel know it's a Guest? The Guest starts at a special 24 * So how does the kernel know it's a Guest? The Guest starts at a special
25 * entry point marked with a magic string, which sets up a few things then 25 * entry point marked with a magic string, which sets up a few things then
26 * calls here. We replace the native functions in "struct paravirt_ops" 26 * calls here. We replace the native functions in the various "paravirt" structures
27 * with our Guest versions, then boot like normal. :*/ 27 * with our Guest versions, then boot like normal. :*/
28 28
29/* 29/*
@@ -97,29 +97,17 @@ static cycle_t clock_base;
97 * them as a batch when lazy_mode is eventually turned off. Because hypercalls 97 * them as a batch when lazy_mode is eventually turned off. Because hypercalls
98 * are reasonably expensive, batching them up makes sense. For example, a 98 * are reasonably expensive, batching them up makes sense. For example, a
99 * large mmap might update dozens of page table entries: that code calls 99 * large mmap might update dozens of page table entries: that code calls
100 * lguest_lazy_mode(PARAVIRT_LAZY_MMU), does the dozen updates, then calls 100 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
101 * lguest_lazy_mode(PARAVIRT_LAZY_NONE). 101 * lguest_leave_lazy_mode().
102 * 102 *
103 * So, when we're in lazy mode, we call async_hypercall() to store the call for 103 * So, when we're in lazy mode, we call async_hypercall() to store the call for
104 * future processing. When lazy mode is turned off we issue a hypercall to 104 * future processing. When lazy mode is turned off we issue a hypercall to
105 * flush the stored calls. 105 * flush the stored calls.
106 * 106 */
107 * There's also a hack where "mode" is set to "PARAVIRT_LAZY_FLUSH" which 107static void lguest_leave_lazy_mode(void)
108 * indicates we're to flush any outstanding calls immediately. This is used
109 * when an interrupt handler does a kmap_atomic(): the page table changes must
110 * happen immediately even if we're in the middle of a batch. Usually we're
111 * not, though, so there's nothing to do. */
112static enum paravirt_lazy_mode lazy_mode; /* Note: not SMP-safe! */
113static void lguest_lazy_mode(enum paravirt_lazy_mode mode)
114{ 108{
115 if (mode == PARAVIRT_LAZY_FLUSH) { 109 paravirt_leave_lazy(paravirt_get_lazy_mode());
116 if (unlikely(lazy_mode != PARAVIRT_LAZY_NONE)) 110 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
117 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
118 } else {
119 lazy_mode = mode;
120 if (mode == PARAVIRT_LAZY_NONE)
121 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
122 }
123} 111}
124 112
125static void lazy_hcall(unsigned long call, 113static void lazy_hcall(unsigned long call,
@@ -127,7 +115,7 @@ static void lazy_hcall(unsigned long call,
127 unsigned long arg2, 115 unsigned long arg2,
128 unsigned long arg3) 116 unsigned long arg3)
129{ 117{
130 if (lazy_mode == PARAVIRT_LAZY_NONE) 118 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
131 hcall(call, arg1, arg2, arg3); 119 hcall(call, arg1, arg2, arg3);
132 else 120 else
133 async_hcall(call, arg1, arg2, arg3); 121 async_hcall(call, arg1, arg2, arg3);
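lazy_hcall() is the switch point: outside lazy mode every call traps to the Host immediately, inside lazy mode it is queued with async_hcall() and nothing reaches the Host until lguest_leave_lazy_mode() issues LHCALL_FLUSH_ASYNC. From the Guest's side a batched run of updates therefore looks roughly like this (sketch; in reality the enter/leave calls come from generic mm code through the lazy_mode hooks rather than being made directly):

/* Sketch of the batching flow around a run of flushes/updates. */
static void example_batched_calls(int n)
{
	int i;

	paravirt_enter_lazy_mmu();	/* pv_mmu_ops.lazy_mode.enter */

	for (i = 0; i < n; i++)
		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);	/* queued, not issued */

	lguest_leave_lazy_mode();	/* LHCALL_FLUSH_ASYNC drains the queue */
}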
@@ -331,7 +319,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
331} 319}
332 320
333/*G:038 That's enough excitement for now, back to ploughing through each of 321/*G:038 That's enough excitement for now, back to ploughing through each of
334 * the paravirt_ops (we're about 1/3 of the way through). 322 * the different pv_ops structures (we're about 1/3 of the way through).
335 * 323 *
336 * This is the Local Descriptor Table, another weird Intel thingy. Linux only 324 * This is the Local Descriptor Table, another weird Intel thingy. Linux only
337 * uses this for some strange applications like Wine. We don't do anything 325 * uses this for some strange applications like Wine. We don't do anything
@@ -558,7 +546,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
558 lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); 546 lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
559} 547}
560 548
561/* Unfortunately for Lguest, the paravirt_ops for page tables were based on 549/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
562 * native page table operations. On native hardware you can set a new page 550 * native page table operations. On native hardware you can set a new page
563 * table entry whenever you want, but if you want to remove one you have to do 551 * table entry whenever you want, but if you want to remove one you have to do
564 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU). 552 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
@@ -782,7 +770,7 @@ static void lguest_time_init(void)
782 clocksource_register(&lguest_clock); 770 clocksource_register(&lguest_clock);
783 771
784 /* Now we've set up our clock, we can use it as the scheduler clock */ 772 /* Now we've set up our clock, we can use it as the scheduler clock */
785 paravirt_ops.sched_clock = lguest_sched_clock; 773 pv_time_ops.sched_clock = lguest_sched_clock;
786 774
787 /* We can't set cpumask in the initializer: damn C limitations! Set it 775 /* We can't set cpumask in the initializer: damn C limitations! Set it
788 * here and register our timer device. */ 776 * here and register our timer device. */
@@ -904,7 +892,7 @@ static __init char *lguest_memory_setup(void)
904/*G:050 892/*G:050
905 * Patching (Powerfully Placating Performance Pedants) 893 * Patching (Powerfully Placating Performance Pedants)
906 * 894 *
907 * We have already seen that "struct paravirt_ops" lets us replace simple 895 * We have already seen that pv_ops structures let us replace simple
908 * native instructions with calls to the appropriate back end all throughout 896 * native instructions with calls to the appropriate back end all throughout
909 * the kernel. This allows the same kernel to run as a Guest and as a native 897 * the kernel. This allows the same kernel to run as a Guest and as a native
910 * kernel, but it's slow because of all the indirect branches. 898 * kernel, but it's slow because of all the indirect branches.
@@ -929,10 +917,10 @@ static const struct lguest_insns
929{ 917{
930 const char *start, *end; 918 const char *start, *end;
931} lguest_insns[] = { 919} lguest_insns[] = {
932 [PARAVIRT_PATCH(irq_disable)] = { lgstart_cli, lgend_cli }, 920 [PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
933 [PARAVIRT_PATCH(irq_enable)] = { lgstart_sti, lgend_sti }, 921 [PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
934 [PARAVIRT_PATCH(restore_fl)] = { lgstart_popf, lgend_popf }, 922 [PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
935 [PARAVIRT_PATCH(save_fl)] = { lgstart_pushf, lgend_pushf }, 923 [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
936}; 924};
937 925
938/* Now our patch routine is fairly simple (based on the native one in 926/* Now our patch routine is fairly simple (based on the native one in
@@ -959,9 +947,9 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
959 return insn_len; 947 return insn_len;
960} 948}
961 949
962/*G:030 Once we get to lguest_init(), we know we're a Guest. The paravirt_ops 950/*G:030 Once we get to lguest_init(), we know we're a Guest. The pv_ops
963 * structure in the kernel provides a single point for (almost) every routine 951 * structures in the kernel provide points for (almost) every routine we have
964 * we have to override to avoid privileged instructions. */ 952 * to override to avoid privileged instructions. */
965__init void lguest_init(void *boot) 953__init void lguest_init(void *boot)
966{ 954{
967 /* Copy boot parameters first: the Launcher put the physical location 955 /* Copy boot parameters first: the Launcher put the physical location
@@ -976,54 +964,70 @@ __init void lguest_init(void *boot)
976 964
977 /* We're under lguest, paravirt is enabled, and we're running at 965 /* We're under lguest, paravirt is enabled, and we're running at
978 * privilege level 1, not 0 as normal. */ 966 * privilege level 1, not 0 as normal. */
979 paravirt_ops.name = "lguest"; 967 pv_info.name = "lguest";
980 paravirt_ops.paravirt_enabled = 1; 968 pv_info.paravirt_enabled = 1;
981 paravirt_ops.kernel_rpl = 1; 969 pv_info.kernel_rpl = 1;
982 970
983 /* We set up all the lguest overrides for sensitive operations. These 971 /* We set up all the lguest overrides for sensitive operations. These
984 * are detailed with the operations themselves. */ 972 * are detailed with the operations themselves. */
985 paravirt_ops.save_fl = save_fl; 973
986 paravirt_ops.restore_fl = restore_fl; 974 /* interrupt-related operations */
987 paravirt_ops.irq_disable = irq_disable; 975 pv_irq_ops.init_IRQ = lguest_init_IRQ;
988 paravirt_ops.irq_enable = irq_enable; 976 pv_irq_ops.save_fl = save_fl;
989 paravirt_ops.load_gdt = lguest_load_gdt; 977 pv_irq_ops.restore_fl = restore_fl;
990 paravirt_ops.memory_setup = lguest_memory_setup; 978 pv_irq_ops.irq_disable = irq_disable;
991 paravirt_ops.cpuid = lguest_cpuid; 979 pv_irq_ops.irq_enable = irq_enable;
992 paravirt_ops.write_cr3 = lguest_write_cr3; 980 pv_irq_ops.safe_halt = lguest_safe_halt;
993 paravirt_ops.flush_tlb_user = lguest_flush_tlb_user; 981
994 paravirt_ops.flush_tlb_single = lguest_flush_tlb_single; 982 /* init-time operations */
995 paravirt_ops.flush_tlb_kernel = lguest_flush_tlb_kernel; 983 pv_init_ops.memory_setup = lguest_memory_setup;
996 paravirt_ops.set_pte = lguest_set_pte; 984 pv_init_ops.patch = lguest_patch;
997 paravirt_ops.set_pte_at = lguest_set_pte_at; 985
998 paravirt_ops.set_pmd = lguest_set_pmd; 986 /* Intercepts of various cpu instructions */
987 pv_cpu_ops.load_gdt = lguest_load_gdt;
988 pv_cpu_ops.cpuid = lguest_cpuid;
989 pv_cpu_ops.load_idt = lguest_load_idt;
990 pv_cpu_ops.iret = lguest_iret;
991 pv_cpu_ops.load_esp0 = lguest_load_esp0;
992 pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
993 pv_cpu_ops.set_ldt = lguest_set_ldt;
994 pv_cpu_ops.load_tls = lguest_load_tls;
995 pv_cpu_ops.set_debugreg = lguest_set_debugreg;
996 pv_cpu_ops.clts = lguest_clts;
997 pv_cpu_ops.read_cr0 = lguest_read_cr0;
998 pv_cpu_ops.write_cr0 = lguest_write_cr0;
999 pv_cpu_ops.read_cr4 = lguest_read_cr4;
1000 pv_cpu_ops.write_cr4 = lguest_write_cr4;
1001 pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
1002 pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
1003 pv_cpu_ops.wbinvd = lguest_wbinvd;
1004 pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
1005 pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
1006
1007 /* pagetable management */
1008 pv_mmu_ops.write_cr3 = lguest_write_cr3;
1009 pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
1010 pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
1011 pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
1012 pv_mmu_ops.set_pte = lguest_set_pte;
1013 pv_mmu_ops.set_pte_at = lguest_set_pte_at;
1014 pv_mmu_ops.set_pmd = lguest_set_pmd;
1015 pv_mmu_ops.read_cr2 = lguest_read_cr2;
1016 pv_mmu_ops.read_cr3 = lguest_read_cr3;
1017 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
1018 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
1019
999#ifdef CONFIG_X86_LOCAL_APIC 1020#ifdef CONFIG_X86_LOCAL_APIC
1000 paravirt_ops.apic_write = lguest_apic_write; 1021 /* apic read/write intercepts */
1001 paravirt_ops.apic_write_atomic = lguest_apic_write; 1022 pv_apic_ops.apic_write = lguest_apic_write;
1002 paravirt_ops.apic_read = lguest_apic_read; 1023 pv_apic_ops.apic_write_atomic = lguest_apic_write;
1024 pv_apic_ops.apic_read = lguest_apic_read;
1003#endif 1025#endif
1004 paravirt_ops.load_idt = lguest_load_idt; 1026
1005 paravirt_ops.iret = lguest_iret; 1027 /* time operations */
1006 paravirt_ops.load_esp0 = lguest_load_esp0; 1028 pv_time_ops.get_wallclock = lguest_get_wallclock;
1007 paravirt_ops.load_tr_desc = lguest_load_tr_desc; 1029 pv_time_ops.time_init = lguest_time_init;
1008 paravirt_ops.set_ldt = lguest_set_ldt; 1030
1009 paravirt_ops.load_tls = lguest_load_tls;
1010 paravirt_ops.set_debugreg = lguest_set_debugreg;
1011 paravirt_ops.clts = lguest_clts;
1012 paravirt_ops.read_cr0 = lguest_read_cr0;
1013 paravirt_ops.write_cr0 = lguest_write_cr0;
1014 paravirt_ops.init_IRQ = lguest_init_IRQ;
1015 paravirt_ops.read_cr2 = lguest_read_cr2;
1016 paravirt_ops.read_cr3 = lguest_read_cr3;
1017 paravirt_ops.read_cr4 = lguest_read_cr4;
1018 paravirt_ops.write_cr4 = lguest_write_cr4;
1019 paravirt_ops.write_gdt_entry = lguest_write_gdt_entry;
1020 paravirt_ops.write_idt_entry = lguest_write_idt_entry;
1021 paravirt_ops.patch = lguest_patch;
1022 paravirt_ops.safe_halt = lguest_safe_halt;
1023 paravirt_ops.get_wallclock = lguest_get_wallclock;
1024 paravirt_ops.time_init = lguest_time_init;
1025 paravirt_ops.set_lazy_mode = lguest_lazy_mode;
1026 paravirt_ops.wbinvd = lguest_wbinvd;
1027 /* Now is a good time to look at the implementations of these functions 1031 /* Now is a good time to look at the implementations of these functions
1028 * before returning to the rest of lguest_init(). */ 1032 * before returning to the rest of lguest_init(). */
1029 1033
diff --git a/drivers/lguest/lguest_bus.c b/drivers/lguest/lguest_bus.c
index 9e7752cc8002..57329788f8a7 100644
--- a/drivers/lguest/lguest_bus.c
+++ b/drivers/lguest/lguest_bus.c
@@ -201,7 +201,7 @@ static void scan_devices(void)
201 * "struct lguest_device_desc" array. */ 201 * "struct lguest_device_desc" array. */
202static int __init lguest_bus_init(void) 202static int __init lguest_bus_init(void)
203{ 203{
204 if (strcmp(paravirt_ops.name, "lguest") != 0) 204 if (strcmp(pv_info.name, "lguest") != 0)
205 return 0; 205 return 0;
206 206
207 /* Devices are in a single page above top of "normal" mem */ 207 /* Devices are in a single page above top of "normal" mem */
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 65fe28860f54..68c0e3b2f0e8 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -213,7 +213,8 @@ static int mmc_read_ext_csd(struct mmc_card *card)
213 printk(KERN_ERR "%s: unrecognised EXT_CSD structure " 213 printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
214 "version %d\n", mmc_hostname(card->host), 214 "version %d\n", mmc_hostname(card->host),
215 ext_csd_struct); 215 ext_csd_struct);
216 return -EINVAL; 216 err = -EINVAL;
217 goto out;
217 } 218 }
218 219
219 if (ext_csd_struct >= 2) { 220 if (ext_csd_struct >= 2) {
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 6ba98a49612d..7a452c2ad1f9 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -581,9 +581,7 @@ static void at91_mci_completed_command(struct at91mci_host *host)
581 pr_debug("Status = %08X [%08X %08X %08X %08X]\n", 581 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
582 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); 582 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
583 583
584 if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE | 584 if (status & AT91_MCI_ERRORS) {
585 AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
586 AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
587 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { 585 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
588 cmd->error = 0; 586 cmd->error = 0;
589 } 587 }
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 5a6fdfd0f140..dae5c8d5a318 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -282,6 +282,12 @@ config LIBERTAS_CS
282 ---help--- 282 ---help---
283 A driver for Marvell Libertas 8385 CompactFlash devices. 283 A driver for Marvell Libertas 8385 CompactFlash devices.
284 284
285config LIBERTAS_SDIO
286 tristate "Marvell Libertas 8385 and 8686 SDIO 802.11b/g cards"
287 depends on LIBERTAS && MMC
288 ---help---
289 A driver for Marvell Libertas 8385 and 8686 SDIO devices.
290
285config LIBERTAS_DEBUG 291config LIBERTAS_DEBUG
286 bool "Enable full debugging output in the Libertas module." 292 bool "Enable full debugging output in the Libertas module."
287 depends on LIBERTAS 293 depends on LIBERTAS
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index c469d569f090..0e2787691f96 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -7,7 +7,9 @@ libertas-objs := main.o wext.o \
7 7
8usb8xxx-objs += if_usb.o 8usb8xxx-objs += if_usb.o
9libertas_cs-objs += if_cs.o 9libertas_cs-objs += if_cs.o
10libertas_sdio-objs += if_sdio.o
10 11
11obj-$(CONFIG_LIBERTAS) += libertas.o 12obj-$(CONFIG_LIBERTAS) += libertas.o
12obj-$(CONFIG_LIBERTAS_USB) += usb8xxx.o 13obj-$(CONFIG_LIBERTAS_USB) += usb8xxx.o
13obj-$(CONFIG_LIBERTAS_CS) += libertas_cs.o 14obj-$(CONFIG_LIBERTAS_CS) += libertas_cs.o
15obj-$(CONFIG_LIBERTAS_SDIO) += libertas_sdio.o
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 7c5b7f7b45db..3a0c9beefcf8 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -39,6 +39,7 @@
39#define LBS_DEB_FW 0x00080000 39#define LBS_DEB_FW 0x00080000
40#define LBS_DEB_THREAD 0x00100000 40#define LBS_DEB_THREAD 0x00100000
41#define LBS_DEB_HEX 0x00200000 41#define LBS_DEB_HEX 0x00200000
42#define LBS_DEB_SDIO 0x00400000
42 43
43extern unsigned int libertas_debug; 44extern unsigned int libertas_debug;
44 45
@@ -80,6 +81,7 @@ do { if ((libertas_debug & (grp)) == (grp)) \
80#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args) 81#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args)
81#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args) 82#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args)
82#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args) 83#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args)
84#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args)
83 85
84#define lbs_pr_info(format, args...) \ 86#define lbs_pr_info(format, args...) \
85 printk(KERN_INFO DRV_NAME": " format, ## args) 87 printk(KERN_INFO DRV_NAME": " format, ## args)
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
new file mode 100644
index 000000000000..a8e17076e7de
--- /dev/null
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -0,0 +1,1079 @@
1/*
2 * linux/drivers/net/wireless/libertas/if_sdio.c
3 *
4 * Copyright 2007 Pierre Ossman
5 *
6 * Inspired by if_cs.c, Copyright 2007 Holger Schurig
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This hardware has more or less no CMD53 support, so all registers
14 * must be accessed using sdio_readb()/sdio_writeb().
15 *
16 * Transfers must be in one transaction or the firmware goes bonkers.
17 * This means that the transfer must either be small enough to do a
18 * byte based transfer or it must be padded to a multiple of the
19 * current block size.
20 *
21 * As SDIO is still new to the kernel, host controller bugs related to
22 * it are unfortunately common. One such bug is that
23 * controllers cannot do transfers that aren't a multiple of 4 bytes.
24 * If you don't have time to fix the host controller driver, you can
25 * work around the problem by modifying if_sdio_host_to_card() and
26 * if_sdio_card_to_host() to pad the data.
27 */
28
29#include <linux/moduleparam.h>
30#include <linux/firmware.h>
31#include <linux/netdevice.h>
32#include <linux/delay.h>
33#include <linux/mmc/card.h>
34#include <linux/mmc/sdio_func.h>
35#include <linux/mmc/sdio_ids.h>
36
37#include "host.h"
38#include "decl.h"
39#include "defs.h"
40#include "dev.h"
41#include "if_sdio.h"
42
43static char *libertas_helper_name = NULL;
44module_param_named(helper_name, libertas_helper_name, charp, 0644);
45
46static char *libertas_fw_name = NULL;
47module_param_named(fw_name, libertas_fw_name, charp, 0644);
48
49static const struct sdio_device_id if_sdio_ids[] = {
50 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_LIBERTAS) },
51 { /* end: all zeroes */ },
52};
53
54MODULE_DEVICE_TABLE(sdio, if_sdio_ids);
55
56struct if_sdio_model {
57 int model;
58 const char *helper;
59 const char *firmware;
60};
61
62static struct if_sdio_model if_sdio_models[] = {
63 {
64 /* 8385 */
65 .model = 0x04,
66 .helper = "sd8385_helper.bin",
67 .firmware = "sd8385.bin",
68 },
69 {
70 /* 8686 */
71 .model = 0x0B,
72 .helper = "sd8686_helper.bin",
73 .firmware = "sd8686.bin",
74 },
75};
76
77struct if_sdio_packet {
78 struct if_sdio_packet *next;
79 u16 nb;
80 u8 buffer[0] __attribute__((aligned(4)));
81};
82
83struct if_sdio_card {
84 struct sdio_func *func;
85 wlan_private *priv;
86
87 int model;
88 unsigned long ioport;
89
90 const char *helper;
91 const char *firmware;
92
93 u8 buffer[65536];
94 u8 int_cause;
95 u32 event;
96
97 spinlock_t lock;
98 struct if_sdio_packet *packets;
99 struct work_struct packet_worker;
100};
101
102/********************************************************************/
103/* I/O */
104/********************************************************************/
105
106static u16 if_sdio_read_scratch(struct if_sdio_card *card, int *err)
107{
108 int ret, reg;
109 u16 scratch;
110
111 if (card->model == 0x04)
112 reg = IF_SDIO_SCRATCH_OLD;
113 else
114 reg = IF_SDIO_SCRATCH;
115
116 scratch = sdio_readb(card->func, reg, &ret);
117 if (!ret)
118 scratch |= sdio_readb(card->func, reg + 1, &ret) << 8;
119
120 if (err)
121 *err = ret;
122
123 if (ret)
124 return 0xffff;
125
126 return scratch;
127}
128
129static int if_sdio_handle_cmd(struct if_sdio_card *card,
130 u8 *buffer, unsigned size)
131{
132 int ret;
133 unsigned long flags;
134
135 lbs_deb_enter(LBS_DEB_SDIO);
136
137 spin_lock_irqsave(&card->priv->adapter->driver_lock, flags);
138
139 if (!card->priv->adapter->cur_cmd) {
140 lbs_deb_sdio("discarding spurious response\n");
141 ret = 0;
142 goto out;
143 }
144
145 if (size > MRVDRV_SIZE_OF_CMD_BUFFER) {
146 lbs_deb_sdio("response packet too large (%d bytes)\n",
147 (int)size);
148 ret = -E2BIG;
149 goto out;
150 }
151
152 memcpy(card->priv->adapter->cur_cmd->bufvirtualaddr, buffer, size);
153 card->priv->upld_len = size;
154
155 card->int_cause |= MRVDRV_CMD_UPLD_RDY;
156
157 libertas_interrupt(card->priv->dev);
158
159 ret = 0;
160
161out:
162 spin_unlock_irqrestore(&card->priv->adapter->driver_lock, flags);
163
164 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
165
166 return ret;
167}
168
169static int if_sdio_handle_data(struct if_sdio_card *card,
170 u8 *buffer, unsigned size)
171{
172 int ret;
173 struct sk_buff *skb;
174 char *data;
175
176 lbs_deb_enter(LBS_DEB_SDIO);
177
178 if (size > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
179 lbs_deb_sdio("response packet too large (%d bytes)\n",
180 (int)size);
181 ret = -E2BIG;
182 goto out;
183 }
184
185 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
186 if (!skb) {
187 ret = -ENOMEM;
188 goto out;
189 }
190
191 data = skb_put(skb, size);
192
193 memcpy(data, buffer, size);
194
195 libertas_process_rxed_packet(card->priv, skb);
196
197 ret = 0;
198
199out:
200 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
201
202 return ret;
203}
204
205static int if_sdio_handle_event(struct if_sdio_card *card,
206 u8 *buffer, unsigned size)
207{
208 int ret;
209 unsigned long flags;
210 u32 event;
211
212 lbs_deb_enter(LBS_DEB_SDIO);
213
214 if (card->model == 0x04) {
215 event = sdio_readb(card->func, IF_SDIO_EVENT, &ret);
216 if (ret)
217 goto out;
218 } else {
219 if (size < 4) {
220 lbs_deb_sdio("event packet too small (%d bytes)\n",
221 (int)size);
222 ret = -EINVAL;
223 goto out;
224 }
225 event = buffer[3] << 24;
226 event |= buffer[2] << 16;
227 event |= buffer[1] << 8;
228 event |= buffer[0] << 0;
229 event <<= SBI_EVENT_CAUSE_SHIFT;
230 }
231
232 spin_lock_irqsave(&card->priv->adapter->driver_lock, flags);
233
234 card->event = event;
235 card->int_cause |= MRVDRV_CARDEVENT;
236
237 libertas_interrupt(card->priv->dev);
238
239 spin_unlock_irqrestore(&card->priv->adapter->driver_lock, flags);
240
241 ret = 0;
242
243out:
244 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
245
246 return ret;
247}
248
249static int if_sdio_card_to_host(struct if_sdio_card *card)
250{
251 int ret;
252 u8 status;
253 u16 size, type, chunk;
254 unsigned long timeout;
255
256 lbs_deb_enter(LBS_DEB_SDIO);
257
258 size = if_sdio_read_scratch(card, &ret);
259 if (ret)
260 goto out;
261
262 if (size < 4) {
263 lbs_deb_sdio("invalid packet size (%d bytes) from firmware\n",
264 (int)size);
265 ret = -EINVAL;
266 goto out;
267 }
268
269 timeout = jiffies + HZ;
270 while (1) {
271 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
272 if (ret)
273 goto out;
274 if (status & IF_SDIO_IO_RDY)
275 break;
276 if (time_after(jiffies, timeout)) {
277 ret = -ETIMEDOUT;
278 goto out;
279 }
280 mdelay(1);
281 }
282
283 /*
284 * The transfer must be in one transaction or the firmware
285 * goes suicidal.
286 */
287 chunk = size;
288 if ((chunk > card->func->cur_blksize) || (chunk > 512)) {
289 chunk = (chunk + card->func->cur_blksize - 1) /
290 card->func->cur_blksize * card->func->cur_blksize;
291 }
292
293 ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk);
294 if (ret)
295 goto out;
296
297 chunk = card->buffer[0] | (card->buffer[1] << 8);
298 type = card->buffer[2] | (card->buffer[3] << 8);
299
300 lbs_deb_sdio("packet of type %d and size %d bytes\n",
301 (int)type, (int)chunk);
302
303 if (chunk > size) {
304 lbs_deb_sdio("packet fragment (%d > %d)\n",
305 (int)chunk, (int)size);
306 ret = -EINVAL;
307 goto out;
308 }
309
310 if (chunk < size) {
311 lbs_deb_sdio("packet fragment (%d < %d)\n",
312 (int)chunk, (int)size);
313 }
314
315 switch (type) {
316 case MVMS_CMD:
317 ret = if_sdio_handle_cmd(card, card->buffer + 4, chunk - 4);
318 if (ret)
319 goto out;
320 break;
321 case MVMS_DAT:
322 ret = if_sdio_handle_data(card, card->buffer + 4, chunk - 4);
323 if (ret)
324 goto out;
325 break;
326 case MVMS_EVENT:
327 ret = if_sdio_handle_event(card, card->buffer + 4, chunk - 4);
328 if (ret)
329 goto out;
330 break;
331 default:
332 lbs_deb_sdio("invalid type (%d) from firmware\n",
333 (int)type);
334 ret = -EINVAL;
335 goto out;
336 }
337
338out:
339 if (ret)
340 lbs_pr_err("problem fetching packet from firmware\n");
341
342 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
343
344 return ret;
345}
346
347static void if_sdio_host_to_card_worker(struct work_struct *work)
348{
349 struct if_sdio_card *card;
350 struct if_sdio_packet *packet;
351 unsigned long timeout;
352 u8 status;
353 int ret;
354 unsigned long flags;
355
356 lbs_deb_enter(LBS_DEB_SDIO);
357
358 card = container_of(work, struct if_sdio_card, packet_worker);
359
360 while (1) {
361 spin_lock_irqsave(&card->lock, flags);
362 packet = card->packets;
363 if (packet)
364 card->packets = packet->next;
365 spin_unlock_irqrestore(&card->lock, flags);
366
367 if (!packet)
368 break;
369
370 sdio_claim_host(card->func);
371
372 timeout = jiffies + HZ;
373 while (1) {
374 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
375 if (ret)
376 goto release;
377 if (status & IF_SDIO_IO_RDY)
378 break;
379 if (time_after(jiffies, timeout)) {
380 ret = -ETIMEDOUT;
381 goto release;
382 }
383 mdelay(1);
384 }
385
386 ret = sdio_writesb(card->func, card->ioport,
387 packet->buffer, packet->nb);
388 if (ret)
389 goto release;
390release:
391 sdio_release_host(card->func);
392
393 kfree(packet);
394 }
395
396 lbs_deb_leave(LBS_DEB_SDIO);
397}
398
399/********************************************************************/
400/* Firmware */
401/********************************************************************/
402
403static int if_sdio_prog_helper(struct if_sdio_card *card)
404{
405 int ret;
406 u8 status;
407 const struct firmware *fw;
408 unsigned long timeout;
409 u8 *chunk_buffer;
410 u32 chunk_size;
411 u8 *firmware;
412 size_t size;
413
414 lbs_deb_enter(LBS_DEB_SDIO);
415
416 ret = request_firmware(&fw, card->helper, &card->func->dev);
417 if (ret) {
418 lbs_pr_err("can't load helper firmware\n");
419 goto out;
420 }
421
422 chunk_buffer = kzalloc(64, GFP_KERNEL);
423 if (!chunk_buffer) {
424 ret = -ENOMEM;
425 goto release_fw;
426 }
427
428 sdio_claim_host(card->func);
429
430 ret = sdio_set_block_size(card->func, 32);
431 if (ret)
432 goto release;
433
434 firmware = fw->data;
435 size = fw->size;
436
437 while (size) {
438 timeout = jiffies + HZ;
439 while (1) {
440 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
441 if (ret)
442 goto release;
443 if ((status & IF_SDIO_IO_RDY) &&
444 (status & IF_SDIO_DL_RDY))
445 break;
446 if (time_after(jiffies, timeout)) {
447 ret = -ETIMEDOUT;
448 goto release;
449 }
450 mdelay(1);
451 }
452
453 chunk_size = min(size, (size_t)60);
454
455 *((u32*)chunk_buffer) = cpu_to_le32(chunk_size);
456 memcpy(chunk_buffer + 4, firmware, chunk_size);
457/*
458 lbs_deb_sdio("sending %d bytes chunk\n", chunk_size);
459*/
460 ret = sdio_writesb(card->func, card->ioport,
461 chunk_buffer, 64);
462 if (ret)
463 goto release;
464
465 firmware += chunk_size;
466 size -= chunk_size;
467 }
468
469 /* an empty block marks the end of the transfer */
470 memset(chunk_buffer, 0, 4);
471 ret = sdio_writesb(card->func, card->ioport, chunk_buffer, 64);
472 if (ret)
473 goto release;
474
475 lbs_deb_sdio("waiting for helper to boot...\n");
476
477 /* wait for the helper to boot by looking at the size register */
478 timeout = jiffies + HZ;
479 while (1) {
480 u16 req_size;
481
482 req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
483 if (ret)
484 goto release;
485
486 req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1, &ret) << 8;
487 if (ret)
488 goto release;
489
490 if (req_size != 0)
491 break;
492
493 if (time_after(jiffies, timeout)) {
494 ret = -ETIMEDOUT;
495 goto release;
496 }
497
498 msleep(10);
499 }
500
501 ret = 0;
502
503release:
504 sdio_set_block_size(card->func, 0);
505 sdio_release_host(card->func);
506 kfree(chunk_buffer);
507release_fw:
508 release_firmware(fw);
509
510out:
511 if (ret)
512 lbs_pr_err("failed to load helper firmware\n");
513
514 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
515
516 return ret;
517}
518
519static int if_sdio_prog_real(struct if_sdio_card *card)
520{
521 int ret;
522 u8 status;
523 const struct firmware *fw;
524 unsigned long timeout;
525 u8 *chunk_buffer;
526 u32 chunk_size;
527 u8 *firmware;
528 size_t size, req_size;
529
530 lbs_deb_enter(LBS_DEB_SDIO);
531
532 ret = request_firmware(&fw, card->firmware, &card->func->dev);
533 if (ret) {
534 lbs_pr_err("can't load firmware\n");
535 goto out;
536 }
537
538 chunk_buffer = kzalloc(512, GFP_KERNEL);
539 if (!chunk_buffer) {
540 ret = -ENOMEM;
541 goto release_fw;
542 }
543
544 sdio_claim_host(card->func);
545
546 ret = sdio_set_block_size(card->func, 32);
547 if (ret)
548 goto release;
549
550 firmware = fw->data;
551 size = fw->size;
552
553 while (size) {
554 timeout = jiffies + HZ;
555 while (1) {
556 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
557 if (ret)
558 goto release;
559 if ((status & IF_SDIO_IO_RDY) &&
560 (status & IF_SDIO_DL_RDY))
561 break;
562 if (time_after(jiffies, timeout)) {
563 ret = -ETIMEDOUT;
564 goto release;
565 }
566 mdelay(1);
567 }
568
569 req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
570 if (ret)
571 goto release;
572
573 req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1, &ret) << 8;
574 if (ret)
575 goto release;
576/*
577 lbs_deb_sdio("firmware wants %d bytes\n", (int)req_size);
578*/
579 if (req_size == 0) {
580 lbs_deb_sdio("firmware helper gave up early\n");
581 ret = -EIO;
582 goto release;
583 }
584
585 if (req_size & 0x01) {
586 lbs_deb_sdio("firmware helper signalled error\n");
587 ret = -EIO;
588 goto release;
589 }
590
591 if (req_size > size)
592 req_size = size;
593
594 while (req_size) {
595 chunk_size = min(req_size, (size_t)512);
596
597 memcpy(chunk_buffer, firmware, chunk_size);
598/*
599 lbs_deb_sdio("sending %d bytes (%d bytes) chunk\n",
600 chunk_size, (chunk_size + 31) / 32 * 32);
601*/
602 ret = sdio_writesb(card->func, card->ioport,
603 chunk_buffer, (chunk_size + 31) / 32 * 32);
604 if (ret)
605 goto release;
606
607 firmware += chunk_size;
608 size -= chunk_size;
609 req_size -= chunk_size;
610 }
611 }
612
613 ret = 0;
614
615 lbs_deb_sdio("waiting for firmware to boot...\n");
616
617 /* wait for the firmware to boot */
618 timeout = jiffies + HZ;
619 while (1) {
620 u16 scratch;
621
622 scratch = if_sdio_read_scratch(card, &ret);
623 if (ret)
624 goto release;
625
626 if (scratch == IF_SDIO_FIRMWARE_OK)
627 break;
628
629 if (time_after(jiffies, timeout)) {
630 ret = -ETIMEDOUT;
631 goto release;
632 }
633
634 msleep(10);
635 }
636
637 ret = 0;
638
639release:
640 sdio_set_block_size(card->func, 0);
641 sdio_release_host(card->func);
642 kfree(chunk_buffer);
643release_fw:
644 release_firmware(fw);
645
646out:
647 if (ret)
648 lbs_pr_err("failed to load firmware\n");
649
650 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
651
652 return ret;
653}
654
655static int if_sdio_prog_firmware(struct if_sdio_card *card)
656{
657 int ret;
658 u16 scratch;
659
660 lbs_deb_enter(LBS_DEB_SDIO);
661
662 sdio_claim_host(card->func);
663 scratch = if_sdio_read_scratch(card, &ret);
664 sdio_release_host(card->func);
665
666 if (ret)
667 goto out;
668
669 if (scratch == IF_SDIO_FIRMWARE_OK) {
670 lbs_deb_sdio("firmware already loaded\n");
671 goto success;
672 }
673
674 ret = if_sdio_prog_helper(card);
675 if (ret)
676 goto out;
677
678 ret = if_sdio_prog_real(card);
679 if (ret)
680 goto out;
681
682success:
683 ret = 0;
684
685out:
686 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
687
688 return ret;
689}
690
691/*******************************************************************/
692/* Libertas callbacks */
693/*******************************************************************/
694
695static int if_sdio_host_to_card(wlan_private *priv, u8 type, u8 *buf, u16 nb)
696{
697 int ret;
698 struct if_sdio_card *card;
699 struct if_sdio_packet *packet, *cur;
700 u16 size;
701 unsigned long flags;
702
703 lbs_deb_enter_args(LBS_DEB_SDIO, "type %d, bytes %d", type, nb);
704
705 card = priv->card;
706
707 if (nb > (65536 - sizeof(struct if_sdio_packet) - 4)) {
708 ret = -EINVAL;
709 goto out;
710 }
711
712 /*
713 * The transfer must be in one transaction or the firmware
714 * goes suicidal.
715 */
716 size = nb + 4;
717 if ((size > card->func->cur_blksize) || (size > 512)) {
718 size = (size + card->func->cur_blksize - 1) /
719 card->func->cur_blksize * card->func->cur_blksize;
720 }
721
722 packet = kzalloc(sizeof(struct if_sdio_packet) + size,
723 GFP_ATOMIC);
724 if (!packet) {
725 ret = -ENOMEM;
726 goto out;
727 }
728
729 packet->next = NULL;
730 packet->nb = size;
731
732 /*
733 * SDIO specific header.
734 */
735 packet->buffer[0] = (nb + 4) & 0xff;
736 packet->buffer[1] = ((nb + 4) >> 8) & 0xff;
737 packet->buffer[2] = type;
738 packet->buffer[3] = 0;
739
740 memcpy(packet->buffer + 4, buf, nb);
741
742 spin_lock_irqsave(&card->lock, flags);
743
744 if (!card->packets)
745 card->packets = packet;
746 else {
747 cur = card->packets;
748 while (cur->next)
749 cur = cur->next;
750 cur->next = packet;
751 }
752
753 switch (type) {
754 case MVMS_CMD:
755 priv->dnld_sent = DNLD_CMD_SENT;
756 break;
757 case MVMS_DAT:
758 priv->dnld_sent = DNLD_DATA_SENT;
759 break;
760 default:
761 lbs_deb_sdio("unknown packet type %d\n", (int)type);
762 }
763
764 spin_unlock_irqrestore(&card->lock, flags);
765
766 schedule_work(&card->packet_worker);
767
768 ret = 0;
769
770out:
771 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
772
773 return ret;
774}
775
776static int if_sdio_get_int_status(wlan_private *priv, u8 *ireg)
777{
778 struct if_sdio_card *card;
779
780 lbs_deb_enter(LBS_DEB_SDIO);
781
782 card = priv->card;
783
784 *ireg = card->int_cause;
785 card->int_cause = 0;
786
787 lbs_deb_leave(LBS_DEB_SDIO);
788
789 return 0;
790}
791
792static int if_sdio_read_event_cause(wlan_private *priv)
793{
794 struct if_sdio_card *card;
795
796 lbs_deb_enter(LBS_DEB_SDIO);
797
798 card = priv->card;
799
800 priv->adapter->eventcause = card->event;
801
802 lbs_deb_leave(LBS_DEB_SDIO);
803
804 return 0;
805}
806
807/*******************************************************************/
808/* SDIO callbacks */
809/*******************************************************************/
810
811static void if_sdio_interrupt(struct sdio_func *func)
812{
813 int ret;
814 struct if_sdio_card *card;
815 u8 cause;
816
817 lbs_deb_enter(LBS_DEB_SDIO);
818
819 card = sdio_get_drvdata(func);
820
821 cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret);
822 if (ret)
823 goto out;
824
825 lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause);
826
827 sdio_writeb(card->func, ~cause, IF_SDIO_H_INT_STATUS, &ret);
828 if (ret)
829 goto out;
830
831 /*
832 * Ignore the define name, this really means the card has
833 * successfully received the command.
834 */
835 if (cause & IF_SDIO_H_INT_DNLD) {
836 if ((card->priv->dnld_sent == DNLD_DATA_SENT) &&
837 (card->priv->adapter->connect_status == LIBERTAS_CONNECTED))
838 netif_wake_queue(card->priv->dev);
839 card->priv->dnld_sent = DNLD_RES_RECEIVED;
840 }
841
842 if (cause & IF_SDIO_H_INT_UPLD) {
843 ret = if_sdio_card_to_host(card);
844 if (ret)
845 goto out;
846 }
847
848 ret = 0;
849
850out:
851 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
852}
853
854static int if_sdio_probe(struct sdio_func *func,
855 const struct sdio_device_id *id)
856{
857 struct if_sdio_card *card;
858 wlan_private *priv;
859 int ret, i;
860 unsigned int model;
861 struct if_sdio_packet *packet;
862
863 lbs_deb_enter(LBS_DEB_SDIO);
864
865 for (i = 0;i < func->card->num_info;i++) {
866 if (sscanf(func->card->info[i],
867 "802.11 SDIO ID: %x", &model) == 1)
868 break;
869 if (sscanf(func->card->info[i],
870 "ID: %x", &model) == 1)
871 break;
872 }
873
874 if (i == func->card->num_info) {
875 lbs_pr_err("unable to identify card model\n");
876 return -ENODEV;
877 }
878
879 card = kzalloc(sizeof(struct if_sdio_card), GFP_KERNEL);
880 if (!card)
881 return -ENOMEM;
882
883 card->func = func;
884 card->model = model;
885 spin_lock_init(&card->lock);
886 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
887
888 for (i = 0;i < ARRAY_SIZE(if_sdio_models);i++) {
889 if (card->model == if_sdio_models[i].model)
890 break;
891 }
892
893 if (i == ARRAY_SIZE(if_sdio_models)) {
894 lbs_pr_err("unkown card model 0x%x\n", card->model);
895 ret = -ENODEV;
896 goto free;
897 }
898
899 card->helper = if_sdio_models[i].helper;
900 card->firmware = if_sdio_models[i].firmware;
901
902 if (libertas_helper_name) {
903 lbs_deb_sdio("overriding helper firmware: %s\n",
904 libertas_helper_name);
905 card->helper = libertas_helper_name;
906 }
907
908 if (libertas_fw_name) {
909 lbs_deb_sdio("overriding firmware: %s\n", libertas_fw_name);
910 card->firmware = libertas_fw_name;
911 }
912
913 sdio_claim_host(func);
914
915 ret = sdio_enable_func(func);
916 if (ret)
917 goto release;
918
919 ret = sdio_claim_irq(func, if_sdio_interrupt);
920 if (ret)
921 goto disable;
922
923 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
924 if (ret)
925 goto release_int;
926
927 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8;
928 if (ret)
929 goto release_int;
930
931 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16;
932 if (ret)
933 goto release_int;
934
935 sdio_release_host(func);
936
937 sdio_set_drvdata(func, card);
938
939 lbs_deb_sdio("class = 0x%X, vendor = 0x%X, "
940 "device = 0x%X, model = 0x%X, ioport = 0x%X\n",
941 func->class, func->vendor, func->device,
942 model, (unsigned)card->ioport);
943
944 ret = if_sdio_prog_firmware(card);
945 if (ret)
946 goto reclaim;
947
948 priv = libertas_add_card(card, &func->dev);
949 if (!priv) {
950 ret = -ENOMEM;
951 goto reclaim;
952 }
953
954 card->priv = priv;
955
956 priv->card = card;
957 priv->hw_host_to_card = if_sdio_host_to_card;
958 priv->hw_get_int_status = if_sdio_get_int_status;
959 priv->hw_read_event_cause = if_sdio_read_event_cause;
960
961 priv->adapter->fw_ready = 1;
962
963 /*
964 * Enable interrupts now that everything is set up
965 */
966 sdio_claim_host(func);
967 sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
968 sdio_release_host(func);
969 if (ret)
970 goto reclaim;
971
972 ret = libertas_start_card(priv);
973 if (ret)
974 goto err_activate_card;
975
976out:
977 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
978
979 return ret;
980
981err_activate_card:
982 flush_scheduled_work();
983 free_netdev(priv->dev);
984 kfree(priv->adapter);
985reclaim:
986 sdio_claim_host(func);
987release_int:
988 sdio_release_irq(func);
989disable:
990 sdio_disable_func(func);
991release:
992 sdio_release_host(func);
993free:
994 while (card->packets) {
995 packet = card->packets;
996 card->packets = card->packets->next;
997 kfree(packet);
998 }
999
1000 kfree(card);
1001
1002 goto out;
1003}
1004
1005static void if_sdio_remove(struct sdio_func *func)
1006{
1007 struct if_sdio_card *card;
1008 struct if_sdio_packet *packet;
1009
1010 lbs_deb_enter(LBS_DEB_SDIO);
1011
1012 card = sdio_get_drvdata(func);
1013
1014 card->priv->adapter->surpriseremoved = 1;
1015
1016 lbs_deb_sdio("call remove card\n");
1017 libertas_stop_card(card->priv);
1018 libertas_remove_card(card->priv);
1019
1020 flush_scheduled_work();
1021
1022 sdio_claim_host(func);
1023 sdio_release_irq(func);
1024 sdio_disable_func(func);
1025 sdio_release_host(func);
1026
1027 while (card->packets) {
1028 packet = card->packets;
1029 card->packets = card->packets->next;
1030 kfree(packet);
1031 }
1032
1033 kfree(card);
1034
1035 lbs_deb_leave(LBS_DEB_SDIO);
1036}
1037
1038static struct sdio_driver if_sdio_driver = {
1039 .name = "libertas_sdio",
1040 .id_table = if_sdio_ids,
1041 .probe = if_sdio_probe,
1042 .remove = if_sdio_remove,
1043};
1044
1045/*******************************************************************/
1046/* Module functions */
1047/*******************************************************************/
1048
1049static int if_sdio_init_module(void)
1050{
1051 int ret = 0;
1052
1053 lbs_deb_enter(LBS_DEB_SDIO);
1054
1055 printk(KERN_INFO "libertas_sdio: Libertas SDIO driver\n");
1056 printk(KERN_INFO "libertas_sdio: Copyright Pierre Ossman\n");
1057
1058 ret = sdio_register_driver(&if_sdio_driver);
1059
1060 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
1061
1062 return ret;
1063}
1064
1065static void if_sdio_exit_module(void)
1066{
1067 lbs_deb_enter(LBS_DEB_SDIO);
1068
1069 sdio_unregister_driver(&if_sdio_driver);
1070
1071 lbs_deb_leave(LBS_DEB_SDIO);
1072}
1073
1074module_init(if_sdio_init_module);
1075module_exit(if_sdio_exit_module);
1076
1077MODULE_DESCRIPTION("Libertas SDIO WLAN Driver");
1078MODULE_AUTHOR("Pierre Ossman");
1079MODULE_LICENSE("GPL");
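For reference, the single-transaction rule that both if_sdio_card_to_host() and if_sdio_host_to_card() apply can be summarized by the following sketch (the helper name is illustrative, not part of the patch): any transfer larger than the current block size, or larger than 512 bytes, is rounded up to a whole number of blocks so that the firmware sees exactly one transaction.

static unsigned int if_sdio_pad_size(unsigned int nb, unsigned int blksize)
{
	/* small transfers can go out as a plain byte-mode transfer */
	if (nb <= blksize && nb <= 512)
		return nb;

	/* otherwise round up to a multiple of the current block size */
	return (nb + blksize - 1) / blksize * blksize;
}

The rounded size is then handed to sdio_readsb()/sdio_writesb() as a single request.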
diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/libertas/if_sdio.h
new file mode 100644
index 000000000000..dfcaea7b168f
--- /dev/null
+++ b/drivers/net/wireless/libertas/if_sdio.h
@@ -0,0 +1,45 @@
1/*
2 * linux/drivers/net/wireless/libertas/if_sdio.h
3 *
4 * Copyright 2007 Pierre Ossman
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#ifndef LIBERTAS_IF_SDIO_H
13#define LIBERTAS_IF_SDIO_H
14
15#define IF_SDIO_IOPORT 0x00
16
17#define IF_SDIO_H_INT_MASK 0x04
18#define IF_SDIO_H_INT_OFLOW 0x08
19#define IF_SDIO_H_INT_UFLOW 0x04
20#define IF_SDIO_H_INT_DNLD 0x02
21#define IF_SDIO_H_INT_UPLD 0x01
22
23#define IF_SDIO_H_INT_STATUS 0x05
24#define IF_SDIO_H_INT_RSR 0x06
25#define IF_SDIO_H_INT_STATUS2 0x07
26
27#define IF_SDIO_RD_BASE 0x10
28
29#define IF_SDIO_STATUS 0x20
30#define IF_SDIO_IO_RDY 0x08
31#define IF_SDIO_CIS_RDY 0x04
32#define IF_SDIO_UL_RDY 0x02
33#define IF_SDIO_DL_RDY 0x01
34
35#define IF_SDIO_C_INT_MASK 0x24
36#define IF_SDIO_C_INT_STATUS 0x28
37#define IF_SDIO_C_INT_RSR 0x2C
38
39#define IF_SDIO_SCRATCH 0x34
40#define IF_SDIO_SCRATCH_OLD 0x80fe
41#define IF_SDIO_FIRMWARE_OK 0xfedc
42
43#define IF_SDIO_EVENT 0x80fc
44
45#endif
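Every host/card transfer in this driver starts with the 4-byte header that if_sdio_host_to_card() builds and if_sdio_card_to_host() parses back out of card->buffer. A hedged sketch of that layout (the function name is illustrative; the field order is taken from the code above):

static void if_sdio_fill_header(u8 *buf, u8 type, u16 payload)
{
	u16 total = payload + 4;	/* the header bytes are counted in the length */

	buf[0] = total & 0xff;		/* length, low byte  */
	buf[1] = (total >> 8) & 0xff;	/* length, high byte */
	buf[2] = type;			/* MVMS_CMD, MVMS_DAT or MVMS_EVENT */
	buf[3] = 0;			/* reserved */
}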
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0c86be71bb33..aac8a02cbe80 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -764,6 +764,8 @@ struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
764 if (unlikely(!sgl)) 764 if (unlikely(!sgl))
765 goto enomem; 765 goto enomem;
766 766
767 memset(sgl, 0, sizeof(*sgl) * sgp->size);
768
767 /* 769 /*
768 * first loop through, set initial index and return value 770 * first loop through, set initial index and return value
769 */ 771 */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index efe474e2cc3b..b3bf4ecc983a 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -15,7 +15,6 @@ config VGASTATE
15 15
16config VIDEO_OUTPUT_CONTROL 16config VIDEO_OUTPUT_CONTROL
17 tristate "Lowlevel video output switch controls" 17 tristate "Lowlevel video output switch controls"
18 default m
19 help 18 help
20 This framework adds support for low-level control of the video 19 This framework adds support for low-level control of the video
21 output switch. 20 output switch.
diff --git a/include/asm-um/alternative-asm.i b/include/asm-um/alternative-asm.h
index cae9faca132f..9aa9fa2402a4 100644
--- a/include/asm-um/alternative-asm.i
+++ b/include/asm-um/alternative-asm.h
@@ -1,6 +1,6 @@
1#ifndef __UM_ALTERNATIVE_ASM_I 1#ifndef __UM_ALTERNATIVE_ASM_I
2#define __UM_ALTERNATIVE_ASM_I 2#define __UM_ALTERNATIVE_ASM_I
3 3
4#include "asm/arch/alternative-asm.i" 4#include "asm/arch/alternative-asm.h"
5 5
6#endif 6#endif
diff --git a/include/asm-um/frame.i b/include/asm-um/frame.h
index 09d5dca5d928..8a8c1cb415b4 100644
--- a/include/asm-um/frame.i
+++ b/include/asm-um/frame.h
@@ -1,6 +1,6 @@
1#ifndef __UM_FRAME_I 1#ifndef __UM_FRAME_I
2#define __UM_FRAME_I 2#define __UM_FRAME_I
3 3
4#include "asm/arch/frame.i" 4#include "asm/arch/frame.h"
5 5
6#endif 6#endif
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 80744dbcfafd..559830ece755 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -1,40 +1,22 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3header-y += boot.h 3header-y += boot.h
4header-y += debugreg_32.h
5header-y += debugreg_64.h
6header-y += debugreg.h 4header-y += debugreg.h
7header-y += ldt_32.h
8header-y += ldt_64.h
9header-y += ldt.h 5header-y += ldt.h
10header-y += msr-index.h 6header-y += msr-index.h
11header-y += prctl.h 7header-y += prctl.h
12header-y += ptrace-abi_32.h
13header-y += ptrace-abi_64.h
14header-y += ptrace-abi.h 8header-y += ptrace-abi.h
15header-y += sigcontext32.h 9header-y += sigcontext32.h
16header-y += ucontext_32.h
17header-y += ucontext_64.h
18header-y += ucontext.h 10header-y += ucontext.h
19header-y += vsyscall32.h 11header-y += vsyscall32.h
20 12
21unifdef-y += a.out_32.h 13unifdef-y += a.out_32.h
22unifdef-y += a.out_64.h 14unifdef-y += a.out_64.h
23unifdef-y += auxvec_32.h
24unifdef-y += auxvec_64.h
25unifdef-y += byteorder_32.h 15unifdef-y += byteorder_32.h
26unifdef-y += byteorder_64.h 16unifdef-y += byteorder_64.h
27unifdef-y += elf_32.h 17unifdef-y += elf_32.h
28unifdef-y += elf_64.h 18unifdef-y += elf_64.h
29unifdef-y += errno_32.h
30unifdef-y += errno_64.h
31unifdef-y += ioctls_32.h
32unifdef-y += ioctls_64.h
33unifdef-y += ipcbuf_32.h
34unifdef-y += ipcbuf_64.h
35unifdef-y += mce.h 19unifdef-y += mce.h
36unifdef-y += mman_32.h
37unifdef-y += mman_64.h
38unifdef-y += msgbuf_32.h 20unifdef-y += msgbuf_32.h
39unifdef-y += msgbuf_64.h 21unifdef-y += msgbuf_64.h
40unifdef-y += msr_32.h 22unifdef-y += msr_32.h
@@ -45,40 +27,22 @@ unifdef-y += mtrr_64.h
45unifdef-y += mtrr.h 27unifdef-y += mtrr.h
46unifdef-y += page_32.h 28unifdef-y += page_32.h
47unifdef-y += page_64.h 29unifdef-y += page_64.h
48unifdef-y += param_32.h
49unifdef-y += param_64.h
50unifdef-y += posix_types_32.h 30unifdef-y += posix_types_32.h
51unifdef-y += posix_types_64.h 31unifdef-y += posix_types_64.h
52unifdef-y += ptrace_32.h 32unifdef-y += ptrace_32.h
53unifdef-y += ptrace_64.h 33unifdef-y += ptrace_64.h
54unifdef-y += resource_32.h
55unifdef-y += resource_64.h
56unifdef-y += sembuf_32.h
57unifdef-y += sembuf_64.h
58unifdef-y += setup_32.h 34unifdef-y += setup_32.h
59unifdef-y += setup_64.h 35unifdef-y += setup_64.h
60unifdef-y += shmbuf_32.h 36unifdef-y += shmbuf_32.h
61unifdef-y += shmbuf_64.h 37unifdef-y += shmbuf_64.h
62unifdef-y += shmparam_32.h
63unifdef-y += shmparam_64.h
64unifdef-y += sigcontext_32.h 38unifdef-y += sigcontext_32.h
65unifdef-y += sigcontext_64.h 39unifdef-y += sigcontext_64.h
66unifdef-y += siginfo_32.h
67unifdef-y += siginfo_64.h
68unifdef-y += signal_32.h 40unifdef-y += signal_32.h
69unifdef-y += signal_64.h 41unifdef-y += signal_64.h
70unifdef-y += sockios_32.h
71unifdef-y += sockios_64.h
72unifdef-y += stat_32.h 42unifdef-y += stat_32.h
73unifdef-y += stat_64.h 43unifdef-y += stat_64.h
74unifdef-y += statfs_32.h 44unifdef-y += statfs_32.h
75unifdef-y += statfs_64.h 45unifdef-y += statfs_64.h
76unifdef-y += termbits_32.h
77unifdef-y += termbits_64.h
78unifdef-y += termios_32.h
79unifdef-y += termios_64.h
80unifdef-y += types_32.h
81unifdef-y += types_64.h
82unifdef-y += unistd_32.h 46unifdef-y += unistd_32.h
83unifdef-y += unistd_64.h 47unifdef-y += unistd_64.h
84unifdef-y += user_32.h 48unifdef-y += user_32.h
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index 9348f1e4f6f1..62df2a9e7130 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -1,5 +1,40 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_AGP_H
2# include "agp_32.h" 2#define _ASM_X86_AGP_H
3#else 3
4# include "agp_64.h" 4#include <asm/pgtable.h>
5#include <asm/cacheflush.h>
6
7/*
8 * Functions to keep the agpgart mappings coherent with the MMU. The
9 * GART gives the CPU a physical alias of pages in memory. The alias
10 * region is mapped uncacheable. Make sure there are no conflicting
11 * mappings with different cachability attributes for the same
12 * page. This avoids data corruption on some CPUs.
13 */
14
15/*
16 * Caller's responsibility to call global_flush_tlb() for performance
17 * reasons
18 */
19#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
20#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
21#define flush_agp_mappings() global_flush_tlb()
22
23/*
24 * Could use CLFLUSH here if the cpu supports it. But then it would
25 * need to be called for each cacheline of the whole page so it may
26 * not be worth it. Would need a page for it.
27 */
28#define flush_agp_cache() wbinvd()
29
30/* Convert a physical address to an address suitable for the GART. */
31#define phys_to_gart(x) (x)
32#define gart_to_phys(x) (x)
33
34/* GATT allocation. Returns/accepts GATT kernel virtual address. */
35#define alloc_gatt_pages(order) \
36 ((char *)__get_free_pages(GFP_KERNEL, (order)))
37#define free_gatt_pages(table, order) \
38 free_pages((unsigned long)(table), (order))
39
5#endif 40#endif
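The comment block in the new agp.h leaves the TLB flush to the caller for performance; a hedged sketch of the intended usage (function and parameters are illustrative), remapping a batch of GATT pages uncacheable and flushing once at the end instead of once per page:

static void map_gatt_batch(struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		map_page_into_agp(pages[i]);	/* change_page_attr(..., PAGE_KERNEL_NOCACHE) */

	flush_agp_mappings();			/* one global_flush_tlb() for the whole batch */
}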
diff --git a/include/asm-x86/agp_32.h b/include/asm-x86/agp_32.h
deleted file mode 100644
index 6af173dbf123..000000000000
--- a/include/asm-x86/agp_32.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef AGP_H
2#define AGP_H 1
3
4#include <asm/pgtable.h>
5#include <asm/cacheflush.h>
6
7/*
8 * Functions to keep the agpgart mappings coherent with the MMU.
9 * The GART gives the CPU a physical alias of pages in memory. The alias region is
10 * mapped uncacheable. Make sure there are no conflicting mappings
11 * with different cachability attributes for the same page. This avoids
12 * data corruption on some CPUs.
13 */
14
15/* Caller's responsibility to call global_flush_tlb() for
16 * performance reasons */
17#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
18#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
19#define flush_agp_mappings() global_flush_tlb()
20
21/* Could use CLFLUSH here if the cpu supports it. But then it would
22 need to be called for each cacheline of the whole page so it may not be
23 worth it. Would need a page for it. */
24#define flush_agp_cache() wbinvd()
25
26/* Convert a physical address to an address suitable for the GART. */
27#define phys_to_gart(x) (x)
28#define gart_to_phys(x) (x)
29
30/* GATT allocation. Returns/accepts GATT kernel virtual address. */
31#define alloc_gatt_pages(order) \
32 ((char *)__get_free_pages(GFP_KERNEL, (order)))
33#define free_gatt_pages(table, order) \
34 free_pages((unsigned long)(table), (order))
35
36#endif
diff --git a/include/asm-x86/agp_64.h b/include/asm-x86/agp_64.h
deleted file mode 100644
index de338666f3f9..000000000000
--- a/include/asm-x86/agp_64.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef AGP_H
2#define AGP_H 1
3
4#include <asm/cacheflush.h>
5
6/*
7 * Functions to keep the agpgart mappings coherent.
8 * The GART gives the CPU a physical alias of memory. The alias is
9 * mapped uncacheable. Make sure there are no conflicting mappings
10 * with different cachability attributes for the same page.
11 */
12
13/* Caller's responsibility to call global_flush_tlb() for
14 * performance reasons */
15#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
16#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
17#define flush_agp_mappings() global_flush_tlb()
18
19/* Could use CLFLUSH here if the cpu supports it. But then it would
20 need to be called for each cacheline of the whole page so it may not be
21 worth it. Would need a page for it. */
22#define flush_agp_cache() asm volatile("wbinvd":::"memory")
23
24/* Convert a physical address to an address suitable for the GART. */
25#define phys_to_gart(x) (x)
26#define gart_to_phys(x) (x)
27
28/* GATT allocation. Returns/accepts GATT kernel virtual address. */
29#define alloc_gatt_pages(order) \
30 ((char *)__get_free_pages(GFP_KERNEL, (order)))
31#define free_gatt_pages(table, order) \
32 free_pages((unsigned long)(table), (order))
33
34#endif
diff --git a/include/asm-x86/alternative-asm.h b/include/asm-x86/alternative-asm.h
new file mode 100644
index 000000000000..e2077d343c33
--- /dev/null
+++ b/include/asm-x86/alternative-asm.h
@@ -0,0 +1,22 @@
1#ifdef __ASSEMBLY__
2
3#ifdef CONFIG_X86_32
4# define X86_ALIGN .long
5#else
6# define X86_ALIGN .quad
7#endif
8
9#ifdef CONFIG_SMP
10 .macro LOCK_PREFIX
111: lock
12 .section .smp_locks,"a"
13 .align 4
14 X86_ALIGN 1b
15 .previous
16 .endm
17#else
18 .macro LOCK_PREFIX
19 .endm
20#endif
21
22#endif /* __ASSEMBLY__ */
diff --git a/include/asm-x86/alternative-asm.i b/include/asm-x86/alternative-asm.i
deleted file mode 100644
index 4f360cd3c888..000000000000
--- a/include/asm-x86/alternative-asm.i
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include "alternative-asm_32.i"
3#else
4# include "alternative-asm_64.i"
5#endif
diff --git a/include/asm-x86/alternative-asm_32.i b/include/asm-x86/alternative-asm_32.i
deleted file mode 100644
index f0510209ccbe..000000000000
--- a/include/asm-x86/alternative-asm_32.i
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifdef CONFIG_SMP
2 .macro LOCK_PREFIX
31: lock
4 .section .smp_locks,"a"
5 .align 4
6 .long 1b
7 .previous
8 .endm
9#else
10 .macro LOCK_PREFIX
11 .endm
12#endif
diff --git a/include/asm-x86/alternative-asm_64.i b/include/asm-x86/alternative-asm_64.i
deleted file mode 100644
index 0b3f1a2bb2cb..000000000000
--- a/include/asm-x86/alternative-asm_64.i
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifdef CONFIG_SMP
2 .macro LOCK_PREFIX
31: lock
4 .section .smp_locks,"a"
5 .align 8
6 .quad 1b
7 .previous
8 .endm
9#else
10 .macro LOCK_PREFIX
11 .endm
12#endif
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index f2e64634fa48..2d20a7a19f62 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -206,7 +206,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
206 206
207/* An 64bit atomic type */ 207/* An 64bit atomic type */
208 208
209typedef struct { volatile long counter; } atomic64_t; 209typedef struct { long counter; } atomic64_t;
210 210
211#define ATOMIC64_INIT(i) { (i) } 211#define ATOMIC64_INIT(i) { (i) }
212 212
diff --git a/include/asm-x86/auxvec.h b/include/asm-x86/auxvec.h
index 7ff866f829ca..87f5e6d5a020 100644
--- a/include/asm-x86/auxvec.h
+++ b/include/asm-x86/auxvec.h
@@ -1,13 +1,12 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_AUXVEC_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_AUXVEC_H
3# include "auxvec_32.h" 3/*
4# else 4 * Architecture-neutral AT_ values in 0-17, leave some room
5# include "auxvec_64.h" 5 * for more of them, start the x86-specific ones at 32.
6# endif 6 */
7#else 7#ifdef __i386__
8# ifdef __i386__ 8#define AT_SYSINFO 32
9# include "auxvec_32.h" 9#endif
10# else 10#define AT_SYSINFO_EHDR 33
11# include "auxvec_64.h" 11
12# endif
13#endif 12#endif
diff --git a/include/asm-x86/auxvec_32.h b/include/asm-x86/auxvec_32.h
deleted file mode 100644
index 395e13016bfb..000000000000
--- a/include/asm-x86/auxvec_32.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __ASMi386_AUXVEC_H
2#define __ASMi386_AUXVEC_H
3
4/*
5 * Architecture-neutral AT_ values in 0-17, leave some room
6 * for more of them, start the x86-specific ones at 32.
7 */
8#define AT_SYSINFO 32
9#define AT_SYSINFO_EHDR 33
10
11#endif
diff --git a/include/asm-x86/auxvec_64.h b/include/asm-x86/auxvec_64.h
deleted file mode 100644
index 1d5ab0d03950..000000000000
--- a/include/asm-x86/auxvec_64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_X86_64_AUXVEC_H
2#define __ASM_X86_64_AUXVEC_H
3
4#define AT_SYSINFO_EHDR 33
5
6#endif
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index d4dbbe5f7bd9..1d7d9b4bcacb 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -260,7 +260,7 @@ extern long find_first_bit(const unsigned long * addr, unsigned long size);
260extern long find_next_bit(const unsigned long * addr, long size, long offset); 260extern long find_next_bit(const unsigned long * addr, long size, long offset);
261 261
 262/* return index of first bit set in val or max when no bit is set */ 262/* return index of first bit set in val or max when no bit is set */
263static inline unsigned long __scanbit(unsigned long val, unsigned long max) 263static inline long __scanbit(unsigned long val, unsigned long max)
264{ 264{
265 asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max)); 265 asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
266 return val; 266 return val;
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h
index c655d7f3a5e0..fd8bdc639c48 100644
--- a/include/asm-x86/bug.h
+++ b/include/asm-x86/bug.h
@@ -1,5 +1,42 @@
1#ifndef _ASM_X86_BUG_H
2#define _ASM_X86_BUG_H
3
4#ifdef CONFIG_BUG
5#define HAVE_ARCH_BUG
6
7#ifdef CONFIG_DEBUG_BUGVERBOSE
8
1#ifdef CONFIG_X86_32 9#ifdef CONFIG_X86_32
2# include "bug_32.h" 10# define __BUG_C0 "2:\t.long 1b, %c0\n"
3#else 11#else
4# include "bug_64.h" 12# define __BUG_C0 "2:\t.quad 1b, %c0\n"
13#endif
14
15#define BUG() \
16 do { \
17 asm volatile("1:\tud2\n" \
18 ".pushsection __bug_table,\"a\"\n" \
19 __BUG_C0 \
20 "\t.word %c1, 0\n" \
21 "\t.org 2b+%c2\n" \
22 ".popsection" \
23 : : "i" (__FILE__), "i" (__LINE__), \
24 "i" (sizeof(struct bug_entry))); \
25 for(;;) ; \
26 } while(0)
27
28#else
29#define BUG() \
30 do { \
31 asm volatile("ud2"); \
32 for(;;) ; \
33 } while(0)
34#endif
35
36void out_of_line_bug(void);
37#else /* CONFIG_BUG */
38static inline void out_of_line_bug(void) { }
39#endif /* !CONFIG_BUG */
40
41#include <asm-generic/bug.h>
5#endif 42#endif
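With CONFIG_DEBUG_BUGVERBOSE, the inline assembly above emits one record per BUG() site into the __bug_table section. Roughly, it fills in the bug_entry layout from asm-generic/bug.h (shown here only to map the asm directives onto fields):

struct bug_entry {
	unsigned long	bug_addr;	/* "1b": address of the ud2 instruction */
	const char	*file;		/* "%c0": __FILE__ */
	unsigned short	line;		/* "%c1": __LINE__ */
	unsigned short	flags;		/* the trailing 0 in ".word %c1, 0" */
};

The ".org 2b+%c2" directive pads each record out to sizeof(struct bug_entry).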
diff --git a/include/asm-x86/bug_32.h b/include/asm-x86/bug_32.h
deleted file mode 100644
index b0fd78ca2619..000000000000
--- a/include/asm-x86/bug_32.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef _I386_BUG_H
2#define _I386_BUG_H
3
4
5/*
6 * Tell the user there is some problem.
 7 * The offending file and line are encoded in the __bug_table section.
8 */
9
10#ifdef CONFIG_BUG
11#define HAVE_ARCH_BUG
12
13#ifdef CONFIG_DEBUG_BUGVERBOSE
14#define BUG() \
15 do { \
16 asm volatile("1:\tud2\n" \
17 ".pushsection __bug_table,\"a\"\n" \
18 "2:\t.long 1b, %c0\n" \
19 "\t.word %c1, 0\n" \
20 "\t.org 2b+%c2\n" \
21 ".popsection" \
22 : : "i" (__FILE__), "i" (__LINE__), \
23 "i" (sizeof(struct bug_entry))); \
24 for(;;) ; \
25 } while(0)
26
27#else
28#define BUG() \
29 do { \
30 asm volatile("ud2"); \
31 for(;;) ; \
32 } while(0)
33#endif
34#endif
35
36#include <asm-generic/bug.h>
37#endif
diff --git a/include/asm-x86/bug_64.h b/include/asm-x86/bug_64.h
deleted file mode 100644
index 682606414913..000000000000
--- a/include/asm-x86/bug_64.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef __ASM_X8664_BUG_H
2#define __ASM_X8664_BUG_H 1
3
4#ifdef CONFIG_BUG
5#define HAVE_ARCH_BUG
6
7#ifdef CONFIG_DEBUG_BUGVERBOSE
8#define BUG() \
9 do { \
10 asm volatile("1:\tud2\n" \
11 ".pushsection __bug_table,\"a\"\n" \
12 "2:\t.quad 1b, %c0\n" \
13 "\t.word %c1, 0\n" \
14 "\t.org 2b+%c2\n" \
15 ".popsection" \
16 : : "i" (__FILE__), "i" (__LINE__), \
17 "i" (sizeof(struct bug_entry))); \
18 for(;;) ; \
19 } while(0)
20#else
21#define BUG() \
22 do { \
23 asm volatile("ud2"); \
24 for(;;) ; \
25 } while(0)
26#endif
27
28void out_of_line_bug(void);
29#else
30static inline void out_of_line_bug(void) { }
31#endif
32
33#include <asm-generic/bug.h>
34#endif
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index ddf42d36dd50..aac8317420af 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -1,5 +1,6 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_BUGS_H
2# include "bugs_32.h" 2#define _ASM_X86_BUGS_H
3#else 3
4# include "bugs_64.h" 4void check_bugs(void);
5#endif 5
6#endif /* _ASM_X86_BUGS_H */
diff --git a/include/asm-x86/bugs_32.h b/include/asm-x86/bugs_32.h
deleted file mode 100644
index d28979ff73be..000000000000
--- a/include/asm-x86/bugs_32.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * This is included by init/main.c to check for architecture-dependent bugs.
3 *
4 * Needs:
5 * void check_bugs(void);
6 */
7#ifndef _ASM_I386_BUG_H
8#define _ASM_I386_BUG_H
9
10void check_bugs(void);
11
12#endif /* _ASM_I386_BUG_H */
diff --git a/include/asm-x86/bugs_64.h b/include/asm-x86/bugs_64.h
deleted file mode 100644
index b33dc04d8f42..000000000000
--- a/include/asm-x86/bugs_64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_X86_64_BUGS_H
2#define _ASM_X86_64_BUGS_H
3
4void check_bugs(void);
5
6#endif /* _ASM_X86_64_BUGS_H */
diff --git a/include/asm-x86/cache.h b/include/asm-x86/cache.h
index c36d190ac9d8..1e0bac86f38f 100644
--- a/include/asm-x86/cache.h
+++ b/include/asm-x86/cache.h
@@ -1,5 +1,20 @@
1#ifdef CONFIG_X86_32 1#ifndef _ARCH_X86_CACHE_H
2# include "cache_32.h" 2#define _ARCH_X86_CACHE_H
3#else 3
4# include "cache_64.h" 4/* L1 cache line size */
5#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7
8#define __read_mostly __attribute__((__section__(".data.read_mostly")))
9
10#ifdef CONFIG_X86_VSMP
11/* vSMP Internode cacheline shift */
12#define INTERNODE_CACHE_SHIFT (12)
13#ifdef CONFIG_SMP
14#define __cacheline_aligned_in_smp \
15 __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
16 __attribute__((__section__(".data.page_aligned")))
17#endif
18#endif
19
5#endif 20#endif
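As a hedged, one-line illustration of the __read_mostly annotation defined above (the variable is hypothetical): data that is read on hot paths but written rarely is grouped into .data.read_mostly so it does not share cache lines with frequently dirtied data.

static unsigned long lookup_hits __read_mostly;	/* read constantly, written rarely */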
diff --git a/include/asm-x86/cache_32.h b/include/asm-x86/cache_32.h
deleted file mode 100644
index 57c62f414158..000000000000
--- a/include/asm-x86/cache_32.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * include/asm-i386/cache.h
3 */
4#ifndef __ARCH_I386_CACHE_H
5#define __ARCH_I386_CACHE_H
6
7
8/* L1 cache line size */
9#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11
12#define __read_mostly __attribute__((__section__(".data.read_mostly")))
13
14#endif
diff --git a/include/asm-x86/cache_64.h b/include/asm-x86/cache_64.h
deleted file mode 100644
index 052df758ae61..000000000000
--- a/include/asm-x86/cache_64.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * include/asm-x86_64/cache.h
3 */
4#ifndef __ARCH_X8664_CACHE_H
5#define __ARCH_X8664_CACHE_H
6
7
8/* L1 cache line size */
9#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11
12#ifdef CONFIG_X86_VSMP
13
14/* vSMP Internode cacheline shift */
15#define INTERNODE_CACHE_SHIFT (12)
16#ifdef CONFIG_SMP
17#define __cacheline_aligned_in_smp \
18 __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
19 __attribute__((__section__(".data.page_aligned")))
20#endif
21
22#endif
23
24#define __read_mostly __attribute__((__section__(".data.read_mostly")))
25
26#endif
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index e2df3b55034a..b3d43de44c59 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -1,5 +1,40 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_CACHEFLUSH_H
2# include "cacheflush_32.h" 2#define _ASM_X86_CACHEFLUSH_H
3#else 3
4# include "cacheflush_64.h" 4/* Keep includes the same across arches. */
5#include <linux/mm.h>
6
7/* Caches aren't brain-dead on the intel. */
8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
11#define flush_cache_range(vma, start, end) do { } while (0)
12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
13#define flush_dcache_page(page) do { } while (0)
14#define flush_dcache_mmap_lock(mapping) do { } while (0)
15#define flush_dcache_mmap_unlock(mapping) do { } while (0)
16#define flush_icache_range(start, end) do { } while (0)
17#define flush_icache_page(vma,pg) do { } while (0)
18#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
19#define flush_cache_vmap(start, end) do { } while (0)
20#define flush_cache_vunmap(start, end) do { } while (0)
21
22#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 memcpy(dst, src, len)
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
25 memcpy(dst, src, len)
26
27void global_flush_tlb(void);
28int change_page_attr(struct page *page, int numpages, pgprot_t prot);
29int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
30
31#ifdef CONFIG_DEBUG_PAGEALLOC
32/* internal debugging function */
33void kernel_map_pages(struct page *page, int numpages, int enable);
34#endif
35
36#ifdef CONFIG_DEBUG_RODATA
37void mark_rodata_ro(void);
38#endif
39
5#endif 40#endif
diff --git a/include/asm-x86/cacheflush_32.h b/include/asm-x86/cacheflush_32.h
deleted file mode 100644
index 74e03c8f2e51..000000000000
--- a/include/asm-x86/cacheflush_32.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef _I386_CACHEFLUSH_H
2#define _I386_CACHEFLUSH_H
3
4/* Keep includes the same across arches. */
5#include <linux/mm.h>
6
7/* Caches aren't brain-dead on the intel. */
8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
11#define flush_cache_range(vma, start, end) do { } while (0)
12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
13#define flush_dcache_page(page) do { } while (0)
14#define flush_dcache_mmap_lock(mapping) do { } while (0)
15#define flush_dcache_mmap_unlock(mapping) do { } while (0)
16#define flush_icache_range(start, end) do { } while (0)
17#define flush_icache_page(vma,pg) do { } while (0)
18#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
19#define flush_cache_vmap(start, end) do { } while (0)
20#define flush_cache_vunmap(start, end) do { } while (0)
21
22#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 memcpy(dst, src, len)
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
25 memcpy(dst, src, len)
26
27void global_flush_tlb(void);
28int change_page_attr(struct page *page, int numpages, pgprot_t prot);
29
30#ifdef CONFIG_DEBUG_PAGEALLOC
31/* internal debugging function */
32void kernel_map_pages(struct page *page, int numpages, int enable);
33#endif
34
35#ifdef CONFIG_DEBUG_RODATA
36void mark_rodata_ro(void);
37#endif
38
39#endif /* _I386_CACHEFLUSH_H */
diff --git a/include/asm-x86/cacheflush_64.h b/include/asm-x86/cacheflush_64.h
deleted file mode 100644
index ab1cb5c7dc92..000000000000
--- a/include/asm-x86/cacheflush_64.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef _X8664_CACHEFLUSH_H
2#define _X8664_CACHEFLUSH_H
3
4/* Keep includes the same across arches. */
5#include <linux/mm.h>
6
7/* Caches aren't brain-dead on the intel. */
8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_dup_mm(mm) do { } while (0)
11#define flush_cache_range(vma, start, end) do { } while (0)
12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
13#define flush_dcache_page(page) do { } while (0)
14#define flush_dcache_mmap_lock(mapping) do { } while (0)
15#define flush_dcache_mmap_unlock(mapping) do { } while (0)
16#define flush_icache_range(start, end) do { } while (0)
17#define flush_icache_page(vma,pg) do { } while (0)
18#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
19#define flush_cache_vmap(start, end) do { } while (0)
20#define flush_cache_vunmap(start, end) do { } while (0)
21
22#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 memcpy(dst, src, len)
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
25 memcpy(dst, src, len)
26
27void global_flush_tlb(void);
28int change_page_attr(struct page *page, int numpages, pgprot_t prot);
29int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
30
31#ifdef CONFIG_DEBUG_RODATA
32void mark_rodata_ro(void);
33#endif
34
35#endif /* _X8664_CACHEFLUSH_H */
diff --git a/include/asm-x86/cpu.h b/include/asm-x86/cpu.h
index 9d914e1e4aad..b1bc7b1b64b0 100644
--- a/include/asm-x86/cpu.h
+++ b/include/asm-x86/cpu.h
@@ -13,9 +13,6 @@ struct i386_cpu {
13extern int arch_register_cpu(int num); 13extern int arch_register_cpu(int num);
14#ifdef CONFIG_HOTPLUG_CPU 14#ifdef CONFIG_HOTPLUG_CPU
15extern void arch_unregister_cpu(int); 15extern void arch_unregister_cpu(int);
16extern int enable_cpu_hotplug;
17#else
18#define enable_cpu_hotplug 0
19#endif 16#endif
20 17
21DECLARE_PER_CPU(int, cpu_state); 18DECLARE_PER_CPU(int, cpu_state);
diff --git a/include/asm-x86/cputime.h b/include/asm-x86/cputime.h
index 87c37cf6b707..6d68ad7e0ea3 100644
--- a/include/asm-x86/cputime.h
+++ b/include/asm-x86/cputime.h
@@ -1,5 +1 @@
1#ifdef CONFIG_X86_32 #include <asm-generic/cputime.h>
2# include "cputime_32.h"
3#else
4# include "cputime_64.h"
5#endif
diff --git a/include/asm-x86/cputime_32.h b/include/asm-x86/cputime_32.h
deleted file mode 100644
index 398ed7cd171d..000000000000
--- a/include/asm-x86/cputime_32.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __I386_CPUTIME_H
2#define __I386_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __I386_CPUTIME_H */
diff --git a/include/asm-x86/cputime_64.h b/include/asm-x86/cputime_64.h
deleted file mode 100644
index a07012dc5a3c..000000000000
--- a/include/asm-x86/cputime_64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __X86_64_CPUTIME_H
2#define __X86_64_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __X86_64_CPUTIME_H */
diff --git a/include/asm-x86/debugreg.h b/include/asm-x86/debugreg.h
index b6ce7e4fa002..c6344d572b03 100644
--- a/include/asm-x86/debugreg.h
+++ b/include/asm-x86/debugreg.h
@@ -1,13 +1,70 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_DEBUGREG_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_DEBUGREG_H
3# include "debugreg_32.h" 3
4# else 4
5# include "debugreg_64.h" 5/* Indicate the register numbers for a number of the specific
6# endif 6 debug registers. Registers 0-3 contain the addresses we wish to trap on */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
9
10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
12
13/* Define a few things for the status register. We can use this to determine
14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */
16
17#define DR_TRAP0 (0x1) /* db0 */
18#define DR_TRAP1 (0x2) /* db1 */
19#define DR_TRAP2 (0x4) /* db2 */
20#define DR_TRAP3 (0x8) /* db3 */
21
22#define DR_STEP (0x4000) /* single-step */
23#define DR_SWITCH (0x8000) /* task switch */
24
25/* Now define a bunch of things for manipulating the control register.
26 The top two bytes of the control register consist of 4 fields of 4
27 bits - each field corresponds to one of the four debug registers,
28 and indicates what types of access we trap on, and how large the data
29 field is that we are looking at */
30
31#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
32#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
33
34#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
35#define DR_RW_WRITE (0x1)
36#define DR_RW_READ (0x3)
37
38#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
39#define DR_LEN_2 (0x4)
40#define DR_LEN_4 (0xC)
41#define DR_LEN_8 (0x8)
42
43/* The low byte to the control register determine which registers are
44 enabled. There are 4 fields of two bits. One bit is "local", meaning
45 that the processor will reset the bit after a task switch and the other
46 is global meaning that we have to explicitly reset the bit. With linux,
47 you can use either one, since we explicitly zero the register when we enter
48 kernel mode. */
49
50#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
51#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
52#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
53
54#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
55#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
56
57/* The second byte to the control register has a few special things.
58 We can slow the instruction pipeline for instructions coming via the
59 gdt or the ldt if we want to. I am not sure why this is an advantage */
60
61#ifdef __i386__
62#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
7#else 63#else
8# ifdef __i386__ 64#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
9# include "debugreg_32.h" 65#endif
10# else 66
11# include "debugreg_64.h" 67#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
12# endif 68#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
69
13#endif 70#endif
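A hedged sketch of how the control-register constants above compose (the function is illustrative): arming a debug register as a 4-byte write watchpoint sets its local-enable bit in the low byte of DR7 and its type/length field in the upper half.

static unsigned long dr7_for_write_watch(int regnum)
{
	unsigned long dr7 = 0;

	/* local enable bit for this register, in the low byte of DR7 */
	dr7 |= 1UL << (regnum * DR_ENABLE_SIZE + DR_LOCAL_ENABLE_SHIFT);

	/* access type and length, 4 control bits per register from bit 16 up */
	dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
			<< (DR_CONTROL_SHIFT + regnum * DR_CONTROL_SIZE);

	return dr7;
}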
diff --git a/include/asm-x86/debugreg_32.h b/include/asm-x86/debugreg_32.h
deleted file mode 100644
index f0b2b06ae0f7..000000000000
--- a/include/asm-x86/debugreg_32.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef _I386_DEBUGREG_H
2#define _I386_DEBUGREG_H
3
4
5/* Indicate the register numbers for a number of the specific
6 debug registers. Registers 0-3 contain the addresses we wish to trap on */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
9
10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
12
13/* Define a few things for the status register. We can use this to determine
14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */
16
17#define DR_TRAP0 (0x1) /* db0 */
18#define DR_TRAP1 (0x2) /* db1 */
19#define DR_TRAP2 (0x4) /* db2 */
20#define DR_TRAP3 (0x8) /* db3 */
21
22#define DR_STEP (0x4000) /* single-step */
23#define DR_SWITCH (0x8000) /* task switch */
24
25/* Now define a bunch of things for manipulating the control register.
26 The top two bytes of the control register consist of 4 fields of 4
27 bits - each field corresponds to one of the four debug registers,
28 and indicates what types of access we trap on, and how large the data
29 field is that we are looking at */
30
31#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
32#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
33
34#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
35#define DR_RW_WRITE (0x1)
36#define DR_RW_READ (0x3)
37
38#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
39#define DR_LEN_2 (0x4)
40#define DR_LEN_4 (0xC)
41
42/* The low byte to the control register determine which registers are
43 enabled. There are 4 fields of two bits. One bit is "local", meaning
44 that the processor will reset the bit after a task switch and the other
45 is global meaning that we have to explicitly reset the bit. With linux,
46 you can use either one, since we explicitly zero the register when we enter
47 kernel mode. */
48
49#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
50#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
51#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
52
53#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
54#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
55
56/* The second byte to the control register has a few special things.
57 We can slow the instruction pipeline for instructions coming via the
58 gdt or the ldt if we want to. I am not sure why this is an advantage */
59
60#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
61#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
62#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
63
64#endif
diff --git a/include/asm-x86/debugreg_64.h b/include/asm-x86/debugreg_64.h
deleted file mode 100644
index bd1aab1d8c4a..000000000000
--- a/include/asm-x86/debugreg_64.h
+++ /dev/null
@@ -1,65 +0,0 @@
1#ifndef _X86_64_DEBUGREG_H
2#define _X86_64_DEBUGREG_H
3
4
5/* Indicate the register numbers for a number of the specific
6 debug registers. Registers 0-3 contain the addresses we wish to trap on */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
9
10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
12
13/* Define a few things for the status register. We can use this to determine
14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */
16
17#define DR_TRAP0 (0x1) /* db0 */
18#define DR_TRAP1 (0x2) /* db1 */
19#define DR_TRAP2 (0x4) /* db2 */
20#define DR_TRAP3 (0x8) /* db3 */
21
22#define DR_STEP (0x4000) /* single-step */
23#define DR_SWITCH (0x8000) /* task switch */
24
25/* Now define a bunch of things for manipulating the control register.
26 The top two bytes of the control register consist of 4 fields of 4
27 bits - each field corresponds to one of the four debug registers,
28 and indicates what types of access we trap on, and how large the data
29 field is that we are looking at */
30
31#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
32#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
33
34#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
35#define DR_RW_WRITE (0x1)
36#define DR_RW_READ (0x3)
37
38#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
39#define DR_LEN_2 (0x4)
40#define DR_LEN_4 (0xC)
41#define DR_LEN_8 (0x8)
42
43/* The low byte to the control register determine which registers are
44 enabled. There are 4 fields of two bits. One bit is "local", meaning
45 that the processor will reset the bit after a task switch and the other
46 is global meaning that we have to explicitly reset the bit. With linux,
47 you can use either one, since we explicitly zero the register when we enter
48 kernel mode. */
49
50#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
51#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
52#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
53
54#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
55#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
56
57/* The second byte to the control register has a few special things.
58 We can slow the instruction pipeline for instructions coming via the
59 gdt or the ldt if we want to. I am not sure why this is an advantage */
60
61#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
62#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
63#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
64
65#endif
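
The DR_* constants above (carried over into the unified debugreg.h) encode the layout of the DR7 control register: one 2-bit enable field per breakpoint in the low byte and one 4-bit type/length field per breakpoint in the top 16 bits. A minimal user-space sketch, not part of this patch, with the constants copied locally so it builds on its own:

/* Sketch: compose a DR7 value that arms slot n as a 4-byte write watchpoint. */
#include <stdio.h>

#define DR_CONTROL_SHIFT      16
#define DR_CONTROL_SIZE        4
#define DR_RW_WRITE          0x1
#define DR_LEN_4             0xC
#define DR_LOCAL_ENABLE_SHIFT  0
#define DR_ENABLE_SIZE         2

static unsigned long dr7_for_slot(int n)
{
	unsigned long dr7 = 0;

	/* Local-enable bit for this slot, in the low byte. */
	dr7 |= 1ul << (DR_LOCAL_ENABLE_SHIFT + n * DR_ENABLE_SIZE);
	/* Type/length field for this slot, in the top 16 bits. */
	dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
	       << (DR_CONTROL_SHIFT + n * DR_CONTROL_SIZE);
	return dr7;
}

int main(void)
{
	int n;

	for (n = 0; n < 4; n++)
		printf("slot %d: dr7 = %#lx\n", n, dr7_for_slot(n));
	return 0;
}

For slot 0 this prints 0xd0001: local-enable bit 0 set, R/W0 = write, LEN0 = 4 bytes.
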
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
index 10f2c71d622b..d11d47fc1a0e 100644
--- a/include/asm-x86/delay.h
+++ b/include/asm-x86/delay.h
@@ -1,5 +1,31 @@
1#ifdef CONFIG_X86_32
2# include "delay_32.h"
3#else
4# include "delay_64.h"
5#endif
1#ifndef _ASM_X86_DELAY_H
2#define _ASM_X86_DELAY_H
3
4/*
5 * Copyright (C) 1993 Linus Torvalds
6 *
7 * Delay routines calling functions in arch/x86/lib/delay.c
8 */
9
10/* Undefined functions to get compile-time errors */
11extern void __bad_udelay(void);
12extern void __bad_ndelay(void);
13
14extern void __udelay(unsigned long usecs);
15extern void __ndelay(unsigned long nsecs);
16extern void __const_udelay(unsigned long usecs);
17extern void __delay(unsigned long loops);
18
19/* 0x10c7 is 2**32 / 1000000 (rounded up) */
20#define udelay(n) (__builtin_constant_p(n) ? \
21 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
22 __udelay(n))
23
24/* 0x5 is 2**32 / 1000000000 (rounded up) */
25#define ndelay(n) (__builtin_constant_p(n) ? \
26 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
27 __ndelay(n))
28
29void use_tsc_delay(void);
30
31#endif /* _ASM_X86_DELAY_H */
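
The 0x10c7 and 5 multipliers in the unified udelay()/ndelay() macros are 2^32/10^6 and 2^32/10^9 rounded up, so __const_udelay() can turn microseconds into delay loops using only the upper half of a 32x32 multiply. A stand-alone sketch (not part of the patch) that re-derives the constants:

/* Sketch: check the magic constants used by udelay()/ndelay() above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t two32 = 1ull << 32;
	uint64_t us = (two32 + 1000000 - 1) / 1000000;       /* round up */
	uint64_t ns = (two32 + 1000000000 - 1) / 1000000000; /* round up */

	printf("2^32/10^6 rounded up = %#llx (header uses 0x10c7)\n",
	       (unsigned long long)us);
	printf("2^32/10^9 rounded up = %#llx (header uses 0x5)\n",
	       (unsigned long long)ns);
	return 0;
}
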
diff --git a/include/asm-x86/delay_32.h b/include/asm-x86/delay_32.h
deleted file mode 100644
index 9ae5e3782ed8..000000000000
--- a/include/asm-x86/delay_32.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _I386_DELAY_H
2#define _I386_DELAY_H
3
4/*
5 * Copyright (C) 1993 Linus Torvalds
6 *
7 * Delay routines calling functions in arch/i386/lib/delay.c
8 */
9
10/* Undefined functions to get compile-time errors */
11extern void __bad_udelay(void);
12extern void __bad_ndelay(void);
13
14extern void __udelay(unsigned long usecs);
15extern void __ndelay(unsigned long nsecs);
16extern void __const_udelay(unsigned long usecs);
17extern void __delay(unsigned long loops);
18
19/* 0x10c7 is 2**32 / 1000000 (rounded up) */
20#define udelay(n) (__builtin_constant_p(n) ? \
21 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
22 __udelay(n))
23
24/* 0x5 is 2**32 / 1000000000 (rounded up) */
25#define ndelay(n) (__builtin_constant_p(n) ? \
26 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
27 __ndelay(n))
28
29void use_tsc_delay(void);
30
31#endif /* defined(_I386_DELAY_H) */
diff --git a/include/asm-x86/delay_64.h b/include/asm-x86/delay_64.h
deleted file mode 100644
index c2669f1f5529..000000000000
--- a/include/asm-x86/delay_64.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _X8664_DELAY_H
2#define _X8664_DELAY_H
3
4/*
5 * Copyright (C) 1993 Linus Torvalds
6 *
7 * Delay routines calling functions in arch/x86_64/lib/delay.c
8 */
9
10/* Undefined functions to get compile-time errors */
11extern void __bad_udelay(void);
12extern void __bad_ndelay(void);
13
14extern void __udelay(unsigned long usecs);
15extern void __ndelay(unsigned long nsecs);
16extern void __const_udelay(unsigned long usecs);
17extern void __delay(unsigned long loops);
18
19/* 0x10c7 is 2**32 / 1000000 (rounded up) */
20#define udelay(n) (__builtin_constant_p(n) ? \
21 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
22 __udelay(n))
23
24/* 0x5 is 2**32 / 1000000000 (rounded up) */
25#define ndelay(n) (__builtin_constant_p(n) ? \
26 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
27 __ndelay(n))
28
29
30#endif /* defined(_X8664_DELAY_H) */
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index e2bcf7c7dcee..d9ee5e52e91b 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -1,5 +1,10 @@
1#ifdef CONFIG_X86_32
2# include "device_32.h"
3#else
4# include "device_64.h"
5#endif
1#ifndef _ASM_X86_DEVICE_H
2#define _ASM_X86_DEVICE_H
3
4struct dev_archdata {
5#ifdef CONFIG_ACPI
6	void *acpi_handle;
7#endif
8};
9
10#endif /* _ASM_X86_DEVICE_H */
diff --git a/include/asm-x86/device_32.h b/include/asm-x86/device_32.h
deleted file mode 100644
index 849604c70e6b..000000000000
--- a/include/asm-x86/device_32.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#ifndef _ASM_I386_DEVICE_H
7#define _ASM_I386_DEVICE_H
8
9struct dev_archdata {
10#ifdef CONFIG_ACPI
11 void *acpi_handle;
12#endif
13};
14
15#endif /* _ASM_I386_DEVICE_H */
diff --git a/include/asm-x86/device_64.h b/include/asm-x86/device_64.h
deleted file mode 100644
index 3afa03f33a36..000000000000
--- a/include/asm-x86/device_64.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#ifndef _ASM_X86_64_DEVICE_H
7#define _ASM_X86_64_DEVICE_H
8
9struct dev_archdata {
10#ifdef CONFIG_ACPI
11 void *acpi_handle;
12#endif
13};
14
15#endif /* _ASM_X86_64_DEVICE_H */
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h
index c9e4e8ebc270..8e2b0e6aa8e7 100644
--- a/include/asm-x86/dmi.h
+++ b/include/asm-x86/dmi.h
@@ -1,5 +1,35 @@
1#ifndef _ASM_X86_DMI_H
2#define _ASM_X86_DMI_H
3
4#include <asm/io.h>
5
1#ifdef CONFIG_X86_32
2# include "dmi_32.h"
3#else
4# include "dmi_64.h"
6#ifdef CONFIG_X86_32
7
8/* Use early IO mappings for DMI because it's initialized early */
9#define dmi_ioremap bt_ioremap
10#define dmi_iounmap bt_iounmap
11#define dmi_alloc alloc_bootmem
12
13#else /* CONFIG_X86_32 */
14
15#define DMI_MAX_DATA 2048
16
17extern int dmi_alloc_index;
18extern char dmi_alloc_data[DMI_MAX_DATA];
19
20/* This is so early that there is no good way to allocate dynamic memory.
21 Allocate data in an BSS array. */
22static inline void *dmi_alloc(unsigned len)
23{
24 int idx = dmi_alloc_index;
25 if ((dmi_alloc_index += len) > DMI_MAX_DATA)
26 return NULL;
27 return dmi_alloc_data + idx;
28}
29
30#define dmi_ioremap early_ioremap
31#define dmi_iounmap early_iounmap
32
33#endif
34
5#endif
35#endif
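
The 64-bit dmi_alloc() kept in the unified header is a bump allocator over a fixed BSS array, used because DMI parsing runs before the normal allocators are up. A stand-alone sketch of the same logic, not part of the patch; note there is no free and no alignment handling:

/* Sketch: the dmi_alloc() bump allocator as an ordinary user-space program. */
#include <stdio.h>

#define DMI_MAX_DATA 2048

static int  dmi_alloc_index;
static char dmi_alloc_data[DMI_MAX_DATA];

static void *dmi_alloc(unsigned len)
{
	int idx = dmi_alloc_index;

	if ((dmi_alloc_index += len) > DMI_MAX_DATA)
		return NULL;            /* pool exhausted; nothing is ever freed */
	return dmi_alloc_data + idx;
}

int main(void)
{
	void *a = dmi_alloc(100);
	void *b = dmi_alloc(2000);      /* 100 + 2000 > 2048, so this returns NULL */

	printf("a=%p b=%p\n", a, b);
	return 0;
}
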
diff --git a/include/asm-x86/dmi_32.h b/include/asm-x86/dmi_32.h
deleted file mode 100644
index 38d4eeb7fc7e..000000000000
--- a/include/asm-x86/dmi_32.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_DMI_H
2#define _ASM_DMI_H 1
3
4#include <asm/io.h>
5
6/* Use early IO mappings for DMI because it's initialized early */
7#define dmi_ioremap bt_ioremap
8#define dmi_iounmap bt_iounmap
9#define dmi_alloc alloc_bootmem
10
11#endif
diff --git a/include/asm-x86/dmi_64.h b/include/asm-x86/dmi_64.h
deleted file mode 100644
index d02e32e3c3f0..000000000000
--- a/include/asm-x86/dmi_64.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _ASM_DMI_H
2#define _ASM_DMI_H 1
3
4#include <asm/io.h>
5
6#define DMI_MAX_DATA 2048
7
8extern int dmi_alloc_index;
9extern char dmi_alloc_data[DMI_MAX_DATA];
10
11/* This is so early that there is no good way to allocate dynamic memory.
12 Allocate data in an BSS array. */
13static inline void *dmi_alloc(unsigned len)
14{
15 int idx = dmi_alloc_index;
16 if ((dmi_alloc_index += len) > DMI_MAX_DATA)
17 return NULL;
18 return dmi_alloc_data + idx;
19}
20
21#define dmi_ioremap early_ioremap
22#define dmi_iounmap early_iounmap
23
24#endif
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h
index f8b888e140b0..cf3200a745ad 100644
--- a/include/asm-x86/edac.h
+++ b/include/asm-x86/edac.h
@@ -1,5 +1,18 @@
1#ifdef CONFIG_X86_32
2# include "edac_32.h"
3#else
4# include "edac_64.h"
1#ifndef _ASM_X86_EDAC_H
2#define _ASM_X86_EDAC_H
3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5
6static __inline__ void atomic_scrub(void *va, u32 size)
7{
8 u32 i, *virt_addr = va;
9
10 /*
11 * Very carefully read and write to memory atomically so we
12 * are interrupt, DMA and SMP safe.
13 */
14 for (i = 0; i < size / 4; i++, virt_addr++)
15 __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
16}
17
5#endif
18#endif
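
atomic_scrub() above uses a locked "addl $0" to force an atomic read-modify-write of each dword, which makes the memory controller rewrite the data together with fresh ECC check bits. A user-space sketch of the same loop, not part of the patch and x86-only because of the inline assembly:

/* Sketch: rewrite every dword of a buffer with a locked add of zero. */
#include <stdio.h>
#include <stdint.h>

static void scrub(void *va, uint32_t size)
{
	uint32_t i, *p = va;

	for (i = 0; i < size / 4; i++, p++)
		__asm__ __volatile__("lock; addl $0, %0" : "+m" (*p));
}

int main(void)
{
	static uint32_t buf[64];

	scrub(buf, sizeof(buf));
	printf("scrubbed %zu bytes\n", sizeof(buf));
	return 0;
}
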
diff --git a/include/asm-x86/edac_32.h b/include/asm-x86/edac_32.h
deleted file mode 100644
index 3e7dd0ab68ce..000000000000
--- a/include/asm-x86/edac_32.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_EDAC_H
2#define ASM_EDAC_H
3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5
6static __inline__ void atomic_scrub(void *va, u32 size)
7{
8 unsigned long *virt_addr = va;
9 u32 i;
10
11 for (i = 0; i < size / 4; i++, virt_addr++)
12 /* Very carefully read and write to memory atomically
13 * so we are interrupt, DMA and SMP safe.
14 */
15 __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
16}
17
18#endif
diff --git a/include/asm-x86/edac_64.h b/include/asm-x86/edac_64.h
deleted file mode 100644
index cad1cd42b4ee..000000000000
--- a/include/asm-x86/edac_64.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef ASM_EDAC_H
2#define ASM_EDAC_H
3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5
6static __inline__ void atomic_scrub(void *va, u32 size)
7{
8 unsigned int *virt_addr = va;
9 u32 i;
10
11 for (i = 0; i < size / 4; i++, virt_addr++)
12 /* Very carefully read and write to memory atomically
13 * so we are interrupt, DMA and SMP safe.
14 */
15 __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
16}
17
18#endif
diff --git a/include/asm-x86/errno.h b/include/asm-x86/errno.h
index 9d511be8e573..4c82b503d92f 100644
--- a/include/asm-x86/errno.h
+++ b/include/asm-x86/errno.h
@@ -1,13 +1 @@
1#ifdef __KERNEL__
#include <asm-generic/errno.h>
2# ifdef CONFIG_X86_32
3# include "errno_32.h"
4# else
5# include "errno_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "errno_32.h"
10# else
11# include "errno_64.h"
12# endif
13#endif
diff --git a/include/asm-x86/errno_32.h b/include/asm-x86/errno_32.h
deleted file mode 100644
index 969b34374728..000000000000
--- a/include/asm-x86/errno_32.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _I386_ERRNO_H
2#define _I386_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif
diff --git a/include/asm-x86/errno_64.h b/include/asm-x86/errno_64.h
deleted file mode 100644
index 311182129e32..000000000000
--- a/include/asm-x86/errno_64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _X8664_ERRNO_H
2#define _X8664_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif
diff --git a/include/asm-x86/fb.h b/include/asm-x86/fb.h
index 238c7ca45877..53018464aea6 100644
--- a/include/asm-x86/fb.h
+++ b/include/asm-x86/fb.h
@@ -1,5 +1,21 @@
1#ifndef _ASM_X86_FB_H
2#define _ASM_X86_FB_H
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 if (boot_cpu_data.x86 > 3)
12 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
13}
14
1#ifdef CONFIG_X86_32
2# include "fb_32.h"
3#else
4# include "fb_64.h"
5#endif
15#ifdef CONFIG_X86_32
16extern int fb_is_primary_device(struct fb_info *info);
17#else
18static inline int fb_is_primary_device(struct fb_info *info) { return 0; }
19#endif
20
21#endif /* _ASM_X86_FB_H */
diff --git a/include/asm-x86/fb_32.h b/include/asm-x86/fb_32.h
deleted file mode 100644
index d1c6297d4a61..000000000000
--- a/include/asm-x86/fb_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8extern int fb_is_primary_device(struct fb_info *info);
9
10static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
11 unsigned long off)
12{
13 if (boot_cpu_data.x86 > 3)
14 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
15}
16
17#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86/fb_64.h b/include/asm-x86/fb_64.h
deleted file mode 100644
index 60548e651d12..000000000000
--- a/include/asm-x86/fb_64.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4#include <linux/fs.h>
5#include <asm/page.h>
6
7static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
8 unsigned long off)
9{
10 if (boot_cpu_data.x86 > 3)
11 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h
index aecbb6dca21d..a48d7153c097 100644
--- a/include/asm-x86/floppy.h
+++ b/include/asm-x86/floppy.h
@@ -1,5 +1,278 @@
1#ifdef CONFIG_X86_32
2# include "floppy_32.h"
3#else
4# include "floppy_64.h"
1/*
2 * Architecture specific parts of the Floppy driver
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995
9 */
10#ifndef _ASM_X86_FLOPPY_H
11#define _ASM_X86_FLOPPY_H
12
13#include <linux/vmalloc.h>
14
15/*
16 * The DMA channel used by the floppy controller cannot access data at
17 * addresses >= 16MB
18 *
19 * Went back to the 1MB limit, as some people had problems with the floppy
20 * driver otherwise. It doesn't matter much for performance anyway, as most
21 * floppy accesses go through the track buffer.
22 */
23#define _CROSS_64KB(a,s,vdma) \
24(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
25
26#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
27
28
29#define SW fd_routine[use_virtual_dma&1]
30#define CSW fd_routine[can_use_virtual_dma & 1]
31
32
33#define fd_inb(port) inb_p(port)
34#define fd_outb(value,port) outb_p(value,port)
35
36#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
37#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
38#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
39#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
40#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
41#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
42#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
43#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
44
45#define FLOPPY_CAN_FALLBACK_ON_NODMA
46
47static int virtual_dma_count;
48static int virtual_dma_residue;
49static char *virtual_dma_addr;
50static int virtual_dma_mode;
51static int doing_pdma;
52
53static irqreturn_t floppy_hardint(int irq, void *dev_id)
54{
55 register unsigned char st;
56
57#undef TRACE_FLPY_INT
58
59#ifdef TRACE_FLPY_INT
60 static int calls=0;
61 static int bytes=0;
62 static int dma_wait=0;
5#endif
63#endif
64 if (!doing_pdma)
65 return floppy_interrupt(irq, dev_id);
66
67#ifdef TRACE_FLPY_INT
68 if(!calls)
69 bytes = virtual_dma_count;
70#endif
71
72 {
73 register int lcount;
74 register char *lptr;
75
76 st = 1;
77 for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
78 lcount; lcount--, lptr++) {
79 st=inb(virtual_dma_port+4) & 0xa0 ;
80 if(st != 0xa0)
81 break;
82 if(virtual_dma_mode)
83 outb_p(*lptr, virtual_dma_port+5);
84 else
85 *lptr = inb_p(virtual_dma_port+5);
86 }
87 virtual_dma_count = lcount;
88 virtual_dma_addr = lptr;
89 st = inb(virtual_dma_port+4);
90 }
91
92#ifdef TRACE_FLPY_INT
93 calls++;
94#endif
95 if(st == 0x20)
96 return IRQ_HANDLED;
97 if(!(st & 0x20)) {
98 virtual_dma_residue += virtual_dma_count;
99 virtual_dma_count=0;
100#ifdef TRACE_FLPY_INT
101 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
102 virtual_dma_count, virtual_dma_residue, calls, bytes,
103 dma_wait);
104 calls = 0;
105 dma_wait=0;
106#endif
107 doing_pdma = 0;
108 floppy_interrupt(irq, dev_id);
109 return IRQ_HANDLED;
110 }
111#ifdef TRACE_FLPY_INT
112 if(!virtual_dma_count)
113 dma_wait++;
114#endif
115 return IRQ_HANDLED;
116}
117
118static void fd_disable_dma(void)
119{
120 if(! (can_use_virtual_dma & 1))
121 disable_dma(FLOPPY_DMA);
122 doing_pdma = 0;
123 virtual_dma_residue += virtual_dma_count;
124 virtual_dma_count=0;
125}
126
127static int vdma_request_dma(unsigned int dmanr, const char * device_id)
128{
129 return 0;
130}
131
132static void vdma_nop(unsigned int dummy)
133{
134}
135
136
137static int vdma_get_dma_residue(unsigned int dummy)
138{
139 return virtual_dma_count + virtual_dma_residue;
140}
141
142
143static int fd_request_irq(void)
144{
145 if(can_use_virtual_dma)
146 return request_irq(FLOPPY_IRQ, floppy_hardint,
147 IRQF_DISABLED, "floppy", NULL);
148 else
149 return request_irq(FLOPPY_IRQ, floppy_interrupt,
150 IRQF_DISABLED, "floppy", NULL);
151}
152
153static unsigned long dma_mem_alloc(unsigned long size)
154{
155 return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
156}
157
158
159static unsigned long vdma_mem_alloc(unsigned long size)
160{
161 return (unsigned long) vmalloc(size);
162
163}
164
165#define nodma_mem_alloc(size) vdma_mem_alloc(size)
166
167static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
168{
169 if((unsigned long) addr >= (unsigned long) high_memory)
170 vfree((void *)addr);
171 else
172 free_pages(addr, get_order(size));
173}
174
175#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
176
177static void _fd_chose_dma_mode(char *addr, unsigned long size)
178{
179 if(can_use_virtual_dma == 2) {
180 if((unsigned long) addr >= (unsigned long) high_memory ||
181 isa_virt_to_bus(addr) >= 0x1000000 ||
182 _CROSS_64KB(addr, size, 0))
183 use_virtual_dma = 1;
184 else
185 use_virtual_dma = 0;
186 } else {
187 use_virtual_dma = can_use_virtual_dma & 1;
188 }
189}
190
191#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
192
193
194static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
195{
196 doing_pdma = 1;
197 virtual_dma_port = io;
198 virtual_dma_mode = (mode == DMA_MODE_WRITE);
199 virtual_dma_addr = addr;
200 virtual_dma_count = size;
201 virtual_dma_residue = 0;
202 return 0;
203}
204
205static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
206{
207#ifdef FLOPPY_SANITY_CHECK
208 if (CROSS_64KB(addr, size)) {
209 printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
210 return -1;
211 }
212#endif
213 /* actual, physical DMA */
214 doing_pdma = 0;
215 clear_dma_ff(FLOPPY_DMA);
216 set_dma_mode(FLOPPY_DMA,mode);
217 set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
218 set_dma_count(FLOPPY_DMA,size);
219 enable_dma(FLOPPY_DMA);
220 return 0;
221}
222
223static struct fd_routine_l {
224 int (*_request_dma)(unsigned int dmanr, const char * device_id);
225 void (*_free_dma)(unsigned int dmanr);
226 int (*_get_dma_residue)(unsigned int dummy);
227 unsigned long (*_dma_mem_alloc) (unsigned long size);
228 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
229} fd_routine[] = {
230 {
231 request_dma,
232 free_dma,
233 get_dma_residue,
234 dma_mem_alloc,
235 hard_dma_setup
236 },
237 {
238 vdma_request_dma,
239 vdma_nop,
240 vdma_get_dma_residue,
241 vdma_mem_alloc,
242 vdma_dma_setup
243 }
244};
245
246
247static int FDC1 = 0x3f0;
248static int FDC2 = -1;
249
250/*
251 * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
252 * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
253 * coincides with another rtc CMOS user. Paul G.
254 */
255#define FLOPPY0_TYPE ({ \
256 unsigned long flags; \
257 unsigned char val; \
258 spin_lock_irqsave(&rtc_lock, flags); \
259 val = (CMOS_READ(0x10) >> 4) & 15; \
260 spin_unlock_irqrestore(&rtc_lock, flags); \
261 val; \
262})
263
264#define FLOPPY1_TYPE ({ \
265 unsigned long flags; \
266 unsigned char val; \
267 spin_lock_irqsave(&rtc_lock, flags); \
268 val = CMOS_READ(0x10) & 15; \
269 spin_unlock_irqrestore(&rtc_lock, flags); \
270 val; \
271})
272
273#define N_FDC 2
274#define N_DRIVE 8
275
276#define EXTRA_FLOPPY_PARAMS
277
278#endif /* _ASM_X86_FLOPPY_H */
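
CROSS_64KB() above rejects buffers that straddle a 64 KiB boundary, which the ISA DMA controller cannot cross within one transfer. A simplified stand-alone check, not part of the patch; it drops the virtual-DMA bypass and hard-codes K_64 = 0x10000:

/* Sketch: does a buffer of a given size cross a 64 KiB DMA boundary? */
#include <stdio.h>

#define K_64 0x10000

#define CROSS_64KB(a, s) \
	((unsigned long)(a) / K_64 != ((unsigned long)(a) + (s) - 1) / K_64)

int main(void)
{
	/* A 512-byte sector ending exactly at the 64 KiB mark is fine. */
	printf("%d\n", CROSS_64KB(0xFE00, 512));   /* prints 0 */
	/* Shift it up by one byte and it now crosses the boundary. */
	printf("%d\n", CROSS_64KB(0xFE01, 512));   /* prints 1 */
	return 0;
}
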
diff --git a/include/asm-x86/floppy_32.h b/include/asm-x86/floppy_32.h
deleted file mode 100644
index 99583b39d216..000000000000
--- a/include/asm-x86/floppy_32.h
+++ /dev/null
@@ -1,280 +0,0 @@
1/*
2 * Architecture specific parts of the Floppy driver
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995
9 */
10#ifndef __ASM_I386_FLOPPY_H
11#define __ASM_I386_FLOPPY_H
12
13#include <linux/vmalloc.h>
14
15
16/*
17 * The DMA channel used by the floppy controller cannot access data at
18 * addresses >= 16MB
19 *
20 * Went back to the 1MB limit, as some people had problems with the floppy
21 * driver otherwise. It doesn't matter much for performance anyway, as most
22 * floppy accesses go through the track buffer.
23 */
24#define _CROSS_64KB(a,s,vdma) \
25(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
26
27#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
28
29
30#define SW fd_routine[use_virtual_dma&1]
31#define CSW fd_routine[can_use_virtual_dma & 1]
32
33
34#define fd_inb(port) inb_p(port)
35#define fd_outb(value,port) outb_p(value,port)
36
37#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
38#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
39#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
40#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
41#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
42#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
43#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
44#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
45
46#define FLOPPY_CAN_FALLBACK_ON_NODMA
47
48static int virtual_dma_count;
49static int virtual_dma_residue;
50static char *virtual_dma_addr;
51static int virtual_dma_mode;
52static int doing_pdma;
53
54static irqreturn_t floppy_hardint(int irq, void *dev_id)
55{
56 register unsigned char st;
57
58#undef TRACE_FLPY_INT
59
60#ifdef TRACE_FLPY_INT
61 static int calls=0;
62 static int bytes=0;
63 static int dma_wait=0;
64#endif
65 if (!doing_pdma)
66 return floppy_interrupt(irq, dev_id);
67
68#ifdef TRACE_FLPY_INT
69 if(!calls)
70 bytes = virtual_dma_count;
71#endif
72
73 {
74 register int lcount;
75 register char *lptr;
76
77 st = 1;
78 for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
79 lcount; lcount--, lptr++) {
80 st=inb(virtual_dma_port+4) & 0xa0 ;
81 if(st != 0xa0)
82 break;
83 if(virtual_dma_mode)
84 outb_p(*lptr, virtual_dma_port+5);
85 else
86 *lptr = inb_p(virtual_dma_port+5);
87 }
88 virtual_dma_count = lcount;
89 virtual_dma_addr = lptr;
90 st = inb(virtual_dma_port+4);
91 }
92
93#ifdef TRACE_FLPY_INT
94 calls++;
95#endif
96 if(st == 0x20)
97 return IRQ_HANDLED;
98 if(!(st & 0x20)) {
99 virtual_dma_residue += virtual_dma_count;
100 virtual_dma_count=0;
101#ifdef TRACE_FLPY_INT
102 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
103 virtual_dma_count, virtual_dma_residue, calls, bytes,
104 dma_wait);
105 calls = 0;
106 dma_wait=0;
107#endif
108 doing_pdma = 0;
109 floppy_interrupt(irq, dev_id);
110 return IRQ_HANDLED;
111 }
112#ifdef TRACE_FLPY_INT
113 if(!virtual_dma_count)
114 dma_wait++;
115#endif
116 return IRQ_HANDLED;
117}
118
119static void fd_disable_dma(void)
120{
121 if(! (can_use_virtual_dma & 1))
122 disable_dma(FLOPPY_DMA);
123 doing_pdma = 0;
124 virtual_dma_residue += virtual_dma_count;
125 virtual_dma_count=0;
126}
127
128static int vdma_request_dma(unsigned int dmanr, const char * device_id)
129{
130 return 0;
131}
132
133static void vdma_nop(unsigned int dummy)
134{
135}
136
137
138static int vdma_get_dma_residue(unsigned int dummy)
139{
140 return virtual_dma_count + virtual_dma_residue;
141}
142
143
144static int fd_request_irq(void)
145{
146 if(can_use_virtual_dma)
147 return request_irq(FLOPPY_IRQ, floppy_hardint,
148 IRQF_DISABLED, "floppy", NULL);
149 else
150 return request_irq(FLOPPY_IRQ, floppy_interrupt,
151 IRQF_DISABLED, "floppy", NULL);
152
153}
154
155static unsigned long dma_mem_alloc(unsigned long size)
156{
157 return __get_dma_pages(GFP_KERNEL,get_order(size));
158}
159
160
161static unsigned long vdma_mem_alloc(unsigned long size)
162{
163 return (unsigned long) vmalloc(size);
164
165}
166
167#define nodma_mem_alloc(size) vdma_mem_alloc(size)
168
169static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
170{
171 if((unsigned int) addr >= (unsigned int) high_memory)
172 vfree((void *)addr);
173 else
174 free_pages(addr, get_order(size));
175}
176
177#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
178
179static void _fd_chose_dma_mode(char *addr, unsigned long size)
180{
181 if(can_use_virtual_dma == 2) {
182 if((unsigned int) addr >= (unsigned int) high_memory ||
183 isa_virt_to_bus(addr) >= 0x1000000 ||
184 _CROSS_64KB(addr, size, 0))
185 use_virtual_dma = 1;
186 else
187 use_virtual_dma = 0;
188 } else {
189 use_virtual_dma = can_use_virtual_dma & 1;
190 }
191}
192
193#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
194
195
196static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
197{
198 doing_pdma = 1;
199 virtual_dma_port = io;
200 virtual_dma_mode = (mode == DMA_MODE_WRITE);
201 virtual_dma_addr = addr;
202 virtual_dma_count = size;
203 virtual_dma_residue = 0;
204 return 0;
205}
206
207static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
208{
209#ifdef FLOPPY_SANITY_CHECK
210 if (CROSS_64KB(addr, size)) {
211 printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
212 return -1;
213 }
214#endif
215 /* actual, physical DMA */
216 doing_pdma = 0;
217 clear_dma_ff(FLOPPY_DMA);
218 set_dma_mode(FLOPPY_DMA,mode);
219 set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
220 set_dma_count(FLOPPY_DMA,size);
221 enable_dma(FLOPPY_DMA);
222 return 0;
223}
224
225static struct fd_routine_l {
226 int (*_request_dma)(unsigned int dmanr, const char * device_id);
227 void (*_free_dma)(unsigned int dmanr);
228 int (*_get_dma_residue)(unsigned int dummy);
229 unsigned long (*_dma_mem_alloc) (unsigned long size);
230 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
231} fd_routine[] = {
232 {
233 request_dma,
234 free_dma,
235 get_dma_residue,
236 dma_mem_alloc,
237 hard_dma_setup
238 },
239 {
240 vdma_request_dma,
241 vdma_nop,
242 vdma_get_dma_residue,
243 vdma_mem_alloc,
244 vdma_dma_setup
245 }
246};
247
248
249static int FDC1 = 0x3f0;
250static int FDC2 = -1;
251
252/*
253 * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
254 * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
255 * coincides with another rtc CMOS user. Paul G.
256 */
257#define FLOPPY0_TYPE ({ \
258 unsigned long flags; \
259 unsigned char val; \
260 spin_lock_irqsave(&rtc_lock, flags); \
261 val = (CMOS_READ(0x10) >> 4) & 15; \
262 spin_unlock_irqrestore(&rtc_lock, flags); \
263 val; \
264})
265
266#define FLOPPY1_TYPE ({ \
267 unsigned long flags; \
268 unsigned char val; \
269 spin_lock_irqsave(&rtc_lock, flags); \
270 val = CMOS_READ(0x10) & 15; \
271 spin_unlock_irqrestore(&rtc_lock, flags); \
272 val; \
273})
274
275#define N_FDC 2
276#define N_DRIVE 8
277
278#define EXTRA_FLOPPY_PARAMS
279
280#endif /* __ASM_I386_FLOPPY_H */
diff --git a/include/asm-x86/floppy_64.h b/include/asm-x86/floppy_64.h
deleted file mode 100644
index d993380dcb47..000000000000
--- a/include/asm-x86/floppy_64.h
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 * Architecture specific parts of the Floppy driver
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995
9 */
10#ifndef __ASM_X86_64_FLOPPY_H
11#define __ASM_X86_64_FLOPPY_H
12
13#include <linux/vmalloc.h>
14
15
16/*
17 * The DMA channel used by the floppy controller cannot access data at
18 * addresses >= 16MB
19 *
20 * Went back to the 1MB limit, as some people had problems with the floppy
21 * driver otherwise. It doesn't matter much for performance anyway, as most
22 * floppy accesses go through the track buffer.
23 */
24#define _CROSS_64KB(a,s,vdma) \
25(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
26
27#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
28
29
30#define SW fd_routine[use_virtual_dma&1]
31#define CSW fd_routine[can_use_virtual_dma & 1]
32
33
34#define fd_inb(port) inb_p(port)
35#define fd_outb(value,port) outb_p(value,port)
36
37#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
38#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
39#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
40#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
41#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
42#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
43#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
44#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
45
46#define FLOPPY_CAN_FALLBACK_ON_NODMA
47
48static int virtual_dma_count;
49static int virtual_dma_residue;
50static char *virtual_dma_addr;
51static int virtual_dma_mode;
52static int doing_pdma;
53
54static irqreturn_t floppy_hardint(int irq, void *dev_id)
55{
56 register unsigned char st;
57
58#undef TRACE_FLPY_INT
59
60#ifdef TRACE_FLPY_INT
61 static int calls=0;
62 static int bytes=0;
63 static int dma_wait=0;
64#endif
65 if (!doing_pdma)
66 return floppy_interrupt(irq, dev_id);
67
68#ifdef TRACE_FLPY_INT
69 if(!calls)
70 bytes = virtual_dma_count;
71#endif
72
73 {
74 register int lcount;
75 register char *lptr;
76
77 st = 1;
78 for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
79 lcount; lcount--, lptr++) {
80 st=inb(virtual_dma_port+4) & 0xa0 ;
81 if(st != 0xa0)
82 break;
83 if(virtual_dma_mode)
84 outb_p(*lptr, virtual_dma_port+5);
85 else
86 *lptr = inb_p(virtual_dma_port+5);
87 }
88 virtual_dma_count = lcount;
89 virtual_dma_addr = lptr;
90 st = inb(virtual_dma_port+4);
91 }
92
93#ifdef TRACE_FLPY_INT
94 calls++;
95#endif
96 if(st == 0x20)
97 return IRQ_HANDLED;
98 if(!(st & 0x20)) {
99 virtual_dma_residue += virtual_dma_count;
100 virtual_dma_count=0;
101#ifdef TRACE_FLPY_INT
102 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
103 virtual_dma_count, virtual_dma_residue, calls, bytes,
104 dma_wait);
105 calls = 0;
106 dma_wait=0;
107#endif
108 doing_pdma = 0;
109 floppy_interrupt(irq, dev_id);
110 return IRQ_HANDLED;
111 }
112#ifdef TRACE_FLPY_INT
113 if(!virtual_dma_count)
114 dma_wait++;
115#endif
116 return IRQ_HANDLED;
117}
118
119static void fd_disable_dma(void)
120{
121 if(! (can_use_virtual_dma & 1))
122 disable_dma(FLOPPY_DMA);
123 doing_pdma = 0;
124 virtual_dma_residue += virtual_dma_count;
125 virtual_dma_count=0;
126}
127
128static int vdma_request_dma(unsigned int dmanr, const char * device_id)
129{
130 return 0;
131}
132
133static void vdma_nop(unsigned int dummy)
134{
135}
136
137
138static int vdma_get_dma_residue(unsigned int dummy)
139{
140 return virtual_dma_count + virtual_dma_residue;
141}
142
143
144static int fd_request_irq(void)
145{
146 if(can_use_virtual_dma)
147 return request_irq(FLOPPY_IRQ, floppy_hardint,
148 IRQF_DISABLED, "floppy", NULL);
149 else
150 return request_irq(FLOPPY_IRQ, floppy_interrupt,
151 IRQF_DISABLED, "floppy", NULL);
152}
153
154static unsigned long dma_mem_alloc(unsigned long size)
155{
156 return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
157}
158
159
160static unsigned long vdma_mem_alloc(unsigned long size)
161{
162 return (unsigned long) vmalloc(size);
163
164}
165
166#define nodma_mem_alloc(size) vdma_mem_alloc(size)
167
168static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
169{
170 if((unsigned long) addr >= (unsigned long) high_memory)
171 vfree((void *)addr);
172 else
173 free_pages(addr, get_order(size));
174}
175
176#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
177
178static void _fd_chose_dma_mode(char *addr, unsigned long size)
179{
180 if(can_use_virtual_dma == 2) {
181 if((unsigned long) addr >= (unsigned long) high_memory ||
182 isa_virt_to_bus(addr) >= 0x1000000 ||
183 _CROSS_64KB(addr, size, 0))
184 use_virtual_dma = 1;
185 else
186 use_virtual_dma = 0;
187 } else {
188 use_virtual_dma = can_use_virtual_dma & 1;
189 }
190}
191
192#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
193
194
195static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
196{
197 doing_pdma = 1;
198 virtual_dma_port = io;
199 virtual_dma_mode = (mode == DMA_MODE_WRITE);
200 virtual_dma_addr = addr;
201 virtual_dma_count = size;
202 virtual_dma_residue = 0;
203 return 0;
204}
205
206static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
207{
208#ifdef FLOPPY_SANITY_CHECK
209 if (CROSS_64KB(addr, size)) {
210 printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
211 return -1;
212 }
213#endif
214 /* actual, physical DMA */
215 doing_pdma = 0;
216 clear_dma_ff(FLOPPY_DMA);
217 set_dma_mode(FLOPPY_DMA,mode);
218 set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
219 set_dma_count(FLOPPY_DMA,size);
220 enable_dma(FLOPPY_DMA);
221 return 0;
222}
223
224static struct fd_routine_l {
225 int (*_request_dma)(unsigned int dmanr, const char * device_id);
226 void (*_free_dma)(unsigned int dmanr);
227 int (*_get_dma_residue)(unsigned int dummy);
228 unsigned long (*_dma_mem_alloc) (unsigned long size);
229 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
230} fd_routine[] = {
231 {
232 request_dma,
233 free_dma,
234 get_dma_residue,
235 dma_mem_alloc,
236 hard_dma_setup
237 },
238 {
239 vdma_request_dma,
240 vdma_nop,
241 vdma_get_dma_residue,
242 vdma_mem_alloc,
243 vdma_dma_setup
244 }
245};
246
247
248static int FDC1 = 0x3f0;
249static int FDC2 = -1;
250
251/*
252 * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
253 * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
254 * coincides with another rtc CMOS user. Paul G.
255 */
256#define FLOPPY0_TYPE ({ \
257 unsigned long flags; \
258 unsigned char val; \
259 spin_lock_irqsave(&rtc_lock, flags); \
260 val = (CMOS_READ(0x10) >> 4) & 15; \
261 spin_unlock_irqrestore(&rtc_lock, flags); \
262 val; \
263})
264
265#define FLOPPY1_TYPE ({ \
266 unsigned long flags; \
267 unsigned char val; \
268 spin_lock_irqsave(&rtc_lock, flags); \
269 val = CMOS_READ(0x10) & 15; \
270 spin_unlock_irqrestore(&rtc_lock, flags); \
271 val; \
272})
273
274#define N_FDC 2
275#define N_DRIVE 8
276
277#define EXTRA_FLOPPY_PARAMS
278
279#endif /* __ASM_X86_64_FLOPPY_H */
diff --git a/include/asm-x86/frame.i b/include/asm-x86/frame.h
index 03620251ae17..06850a7194e1 100644
--- a/include/asm-x86/frame.i
+++ b/include/asm-x86/frame.h
@@ -1,3 +1,5 @@
1#ifdef __ASSEMBLY__
2
1#include <asm/dwarf2.h> 3#include <asm/dwarf2.h>
2 4
3/* The annotation hides the frame from the unwinder and makes it look 5/* The annotation hides the frame from the unwinder and makes it look
@@ -21,3 +23,5 @@
21 .macro ENDFRAME 23 .macro ENDFRAME
22 .endm 24 .endm
23#endif 25#endif
26
27#endif /* __ASSEMBLY__ */
diff --git a/include/asm-x86/hardirq_32.h b/include/asm-x86/hardirq_32.h
index 34649585bb59..4f85f0f4b563 100644
--- a/include/asm-x86/hardirq_32.h
+++ b/include/asm-x86/hardirq_32.h
@@ -10,10 +10,14 @@ typedef struct {
10 unsigned int __nmi_count; /* arch dependent */ 10 unsigned int __nmi_count; /* arch dependent */
11 unsigned int apic_timer_irqs; /* arch dependent */ 11 unsigned int apic_timer_irqs; /* arch dependent */
12 unsigned int irq0_irqs; 12 unsigned int irq0_irqs;
13 unsigned int irq_resched_count;
14 unsigned int irq_call_count;
15 unsigned int irq_tlb_count;
16 unsigned int irq_thermal_count;
17 unsigned int irq_spurious_count;
13} ____cacheline_aligned irq_cpustat_t; 18} ____cacheline_aligned irq_cpustat_t;
14 19
15DECLARE_PER_CPU(irq_cpustat_t, irq_stat); 20DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
16extern irq_cpustat_t irq_stat[];
17 21
18#define __ARCH_IRQ_STAT 22#define __ARCH_IRQ_STAT
19#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) 23#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
index 09dfc18a6dd0..a470d59da678 100644
--- a/include/asm-x86/hw_irq_64.h
+++ b/include/asm-x86/hw_irq_64.h
@@ -40,22 +40,22 @@
40/* 40/*
41 * Vectors 0x30-0x3f are used for ISA interrupts. 41 * Vectors 0x30-0x3f are used for ISA interrupts.
42 */ 42 */
43#define IRQ0_VECTOR FIRST_EXTERNAL_VECTOR + 0x10 43#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
44#define IRQ1_VECTOR IRQ0_VECTOR + 1 44#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
45#define IRQ2_VECTOR IRQ0_VECTOR + 2 45#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
46#define IRQ3_VECTOR IRQ0_VECTOR + 3 46#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
47#define IRQ4_VECTOR IRQ0_VECTOR + 4 47#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
48#define IRQ5_VECTOR IRQ0_VECTOR + 5 48#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
49#define IRQ6_VECTOR IRQ0_VECTOR + 6 49#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
50#define IRQ7_VECTOR IRQ0_VECTOR + 7 50#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
51#define IRQ8_VECTOR IRQ0_VECTOR + 8 51#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
52#define IRQ9_VECTOR IRQ0_VECTOR + 9 52#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
53#define IRQ10_VECTOR IRQ0_VECTOR + 10 53#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
54#define IRQ11_VECTOR IRQ0_VECTOR + 11 54#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
55#define IRQ12_VECTOR IRQ0_VECTOR + 12 55#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
56#define IRQ13_VECTOR IRQ0_VECTOR + 13 56#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
57#define IRQ14_VECTOR IRQ0_VECTOR + 14 57#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
58#define IRQ15_VECTOR IRQ0_VECTOR + 15 58#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
59 59
60/* 60/*
61 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff 61 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -148,9 +148,6 @@ extern atomic_t irq_mis_count;
148 148
149#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) 149#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
150 150
151#define __STR(x) #x
152#define STR(x) __STR(x)
153
154#include <asm/ptrace.h> 151#include <asm/ptrace.h>
155 152
156#define IRQ_NAME2(nr) nr##_interrupt(void) 153#define IRQ_NAME2(nr) nr##_interrupt(void)
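
The only change in the hunk above is wrapping the IRQn_VECTOR expansions in parentheses. Without them the macro body is spliced into the caller's expression unprotected, so any operator with higher precedence binds to FIRST_EXTERNAL_VECTOR alone. A tiny illustration, not part of the patch (FIRST_EXTERNAL_VECTOR is 0x20 on x86):

/* Sketch: why the parentheses around the vector macros matter. */
#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR 0x20

#define IRQ0_VECTOR_OLD FIRST_EXTERNAL_VECTOR + 0x10
#define IRQ0_VECTOR_NEW (FIRST_EXTERNAL_VECTOR + 0x10)

int main(void)
{
	printf("old: %d\n", 2 * IRQ0_VECTOR_OLD);  /* 2*0x20 + 0x10 = 80 */
	printf("new: %d\n", 2 * IRQ0_VECTOR_NEW);  /* 2*(0x20 + 0x10) = 96 */
	return 0;
}
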
diff --git a/include/asm-x86/intel_arch_perfmon.h b/include/asm-x86/intel_arch_perfmon.h
index 4f6d4e6bf57e..fa0fd068bc2e 100644
--- a/include/asm-x86/intel_arch_perfmon.h
+++ b/include/asm-x86/intel_arch_perfmon.h
@@ -1,5 +1,31 @@
1#ifdef CONFIG_X86_32
2# include "intel_arch_perfmon_32.h"
3#else
4# include "intel_arch_perfmon_64.h"
5#endif
1#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H
2#define _ASM_X86_INTEL_ARCH_PERFMON_H
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
18#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
19 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
20
21union cpuid10_eax {
22 struct {
23 unsigned int version_id:8;
24 unsigned int num_counters:8;
25 unsigned int bit_width:8;
26 unsigned int mask_length:8;
27 } split;
28 unsigned int full;
29};
30
31#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */
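
cpuid10_eax above mirrors EAX of CPUID leaf 0xa, which enumerates the architectural perfmon version, counter count and counter width. A user-space sketch of filling it, not part of the patch and x86-only; it uses GCC's <cpuid.h> helper:

/* Sketch: decode CPUID leaf 0xa into the cpuid10_eax layout shown above. */
#include <stdio.h>
#include <cpuid.h>

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	union cpuid10_eax id;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xa not available");
		return 1;
	}
	id.full = eax;
	printf("arch perfmon v%u, %u counters, %u bits wide\n",
	       id.split.version_id, id.split.num_counters,
	       id.split.bit_width);
	return 0;
}
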
diff --git a/include/asm-x86/intel_arch_perfmon_32.h b/include/asm-x86/intel_arch_perfmon_32.h
deleted file mode 100644
index b52cd60a075b..000000000000
--- a/include/asm-x86/intel_arch_perfmon_32.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef X86_INTEL_ARCH_PERFMON_H
2#define X86_INTEL_ARCH_PERFMON_H 1
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
18#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
19 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
20
21union cpuid10_eax {
22 struct {
23 unsigned int version_id:8;
24 unsigned int num_counters:8;
25 unsigned int bit_width:8;
26 unsigned int mask_length:8;
27 } split;
28 unsigned int full;
29};
30
31#endif /* X86_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86/intel_arch_perfmon_64.h b/include/asm-x86/intel_arch_perfmon_64.h
deleted file mode 100644
index 8633331420ec..000000000000
--- a/include/asm-x86/intel_arch_perfmon_64.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef X86_64_INTEL_ARCH_PERFMON_H
2#define X86_64_INTEL_ARCH_PERFMON_H 1
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
18#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
19 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
20
21union cpuid10_eax {
22 struct {
23 unsigned int version_id:8;
24 unsigned int num_counters:8;
25 unsigned int bit_width:8;
26 unsigned int mask_length:8;
27 } split;
28 unsigned int full;
29};
30
31#endif /* X86_64_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 4ea7b1ad3c1d..fe881cd1e6f4 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -199,17 +199,22 @@ static inline void writel(unsigned int b, volatile void __iomem *addr)
199 199
200#define mmiowb() 200#define mmiowb()
201 201
202static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) 202static inline void
203memset_io(volatile void __iomem *addr, unsigned char val, int count)
203{ 204{
204 memset((void __force *) addr, val, count); 205 memset((void __force *)addr, val, count);
205} 206}
206static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) 207
208static inline void
209memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
207{ 210{
208 __memcpy(dst, (void __force *) src, count); 211 __memcpy(dst, (const void __force *)src, count);
209} 212}
210static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) 213
214static inline void
215memcpy_toio(volatile void __iomem *dst, const void *src, int count)
211{ 216{
212 __memcpy((void __force *) dst, src, count); 217 __memcpy((void __force *)dst, src, count);
213} 218}
214 219
215/* 220/*
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h
index 1e0fd48f18bc..93c894dc5154 100644
--- a/include/asm-x86/ioctls.h
+++ b/include/asm-x86/ioctls.h
@@ -1,13 +1,87 @@
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "ioctls_32.h"
4# else
5# include "ioctls_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "ioctls_32.h"
10# else
11# include "ioctls_64.h"
12# endif
1#ifndef _ASM_X86_IOCTLS_H
2#define _ASM_X86_IOCTLS_H
3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T',0x2A, struct termios2)
51#define TCSETS2 _IOW('T',0x2B, struct termios2)
52#define TCSETSW2 _IOW('T',0x2C, struct termios2)
53#define TCSETSF2 _IOW('T',0x2D, struct termios2)
54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
56
57#define FIONCLEX 0x5450
58#define FIOCLEX 0x5451
59#define FIOASYNC 0x5452
60#define TIOCSERCONFIG 0x5453
61#define TIOCSERGWILD 0x5454
62#define TIOCSERSWILD 0x5455
63#define TIOCGLCKTRMIOS 0x5456
64#define TIOCSLCKTRMIOS 0x5457
65#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
66#define TIOCSERGETLSR 0x5459 /* Get line status register */
67#define TIOCSERGETMULTI 0x545A /* Get multiport config */
68#define TIOCSERSETMULTI 0x545B /* Set multiport config */
69
70#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
71#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
72#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
73#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
74#define FIOQSIZE 0x5460
75
76/* Used for packet mode */
77#define TIOCPKT_DATA 0
78#define TIOCPKT_FLUSHREAD 1
79#define TIOCPKT_FLUSHWRITE 2
80#define TIOCPKT_STOP 4
81#define TIOCPKT_START 8
82#define TIOCPKT_NOSTOP 16
83#define TIOCPKT_DOSTOP 32
84
85#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
86
13#endif
87#endif
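
These are the classic terminal ioctl numbers, reached from user space through ioctl(2). A small example, not part of the patch, using TIOCGWINSZ to read the terminal size:

/* Sketch: query the controlling terminal's window size. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0) {
		perror("TIOCGWINSZ");
		return 1;
	}
	printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
	return 0;
}
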
diff --git a/include/asm-x86/ioctls_32.h b/include/asm-x86/ioctls_32.h
deleted file mode 100644
index ef5878762dc9..000000000000
--- a/include/asm-x86/ioctls_32.h
+++ /dev/null
@@ -1,87 +0,0 @@
1#ifndef __ARCH_I386_IOCTLS_H__
2#define __ARCH_I386_IOCTLS_H__
3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T',0x2A, struct termios2)
51#define TCSETS2 _IOW('T',0x2B, struct termios2)
52#define TCSETSW2 _IOW('T',0x2C, struct termios2)
53#define TCSETSF2 _IOW('T',0x2D, struct termios2)
54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
56
57#define FIONCLEX 0x5450
58#define FIOCLEX 0x5451
59#define FIOASYNC 0x5452
60#define TIOCSERCONFIG 0x5453
61#define TIOCSERGWILD 0x5454
62#define TIOCSERSWILD 0x5455
63#define TIOCGLCKTRMIOS 0x5456
64#define TIOCSLCKTRMIOS 0x5457
65#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
66#define TIOCSERGETLSR 0x5459 /* Get line status register */
67#define TIOCSERGETMULTI 0x545A /* Get multiport config */
68#define TIOCSERSETMULTI 0x545B /* Set multiport config */
69
70#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
71#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
72#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
73#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
74#define FIOQSIZE 0x5460
75
76/* Used for packet mode */
77#define TIOCPKT_DATA 0
78#define TIOCPKT_FLUSHREAD 1
79#define TIOCPKT_FLUSHWRITE 2
80#define TIOCPKT_STOP 4
81#define TIOCPKT_START 8
82#define TIOCPKT_NOSTOP 16
83#define TIOCPKT_DOSTOP 32
84
85#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
86
87#endif
diff --git a/include/asm-x86/ioctls_64.h b/include/asm-x86/ioctls_64.h
deleted file mode 100644
index 3fc0b15a0d7e..000000000000
--- a/include/asm-x86/ioctls_64.h
+++ /dev/null
@@ -1,86 +0,0 @@
1#ifndef __ARCH_X8664_IOCTLS_H__
2#define __ARCH_X8664_IOCTLS_H__
3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCSBRK 0x5427 /* BSD compatibility */
47#define TIOCCBRK 0x5428 /* BSD compatibility */
48#define TIOCGSID 0x5429 /* Return the session ID of FD */
49#define TCGETS2 _IOR('T',0x2A, struct termios2)
50#define TCSETS2 _IOW('T',0x2B, struct termios2)
51#define TCSETSW2 _IOW('T',0x2C, struct termios2)
52#define TCSETSF2 _IOW('T',0x2D, struct termios2)
53#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
54#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
55
56#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
57#define FIOCLEX 0x5451
58#define FIOASYNC 0x5452
59#define TIOCSERCONFIG 0x5453
60#define TIOCSERGWILD 0x5454
61#define TIOCSERSWILD 0x5455
62#define TIOCGLCKTRMIOS 0x5456
63#define TIOCSLCKTRMIOS 0x5457
64#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
65#define TIOCSERGETLSR 0x5459 /* Get line status register */
66#define TIOCSERGETMULTI 0x545A /* Get multiport config */
67#define TIOCSERSETMULTI 0x545B /* Set multiport config */
68
69#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
70#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
71#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
72#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
73#define FIOQSIZE 0x5460
74
75/* Used for packet mode */
76#define TIOCPKT_DATA 0
77#define TIOCPKT_FLUSHREAD 1
78#define TIOCPKT_FLUSHWRITE 2
79#define TIOCPKT_STOP 4
80#define TIOCPKT_START 8
81#define TIOCPKT_NOSTOP 16
82#define TIOCPKT_DOSTOP 32
83
84#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
85
86#endif
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h
index eb2e448c6e28..2adf8b39a40b 100644
--- a/include/asm-x86/ipcbuf.h
+++ b/include/asm-x86/ipcbuf.h
@@ -1,13 +1,29 @@
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "ipcbuf_32.h"
4# else
5# include "ipcbuf_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "ipcbuf_32.h"
10# else
11# include "ipcbuf_64.h"
12# endif
13#endif
1#ifndef _ASM_X86_IPCBUF_H
2#define _ASM_X86_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for x86 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode_t and seq
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid32_t uid;
18 __kernel_gid32_t gid;
19 __kernel_uid32_t cuid;
20 __kernel_gid32_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned short __pad2;
25 unsigned long __unused1;
26 unsigned long __unused2;
27};
28
29#endif /* _ASM_X86_IPCBUF_H */
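
The comment in the unified ipcbuf.h notes that ipc64_perm carries explicit padding after mode and seq plus two spare longs. A stand-alone sketch, not part of the patch, that mirrors the struct using the x86-64 widths of the __kernel_* typedefs (an assumption; the real widths come from posix_types and differ on 32-bit) and prints where the padding lands:

/* Sketch: make the documented padding of ipc64_perm visible. */
#include <stdio.h>
#include <stddef.h>

struct ipc64_perm_like {
	int            key;                       /* __kernel_key_t */
	unsigned int   uid, gid, cuid, cgid;      /* __kernel_[ug]id32_t */
	unsigned int   mode;                      /* __kernel_mode_t on x86-64 */
	unsigned short __pad1;
	unsigned short seq;
	unsigned short __pad2;
	unsigned long  __unused1, __unused2;
};

int main(void)
{
	printf("sizeof              = %zu\n", sizeof(struct ipc64_perm_like));
	printf("offsetof(seq)       = %zu\n",
	       offsetof(struct ipc64_perm_like, seq));
	printf("offsetof(__unused1) = %zu\n",
	       offsetof(struct ipc64_perm_like, __unused1));
	return 0;
}
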
diff --git a/include/asm-x86/ipcbuf_32.h b/include/asm-x86/ipcbuf_32.h
deleted file mode 100644
index 0dcad4f84c2a..000000000000
--- a/include/asm-x86/ipcbuf_32.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __i386_IPCBUF_H__
2#define __i386_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode_t and seq
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid32_t uid;
18 __kernel_gid32_t gid;
19 __kernel_uid32_t cuid;
20 __kernel_gid32_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned short __pad2;
25 unsigned long __unused1;
26 unsigned long __unused2;
27};
28
29#endif /* __i386_IPCBUF_H__ */
diff --git a/include/asm-x86/ipcbuf_64.h b/include/asm-x86/ipcbuf_64.h
deleted file mode 100644
index 470cf85e3ba8..000000000000
--- a/include/asm-x86/ipcbuf_64.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __x86_64_IPCBUF_H__
2#define __x86_64_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for x86_64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode_t and seq
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid32_t uid;
18 __kernel_gid32_t gid;
19 __kernel_uid32_t cuid;
20 __kernel_gid32_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned short __pad2;
25 unsigned long __unused1;
26 unsigned long __unused2;
27};
28
29#endif /* __x86_64_IPCBUF_H__ */
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index 38479106c259..e2f9b62e535e 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -1,5 +1,33 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_KDEBUG_H
2# include "kdebug_32.h" 2#define _ASM_X86_KDEBUG_H
3#else 3
4# include "kdebug_64.h" 4#include <linux/notifier.h>
5
6struct pt_regs;
7
8/* Grossly misnamed. */
9enum die_val {
10 DIE_OOPS = 1,
11 DIE_INT3,
12 DIE_DEBUG,
13 DIE_PANIC,
14 DIE_NMI,
15 DIE_DIE,
16 DIE_NMIWATCHDOG,
17 DIE_KERNELDEBUG,
18 DIE_TRAP,
19 DIE_GPF,
20 DIE_CALL,
21 DIE_NMI_IPI,
22 DIE_PAGE_FAULT,
23};
24
25extern void printk_address(unsigned long address);
26extern void die(const char *,struct pt_regs *,long);
27extern void __die(const char *,struct pt_regs *,long);
28extern void show_registers(struct pt_regs *regs);
29extern void dump_pagetable(unsigned long);
30extern unsigned long oops_begin(void);
31extern void oops_end(unsigned long);
32
5#endif 33#endif
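
Illustration (not part of the patch): enum die_val above is the event code handed to the kernel's die notifier chain; kprobes, kgdb and similar debuggers register a callback and filter on these values. A hedged in-kernel sketch, assuming the generic register_die_notifier()/struct die_args interface from linux/kdebug.h in this kernel era (that interface is not shown in this diff).

#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>

static int my_die_handler(struct notifier_block *nb,
                          unsigned long val, void *data)
{
        struct die_args *args = data;

        /* Only react to oopses and general protection faults. */
        if (val == DIE_OOPS || val == DIE_GPF)
                printk(KERN_ERR "die event %lu: %s (err %ld)\n",
                       val, args->str, args->err);
        return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
        .notifier_call = my_die_handler,
};

static int __init my_init(void)
{
        return register_die_notifier(&my_die_nb);
}

static void __exit my_exit(void)
{
        unregister_die_notifier(&my_die_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
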
diff --git a/include/asm-x86/kdebug_32.h b/include/asm-x86/kdebug_32.h
deleted file mode 100644
index 181d437eef4b..000000000000
--- a/include/asm-x86/kdebug_32.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef _I386_KDEBUG_H
2#define _I386_KDEBUG_H 1
3
4/*
5 * Aug-05 2004 Ported by Prasanna S Panchamukhi <prasanna@in.ibm.com>
6 * from x86_64 architecture.
7 */
8
9struct pt_regs;
10
11/* Grossly misnamed. */
12enum die_val {
13 DIE_OOPS = 1,
14 DIE_INT3,
15 DIE_DEBUG,
16 DIE_PANIC,
17 DIE_NMI,
18 DIE_DIE,
19 DIE_NMIWATCHDOG,
20 DIE_KERNELDEBUG,
21 DIE_TRAP,
22 DIE_GPF,
23 DIE_CALL,
24 DIE_NMI_IPI,
25};
26
27#endif
diff --git a/include/asm-x86/kdebug_64.h b/include/asm-x86/kdebug_64.h
deleted file mode 100644
index df413e05375e..000000000000
--- a/include/asm-x86/kdebug_64.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef _X86_64_KDEBUG_H
2#define _X86_64_KDEBUG_H 1
3
4#include <linux/compiler.h>
5
6struct pt_regs;
7
8/* Grossly misnamed. */
9enum die_val {
10 DIE_OOPS = 1,
11 DIE_INT3,
12 DIE_DEBUG,
13 DIE_PANIC,
14 DIE_NMI,
15 DIE_DIE,
16 DIE_NMIWATCHDOG,
17 DIE_KERNELDEBUG,
18 DIE_TRAP,
19 DIE_GPF,
20 DIE_CALL,
21 DIE_NMI_IPI,
22};
23
24extern void printk_address(unsigned long address);
25extern void die(const char *,struct pt_regs *,long);
26extern void __die(const char *,struct pt_regs *,long);
27extern void show_registers(struct pt_regs *regs);
28extern void dump_pagetable(unsigned long);
29extern unsigned long oops_begin(void);
30extern void oops_end(unsigned long);
31
32#endif
diff --git a/include/asm-x86/kmap_types.h b/include/asm-x86/kmap_types.h
index e4ec724b298e..5f4174132a22 100644
--- a/include/asm-x86/kmap_types.h
+++ b/include/asm-x86/kmap_types.h
@@ -1,5 +1,29 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_KMAP_TYPES_H
2# include "kmap_types_32.h" 2#define _ASM_X86_KMAP_TYPES_H
3
4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
5# define D(n) __KM_FENCE_##n ,
3#else 6#else
4# include "kmap_types_64.h" 7# define D(n)
8#endif
9
10enum km_type {
11D(0) KM_BOUNCE_READ,
12D(1) KM_SKB_SUNRPC_DATA,
13D(2) KM_SKB_DATA_SOFTIRQ,
14D(3) KM_USER0,
15D(4) KM_USER1,
16D(5) KM_BIO_SRC_IRQ,
17D(6) KM_BIO_DST_IRQ,
18D(7) KM_PTE0,
19D(8) KM_PTE1,
20D(9) KM_IRQ0,
21D(10) KM_IRQ1,
22D(11) KM_SOFTIRQ0,
23D(12) KM_SOFTIRQ1,
24D(13) KM_TYPE_NR
25};
26
27#undef D
28
5#endif 29#endif
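
Illustration (not part of the patch): the D(n) macro above is purely a debugging aid. With CONFIG_X86_32 and CONFIG_DEBUG_HIGHMEM it interleaves a __KM_FENCE_n dummy enumerator before every real kmap slot, so a mapping that runs past its slot lands on an unused fence entry instead of silently clobbering its neighbour; otherwise D(n) expands to nothing and the enum collapses to the plain list. A sketch of what the preprocessor produces in each case (the enum tag names and the values in comments are illustrative only).

/* With CONFIG_X86_32 && CONFIG_DEBUG_HIGHMEM the enum becomes: */
enum km_type_debug {
        __KM_FENCE_0, KM_BOUNCE_READ,          /* 0, 1 */
        __KM_FENCE_1, KM_SKB_SUNRPC_DATA,      /* 2, 3 */
        __KM_FENCE_2, KM_SKB_DATA_SOFTIRQ,     /* 4, 5 */
        /* ... one fence enumerator per real slot ... */
};

/* Without the debug option, D(n) expands to nothing: */
enum km_type_plain {
        KM_BOUNCE_READ,                        /* 0 */
        KM_SKB_SUNRPC_DATA,                    /* 1 */
        KM_SKB_DATA_SOFTIRQ,                   /* 2 */
        /* ... KM_TYPE_NR ends up equal to the number of real slots */
};
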
diff --git a/include/asm-x86/kmap_types_32.h b/include/asm-x86/kmap_types_32.h
deleted file mode 100644
index 806aae3c5338..000000000000
--- a/include/asm-x86/kmap_types_32.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _ASM_KMAP_TYPES_H
2#define _ASM_KMAP_TYPES_H
3
4
5#ifdef CONFIG_DEBUG_HIGHMEM
6# define D(n) __KM_FENCE_##n ,
7#else
8# define D(n)
9#endif
10
11enum km_type {
12D(0) KM_BOUNCE_READ,
13D(1) KM_SKB_SUNRPC_DATA,
14D(2) KM_SKB_DATA_SOFTIRQ,
15D(3) KM_USER0,
16D(4) KM_USER1,
17D(5) KM_BIO_SRC_IRQ,
18D(6) KM_BIO_DST_IRQ,
19D(7) KM_PTE0,
20D(8) KM_PTE1,
21D(9) KM_IRQ0,
22D(10) KM_IRQ1,
23D(11) KM_SOFTIRQ0,
24D(12) KM_SOFTIRQ1,
25D(13) KM_TYPE_NR
26};
27
28#undef D
29
30#endif
diff --git a/include/asm-x86/kmap_types_64.h b/include/asm-x86/kmap_types_64.h
deleted file mode 100644
index 7486338c6cea..000000000000
--- a/include/asm-x86/kmap_types_64.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _ASM_KMAP_TYPES_H
2#define _ASM_KMAP_TYPES_H
3
4enum km_type {
5 KM_BOUNCE_READ,
6 KM_SKB_SUNRPC_DATA,
7 KM_SKB_DATA_SOFTIRQ,
8 KM_USER0,
9 KM_USER1,
10 KM_BIO_SRC_IRQ,
11 KM_BIO_DST_IRQ,
12 KM_IRQ0,
13 KM_IRQ1,
14 KM_SOFTIRQ0,
15 KM_SOFTIRQ1,
16 KM_TYPE_NR
17};
18
19#endif
diff --git a/include/asm-x86/ldt.h b/include/asm-x86/ldt.h
index 3d9cc20d2ba4..20c597242b53 100644
--- a/include/asm-x86/ldt.h
+++ b/include/asm-x86/ldt.h
@@ -1,13 +1,40 @@
1#ifdef __KERNEL__ 1/*
2# ifdef CONFIG_X86_32 2 * ldt.h
3# include "ldt_32.h" 3 *
4# else 4 * Definitions of structures used with the modify_ldt system call.
5# include "ldt_64.h" 5 */
6# endif 6#ifndef _ASM_X86_LDT_H
7#else 7#define _ASM_X86_LDT_H
8# ifdef __i386__ 8
9# include "ldt_32.h" 9/* Maximum number of LDT entries supported. */
10# else 10#define LDT_ENTRIES 8192
11# include "ldt_64.h" 11/* The size of each LDT entry. */
12# endif 12#define LDT_ENTRY_SIZE 8
13
14#ifndef __ASSEMBLY__
15/*
16 * Note on 64bit base and limit is ignored and you cannot set DS/ES/CS
17 * not to the default values if you still want to do syscalls. This
18 * call is more for 32bit mode therefore.
19 */
20struct user_desc {
21 unsigned int entry_number;
22 unsigned int base_addr;
23 unsigned int limit;
24 unsigned int seg_32bit:1;
25 unsigned int contents:2;
26 unsigned int read_exec_only:1;
27 unsigned int limit_in_pages:1;
28 unsigned int seg_not_present:1;
29 unsigned int useable:1;
30#ifdef __x86_64__
31 unsigned int lm:1;
32#endif
33};
34
35#define MODIFY_LDT_CONTENTS_DATA 0
36#define MODIFY_LDT_CONTENTS_STACK 1
37#define MODIFY_LDT_CONTENTS_CODE 2
38
39#endif /* !__ASSEMBLY__ */
13#endif 40#endif
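
Illustration (not part of the patch): struct user_desc above is the record userspace hands to the modify_ldt system call (and to set_thread_area); the unified header now carries the 64-bit-only lm bit under __x86_64__ instead of keeping two copies. A minimal userspace sketch that installs one 32-bit data segment into the LDT, using the raw syscall(2) interface since glibc provides no wrapper.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>        /* struct user_desc, MODIFY_LDT_CONTENTS_* */

int main(void)
{
        struct user_desc d;

        memset(&d, 0, sizeof(d));
        d.entry_number   = 0;                          /* first LDT slot */
        d.base_addr      = 0;                          /* flat base */
        d.limit          = 0xfffff;                    /* maximum limit ... */
        d.limit_in_pages = 1;                          /* ... counted in 4K pages */
        d.seg_32bit      = 1;
        d.contents       = MODIFY_LDT_CONTENTS_DATA;
        d.useable        = 1;

        /* func=1 writes an entry; func=0 would read back the current LDT. */
        if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) != 0) {
                perror("modify_ldt");
                return 1;
        }
        printf("installed LDT entry %u\n", d.entry_number);
        return 0;
}
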
diff --git a/include/asm-x86/ldt_32.h b/include/asm-x86/ldt_32.h
deleted file mode 100644
index e9d3de1dee6c..000000000000
--- a/include/asm-x86/ldt_32.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * ldt.h
3 *
4 * Definitions of structures used with the modify_ldt system call.
5 */
6#ifndef _LINUX_LDT_H
7#define _LINUX_LDT_H
8
9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192
11/* The size of each LDT entry. */
12#define LDT_ENTRY_SIZE 8
13
14#ifndef __ASSEMBLY__
15struct user_desc {
16 unsigned int entry_number;
17 unsigned long base_addr;
18 unsigned int limit;
19 unsigned int seg_32bit:1;
20 unsigned int contents:2;
21 unsigned int read_exec_only:1;
22 unsigned int limit_in_pages:1;
23 unsigned int seg_not_present:1;
24 unsigned int useable:1;
25};
26
27#define MODIFY_LDT_CONTENTS_DATA 0
28#define MODIFY_LDT_CONTENTS_STACK 1
29#define MODIFY_LDT_CONTENTS_CODE 2
30
31#endif /* !__ASSEMBLY__ */
32#endif
diff --git a/include/asm-x86/ldt_64.h b/include/asm-x86/ldt_64.h
deleted file mode 100644
index 9ef647b890d2..000000000000
--- a/include/asm-x86/ldt_64.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * ldt.h
3 *
4 * Definitions of structures used with the modify_ldt system call.
5 */
6#ifndef _LINUX_LDT_H
7#define _LINUX_LDT_H
8
9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192
11/* The size of each LDT entry. */
12#define LDT_ENTRY_SIZE 8
13
14#ifndef __ASSEMBLY__
15/* Note on 64bit base and limit is ignored and you cannot set
16 DS/ES/CS not to the default values if you still want to do syscalls. This
17 call is more for 32bit mode therefore. */
18struct user_desc {
19 unsigned int entry_number;
20 unsigned int base_addr;
21 unsigned int limit;
22 unsigned int seg_32bit:1;
23 unsigned int contents:2;
24 unsigned int read_exec_only:1;
25 unsigned int limit_in_pages:1;
26 unsigned int seg_not_present:1;
27 unsigned int useable:1;
28 unsigned int lm:1;
29};
30
31#define MODIFY_LDT_CONTENTS_DATA 0
32#define MODIFY_LDT_CONTENTS_STACK 1
33#define MODIFY_LDT_CONTENTS_CODE 2
34
35#endif /* !__ASSEMBLY__ */
36#endif
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h
index 7bcb350c3ee8..ae9841319094 100644
--- a/include/asm-x86/mach-default/mach_apicdef.h
+++ b/include/asm-x86/mach-default/mach_apicdef.h
@@ -1,11 +1,17 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define __ASM_MACH_APICDEF_H
3 3
4#include <asm/apic.h>
5
4#define APIC_ID_MASK (0xF<<24) 6#define APIC_ID_MASK (0xF<<24)
5 7
6static inline unsigned get_apic_id(unsigned long x) 8static inline unsigned get_apic_id(unsigned long x)
7{ 9{
8 return (((x)>>24)&0xF); 10 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
11 if (APIC_XAPIC(ver))
12 return (((x)>>24)&0xFF);
13 else
14 return (((x)>>24)&0xF);
9} 15}
10 16
11#define GET_APIC_ID(x) get_apic_id(x) 17#define GET_APIC_ID(x) get_apic_id(x)
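
Illustration (not part of the patch): the change to get_apic_id() above makes the default subarchitecture honour the full 8-bit physical APIC ID when the local APIC reports an xAPIC version, instead of always masking to 4 bits, so boxes with more than 15 APIC IDs stop aliasing. A tiny standalone sketch of the two masks; the raw register value is made up for illustration.

#include <stdio.h>

int main(void)
{
        unsigned long apic_id_reg = 0x17UL << 24;      /* hypothetical raw APIC_ID: ID 23 */

        unsigned legacy = (apic_id_reg >> 24) & 0xF;   /* pre-patch mask  -> 7  (aliased) */
        unsigned xapic  = (apic_id_reg >> 24) & 0xFF;  /* xAPIC-aware mask -> 23 (correct) */

        printf("legacy=%u xapic=%u\n", legacy, xapic);
        return 0;
}
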
diff --git a/include/asm-x86/mach-visws/cobalt.h b/include/asm-x86/mach-visws/cobalt.h
index 33c36225a042..995258831b7f 100644
--- a/include/asm-x86/mach-visws/cobalt.h
+++ b/include/asm-x86/mach-visws/cobalt.h
@@ -94,22 +94,22 @@
94#define CO_IRQ_8259 CO_IRQ(CO_APIC_8259) 94#define CO_IRQ_8259 CO_IRQ(CO_APIC_8259)
95 95
96#ifdef CONFIG_X86_VISWS_APIC 96#ifdef CONFIG_X86_VISWS_APIC
97extern __inline void co_cpu_write(unsigned long reg, unsigned long v) 97static inline void co_cpu_write(unsigned long reg, unsigned long v)
98{ 98{
99 *((volatile unsigned long *)(CO_CPU_VADDR+reg))=v; 99 *((volatile unsigned long *)(CO_CPU_VADDR+reg))=v;
100} 100}
101 101
102extern __inline unsigned long co_cpu_read(unsigned long reg) 102static inline unsigned long co_cpu_read(unsigned long reg)
103{ 103{
104 return *((volatile unsigned long *)(CO_CPU_VADDR+reg)); 104 return *((volatile unsigned long *)(CO_CPU_VADDR+reg));
105} 105}
106 106
107extern __inline void co_apic_write(unsigned long reg, unsigned long v) 107static inline void co_apic_write(unsigned long reg, unsigned long v)
108{ 108{
109 *((volatile unsigned long *)(CO_APIC_VADDR+reg))=v; 109 *((volatile unsigned long *)(CO_APIC_VADDR+reg))=v;
110} 110}
111 111
112extern __inline unsigned long co_apic_read(unsigned long reg) 112static inline unsigned long co_apic_read(unsigned long reg)
113{ 113{
114 return *((volatile unsigned long *)(CO_APIC_VADDR+reg)); 114 return *((volatile unsigned long *)(CO_APIC_VADDR+reg));
115} 115}
diff --git a/include/asm-x86/mach-visws/lithium.h b/include/asm-x86/mach-visws/lithium.h
index d443e68d0066..dfcd4f07ab85 100644
--- a/include/asm-x86/mach-visws/lithium.h
+++ b/include/asm-x86/mach-visws/lithium.h
@@ -29,22 +29,22 @@
29#define LI_INTD 0x0080 29#define LI_INTD 0x0080
30 30
31/* More special purpose macros... */ 31/* More special purpose macros... */
32extern __inline void li_pcia_write16(unsigned long reg, unsigned short v) 32static inline void li_pcia_write16(unsigned long reg, unsigned short v)
33{ 33{
34 *((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v; 34 *((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v;
35} 35}
36 36
37extern __inline unsigned short li_pcia_read16(unsigned long reg) 37static inline unsigned short li_pcia_read16(unsigned long reg)
38{ 38{
39 return *((volatile unsigned short *)(LI_PCIA_VADDR+reg)); 39 return *((volatile unsigned short *)(LI_PCIA_VADDR+reg));
40} 40}
41 41
42extern __inline void li_pcib_write16(unsigned long reg, unsigned short v) 42static inline void li_pcib_write16(unsigned long reg, unsigned short v)
43{ 43{
44 *((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v; 44 *((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v;
45} 45}
46 46
47extern __inline unsigned short li_pcib_read16(unsigned long reg) 47static inline unsigned short li_pcib_read16(unsigned long reg)
48{ 48{
49 return *((volatile unsigned short *)(LI_PCIB_VADDR+reg)); 49 return *((volatile unsigned short *)(LI_PCIB_VADDR+reg));
50} 50}
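
Illustration (not part of the patch): the cobalt.h and lithium.h hunks are the same mechanical fix. 'extern __inline' in a header depends on gnu89 inline semantics: under C99 rules every includer emits an external definition (multiple-definition link errors), while under gnu89 no out-of-line copy exists at all and an un-inlined call fails to link. Plain 'static inline' gives each translation unit its own private copy with no linkage surprises. A reduced sketch of the pattern, with a hypothetical register accessor standing in for the VISWS helpers.

/* Old style: meaning flips between gnu89 and C99 inline rules, and can
 * produce either duplicate or missing definitions at link time. */
extern __inline unsigned long reg_read_old(volatile unsigned long *reg)
{
        return *reg;
}

/* New style: one private copy per translation unit, no linkage games. */
static inline unsigned long reg_read_new(volatile unsigned long *reg)
{
        return *reg;
}
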
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h
index cc8ca389912e..df304fd89c27 100644
--- a/include/asm-x86/mce.h
+++ b/include/asm-x86/mce.h
@@ -1,5 +1,129 @@
1#ifndef _ASM_X86_MCE_H
2#define _ASM_X86_MCE_H
3
4#ifdef __x86_64__
5
6#include <asm/ioctls.h>
7#include <asm/types.h>
8
9/*
10 * Machine Check support for x86
11 */
12
13#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
14
15#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
16#define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */
17#define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */
18
19#define MCI_STATUS_VAL (1UL<<63) /* valid error */
20#define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */
21#define MCI_STATUS_UC (1UL<<61) /* uncorrected error */
22#define MCI_STATUS_EN (1UL<<60) /* error enabled */
23#define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */
24#define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */
25#define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */
26
27/* Fields are zero when not available */
28struct mce {
29 __u64 status;
30 __u64 misc;
31 __u64 addr;
32 __u64 mcgstatus;
33 __u64 rip;
34 __u64 tsc; /* cpu time stamp counter */
35 __u64 res1; /* for future extension */
36 __u64 res2; /* dito. */
37 __u8 cs; /* code segment */
38 __u8 bank; /* machine check bank */
39 __u8 cpu; /* cpu that raised the error */
40 __u8 finished; /* entry is valid */
41 __u32 pad;
42};
43
44/*
45 * This structure contains all data related to the MCE log. Also
46 * carries a signature to make it easier to find from external
47 * debugging tools. Each entry is only valid when its finished flag
48 * is set.
49 */
50
51#define MCE_LOG_LEN 32
52
53struct mce_log {
54 char signature[12]; /* "MACHINECHECK" */
55 unsigned len; /* = MCE_LOG_LEN */
56 unsigned next;
57 unsigned flags;
58 unsigned pad0;
59 struct mce entry[MCE_LOG_LEN];
60};
61
62#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
63
64#define MCE_LOG_SIGNATURE "MACHINECHECK"
65
66#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
67#define MCE_GET_LOG_LEN _IOR('M', 2, int)
68#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
69
70/* Software defined banks */
71#define MCE_EXTENDED_BANK 128
72#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
73
74#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
75#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9)
76#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9)
77#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9)
78#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9)
79#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9)
80#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
81#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
82
83#endif /* __x86_64__ */
84
85#ifdef __KERNEL__
86
1#ifdef CONFIG_X86_32 87#ifdef CONFIG_X86_32
2# include "mce_32.h" 88#ifdef CONFIG_X86_MCE
89extern void mcheck_init(struct cpuinfo_x86 *c);
3#else 90#else
4# include "mce_64.h" 91#define mcheck_init(c) do {} while(0)
92#endif
93
94extern int mce_disabled;
95
96#else /* CONFIG_X86_32 */
97
98#include <asm/atomic.h>
99
100void mce_log(struct mce *m);
101DECLARE_PER_CPU(struct sys_device, device_mce);
102
103#ifdef CONFIG_X86_MCE_INTEL
104void mce_intel_feature_init(struct cpuinfo_x86 *c);
105#else
106static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
107#endif
108
109#ifdef CONFIG_X86_MCE_AMD
110void mce_amd_feature_init(struct cpuinfo_x86 *c);
111#else
112static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
113#endif
114
115void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
116
117extern atomic_t mce_entry;
118
119extern void do_machine_check(struct pt_regs *, long);
120extern int mce_notify_user(void);
121
122#endif /* !CONFIG_X86_32 */
123
124extern void stop_mce(void);
125extern void restart_mce(void);
126
127#endif /* __KERNEL__ */
128
5#endif 129#endif
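
Illustration (not part of the patch): the struct mce/mce_log definitions and the MCE_GET_* ioctls above are the userspace ABI behind mcelog(8): the kernel appends struct mce records to a ring and a reader drains them through a character device. A hedged sketch of that consumer; the /dev/mcelog node name is conventional rather than defined here, and the include assumes the header is exported to userspace as <asm/mce.h> (mcelog historically ships its own copy of these definitions).

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mce.h>        /* struct mce, MCE_GET_RECORD_LEN, MCE_GET_LOG_LEN */

int main(void)
{
        int fd = open("/dev/mcelog", O_RDONLY);
        if (fd < 0) {
                perror("open /dev/mcelog");
                return 1;
        }

        int rec_len = 0, log_len = 0;
        /* The device reports its record size and ring length via ioctl,
         * so a reader keeps working if struct mce grows later. */
        ioctl(fd, MCE_GET_RECORD_LEN, &rec_len);
        ioctl(fd, MCE_GET_LOG_LEN, &log_len);

        char *buf = malloc((size_t)rec_len * log_len);
        ssize_t n = read(fd, buf, (size_t)rec_len * log_len);

        for (ssize_t off = 0; off + rec_len <= n; off += rec_len) {
                struct mce *m = (struct mce *)(buf + off);
                if (!m->finished)       /* entry is only valid once finished */
                        continue;
                printf("cpu %u bank %u status %#llx addr %#llx\n",
                       m->cpu, m->bank,
                       (unsigned long long)m->status,
                       (unsigned long long)m->addr);
        }

        free(buf);
        close(fd);
        return 0;
}
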
diff --git a/include/asm-x86/mce_32.h b/include/asm-x86/mce_32.h
deleted file mode 100644
index d56d89742e8f..000000000000
--- a/include/asm-x86/mce_32.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifdef CONFIG_X86_MCE
2extern void mcheck_init(struct cpuinfo_x86 *c);
3#else
4#define mcheck_init(c) do {} while(0)
5#endif
6
7extern int mce_disabled;
8
9extern void stop_mce(void);
10extern void restart_mce(void);
11
diff --git a/include/asm-x86/mce_64.h b/include/asm-x86/mce_64.h
deleted file mode 100644
index 7bc030a1996d..000000000000
--- a/include/asm-x86/mce_64.h
+++ /dev/null
@@ -1,115 +0,0 @@
1#ifndef _ASM_MCE_H
2#define _ASM_MCE_H 1
3
4#include <asm/ioctls.h>
5#include <asm/types.h>
6
7/*
8 * Machine Check support for x86
9 */
10
11#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
12
13#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
14#define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */
15#define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */
16
17#define MCI_STATUS_VAL (1UL<<63) /* valid error */
18#define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */
19#define MCI_STATUS_UC (1UL<<61) /* uncorrected error */
20#define MCI_STATUS_EN (1UL<<60) /* error enabled */
21#define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */
22#define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */
23#define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */
24
25/* Fields are zero when not available */
26struct mce {
27 __u64 status;
28 __u64 misc;
29 __u64 addr;
30 __u64 mcgstatus;
31 __u64 rip;
32 __u64 tsc; /* cpu time stamp counter */
33 __u64 res1; /* for future extension */
34 __u64 res2; /* dito. */
35 __u8 cs; /* code segment */
36 __u8 bank; /* machine check bank */
37 __u8 cpu; /* cpu that raised the error */
38 __u8 finished; /* entry is valid */
39 __u32 pad;
40};
41
42/*
43 * This structure contains all data related to the MCE log.
44 * Also carries a signature to make it easier to find from external debugging tools.
45 * Each entry is only valid when its finished flag is set.
46 */
47
48#define MCE_LOG_LEN 32
49
50struct mce_log {
51 char signature[12]; /* "MACHINECHECK" */
52 unsigned len; /* = MCE_LOG_LEN */
53 unsigned next;
54 unsigned flags;
55 unsigned pad0;
56 struct mce entry[MCE_LOG_LEN];
57};
58
59#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
60
61#define MCE_LOG_SIGNATURE "MACHINECHECK"
62
63#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
64#define MCE_GET_LOG_LEN _IOR('M', 2, int)
65#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
66
67/* Software defined banks */
68#define MCE_EXTENDED_BANK 128
69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
70
71#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
72#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9)
73#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9)
74#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9)
75#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9)
76#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9)
77#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
78#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
79
80#ifdef __KERNEL__
81#include <asm/atomic.h>
82
83void mce_log(struct mce *m);
84DECLARE_PER_CPU(struct sys_device, device_mce);
85
86#ifdef CONFIG_X86_MCE_INTEL
87void mce_intel_feature_init(struct cpuinfo_x86 *c);
88#else
89static inline void mce_intel_feature_init(struct cpuinfo_x86 *c)
90{
91}
92#endif
93
94#ifdef CONFIG_X86_MCE_AMD
95void mce_amd_feature_init(struct cpuinfo_x86 *c);
96#else
97static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
98{
99}
100#endif
101
102void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
103
104extern atomic_t mce_entry;
105
106extern void do_machine_check(struct pt_regs *, long);
107
108extern int mce_notify_user(void);
109
110extern void stop_mce(void);
111extern void restart_mce(void);
112
113#endif
114
115#endif
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h
index 322db07e82c3..c1682b542daf 100644
--- a/include/asm-x86/mman.h
+++ b/include/asm-x86/mman.h
@@ -1,13 +1,19 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_MMAN_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_MMAN_H
3# include "mman_32.h" 3
4# else 4#include <asm-generic/mman.h>
5# include "mman_64.h" 5
6# endif 6#define MAP_32BIT 0x40 /* only give out 32bit addresses */
7#else 7
8# ifdef __i386__ 8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
9# include "mman_32.h" 9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
10# else 10#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
11# include "mman_64.h" 11#define MAP_LOCKED 0x2000 /* pages are locked */
12# endif 12#define MAP_NORESERVE 0x4000 /* don't check for reservations */
13#endif 13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
15
16#define MCL_CURRENT 1 /* lock all current mappings */
17#define MCL_FUTURE 2 /* lock all future mappings */
18
19#endif /* _ASM_X86_MMAN_H */
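
Illustration (not part of the patch): MAP_32BIT above is the formerly x86_64-only flag that the shared header now carries for both sub-architectures' userspace; it asks for a mapping placed in the low 2 GiB of the address space so code that truncates pointers keeps working. A small sketch of requesting such a mapping on x86_64 (an anonymous page, purely for illustration).

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /* Ask for one anonymous page placed in the low 2 GiB (x86_64 only). */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        printf("mapped at %p\n", p);    /* address fits in 32 bits */
        munmap(p, 4096);
        return 0;
}
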
diff --git a/include/asm-x86/mman_32.h b/include/asm-x86/mman_32.h
deleted file mode 100644
index 8fd9d7ab7faf..000000000000
--- a/include/asm-x86/mman_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef __I386_MMAN_H__
2#define __I386_MMAN_H__
3
4#include <asm-generic/mman.h>
5
6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
8#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
9#define MAP_LOCKED 0x2000 /* pages are locked */
10#define MAP_NORESERVE 0x4000 /* don't check for reservations */
11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
13
14#define MCL_CURRENT 1 /* lock all current mappings */
15#define MCL_FUTURE 2 /* lock all future mappings */
16
17#endif /* __I386_MMAN_H__ */
diff --git a/include/asm-x86/mman_64.h b/include/asm-x86/mman_64.h
deleted file mode 100644
index dd5cb0534d37..000000000000
--- a/include/asm-x86/mman_64.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef __X8664_MMAN_H__
2#define __X8664_MMAN_H__
3
4#include <asm-generic/mman.h>
5
6#define MAP_32BIT 0x40 /* only give out 32bit addresses */
7
8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
10#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
11#define MAP_LOCKED 0x2000 /* pages are locked */
12#define MAP_NORESERVE 0x4000 /* don't check for reservations */
13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
15
16#define MCL_CURRENT 1 /* lock all current mappings */
17#define MCL_FUTURE 2 /* lock all future mappings */
18
19#endif
diff --git a/include/asm-x86/mmu_32.h b/include/asm-x86/mmu_32.h
index 8358dd3df7aa..5e249c51ef56 100644
--- a/include/asm-x86/mmu_32.h
+++ b/include/asm-x86/mmu_32.h
@@ -1,7 +1,7 @@
1#ifndef __i386_MMU_H 1#ifndef __i386_MMU_H
2#define __i386_MMU_H 2#define __i386_MMU_H
3 3
4#include <asm/semaphore.h> 4#include <linux/mutex.h>
5/* 5/*
6 * The i386 doesn't have a mmu context, but 6 * The i386 doesn't have a mmu context, but
7 * we put the segment information here. 7 * we put the segment information here.
@@ -10,7 +10,7 @@
10 */ 10 */
11typedef struct { 11typedef struct {
12 int size; 12 int size;
13 struct semaphore sem; 13 struct mutex lock;
14 void *ldt; 14 void *ldt;
15 void *vdso; 15 void *vdso;
16} mm_context_t; 16} mm_context_t;
diff --git a/include/asm-x86/mmu_64.h b/include/asm-x86/mmu_64.h
index d2cd4a9d984d..024357c27222 100644
--- a/include/asm-x86/mmu_64.h
+++ b/include/asm-x86/mmu_64.h
@@ -2,7 +2,7 @@
2#define __x86_64_MMU_H 2#define __x86_64_MMU_H
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <asm/semaphore.h> 5#include <linux/mutex.h>
6 6
7/* 7/*
8 * The x86_64 doesn't have a mmu context, but 8 * The x86_64 doesn't have a mmu context, but
@@ -14,7 +14,7 @@ typedef struct {
14 void *ldt; 14 void *ldt;
15 rwlock_t ldtlock; 15 rwlock_t ldtlock;
16 int size; 16 int size;
17 struct semaphore sem; 17 struct mutex lock;
18 void *vdso; 18 void *vdso;
19} mm_context_t; 19} mm_context_t;
20 20
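
Illustration (not part of the patch): the mmu_32.h/mmu_64.h hunks swap the per-mm LDT semaphore for a mutex, matching the ldt_32.c/ldt_64.c changes in the diffstat; the critical section is unchanged, only the primitive and its API differ, and the mutex brings lockdep coverage and strict single-owner semantics. A sketch of the before/after locking pattern around mm->context, assuming the field name introduced in these headers.

#include <linux/mutex.h>
#include <linux/sched.h>

/* Old pattern (struct semaphore sem):
 *      down(&mm->context.sem);
 *      ... touch mm->context.ldt / mm->context.size ...
 *      up(&mm->context.sem);
 */

static void ldt_example(struct mm_struct *mm)
{
        /* New pattern (struct mutex lock): same critical section. */
        mutex_lock(&mm->context.lock);
        /* ... touch mm->context.ldt / mm->context.size ... */
        mutex_unlock(&mm->context.lock);
}
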
diff --git a/include/asm-x86/namei.h b/include/asm-x86/namei.h
index 732f8f0b3dcd..415ef5d9550e 100644
--- a/include/asm-x86/namei.h
+++ b/include/asm-x86/namei.h
@@ -1,5 +1,11 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_NAMEI_H
2# include "namei_32.h" 2#define _ASM_X86_NAMEI_H
3#else 3
4# include "namei_64.h" 4/* This dummy routine maybe changed to something useful
5#endif 5 * for /usr/gnemul/ emulation stuff.
6 * Look at asm-sparc/namei.h for details.
7 */
8
9#define __emul_prefix() NULL
10
11#endif /* _ASM_X86_NAMEI_H */
diff --git a/include/asm-x86/namei_32.h b/include/asm-x86/namei_32.h
deleted file mode 100644
index 814865088617..000000000000
--- a/include/asm-x86/namei_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $
2 * linux/include/asm-i386/namei.h
3 *
4 * Included from linux/fs/namei.c
5 */
6
7#ifndef __I386_NAMEI_H
8#define __I386_NAMEI_H
9
10/* This dummy routine maybe changed to something useful
11 * for /usr/gnemul/ emulation stuff.
12 * Look at asm-sparc/namei.h for details.
13 */
14
15#define __emul_prefix() NULL
16
17#endif /* __I386_NAMEI_H */
diff --git a/include/asm-x86/namei_64.h b/include/asm-x86/namei_64.h
deleted file mode 100644
index bef239f5318f..000000000000
--- a/include/asm-x86/namei_64.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __X8664_NAMEI_H
2#define __X8664_NAMEI_H
3
4/* This dummy routine maybe changed to something useful
5 * for /usr/gnemul/ emulation stuff.
6 * Look at asm-sparc/namei.h for details.
7 */
8
9#define __emul_prefix() NULL
10
11#endif
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 933ff11ece15..0cc5c97a7fc9 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -2,6 +2,7 @@
2#define _ASM_X8664_NUMA_H 1 2#define _ASM_X8664_NUMA_H 1
3 3
4#include <linux/nodemask.h> 4#include <linux/nodemask.h>
5#include <asm/apicdef.h>
5 6
6struct bootnode { 7struct bootnode {
7 u64 start,end; 8 u64 start,end;
@@ -19,7 +20,7 @@ extern void numa_set_node(int cpu, int node);
19extern void srat_reserve_add_area(int nodeid); 20extern void srat_reserve_add_area(int nodeid);
20extern int hotadd_percent; 21extern int hotadd_percent;
21 22
22extern unsigned char apicid_to_node[256]; 23extern unsigned char apicid_to_node[MAX_LOCAL_APIC];
23#ifdef CONFIG_NUMA 24#ifdef CONFIG_NUMA
24extern void __init init_cpu_to_node(void); 25extern void __init init_cpu_to_node(void);
25 26
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
index 640851bab124..c996ec4da0c8 100644
--- a/include/asm-x86/param.h
+++ b/include/asm-x86/param.h
@@ -1,13 +1,22 @@
1#ifndef _ASM_X86_PARAM_H
2#define _ASM_X86_PARAM_H
3
1#ifdef __KERNEL__ 4#ifdef __KERNEL__
2# ifdef CONFIG_X86_32 5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
3# include "param_32.h" 6# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
4# else 7# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
5# include "param_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "param_32.h"
10# else
11# include "param_64.h"
12# endif
13#endif 8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 4096
15
16#ifndef NOGROUP
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif /* _ASM_X86_PARAM_H */
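
Illustration (not part of the patch): the unified param.h keeps the same split the old 32/64 copies had. HZ is the internal kernel tick rate (CONFIG_HZ), while USER_HZ stays pinned at 100 because interfaces such as times() and /proc report "clock ticks" in those fixed units; userspace therefore converts with sysconf(_SC_CLK_TCK) rather than guessing the kernel's HZ. A brief sketch:

#include <stdio.h>
#include <unistd.h>
#include <sys/times.h>

int main(void)
{
        long ticks_per_sec = sysconf(_SC_CLK_TCK);   /* USER_HZ, normally 100 */

        struct tms t;
        times(&t);

        /* Convert tick counts into seconds using USER_HZ, not the kernel HZ. */
        printf("user cpu time: %.2f s\n",
               (double)t.tms_utime / ticks_per_sec);
        return 0;
}
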
diff --git a/include/asm-x86/param_32.h b/include/asm-x86/param_32.h
deleted file mode 100644
index 21b32466fcdc..000000000000
--- a/include/asm-x86/param_32.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _ASMi386_PARAM_H
2#define _ASMi386_PARAM_H
3
4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
6# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
7# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 4096
15
16#ifndef NOGROUP
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif
diff --git a/include/asm-x86/param_64.h b/include/asm-x86/param_64.h
deleted file mode 100644
index a728786c3c7c..000000000000
--- a/include/asm-x86/param_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _ASMx86_64_PARAM_H
2#define _ASMx86_64_PARAM_H
3
4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
6# define USER_HZ 100 /* .. some user interfaces are in "ticks */
7#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 4096
15
16#ifndef NOGROUP
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 9fa3fa9e62d1..f59d370c5df4 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -25,27 +25,22 @@ struct tss_struct;
25struct mm_struct; 25struct mm_struct;
26struct desc_struct; 26struct desc_struct;
27 27
28/* Lazy mode for batching updates / context switch */ 28/* general info */
29enum paravirt_lazy_mode { 29struct pv_info {
30 PARAVIRT_LAZY_NONE = 0,
31 PARAVIRT_LAZY_MMU = 1,
32 PARAVIRT_LAZY_CPU = 2,
33 PARAVIRT_LAZY_FLUSH = 3,
34};
35
36struct paravirt_ops
37{
38 unsigned int kernel_rpl; 30 unsigned int kernel_rpl;
39 int shared_kernel_pmd; 31 int shared_kernel_pmd;
40 int paravirt_enabled; 32 int paravirt_enabled;
41 const char *name; 33 const char *name;
34};
42 35
36struct pv_init_ops {
43 /* 37 /*
44 * Patch may replace one of the defined code sequences with arbitrary 38 * Patch may replace one of the defined code sequences with
45 * code, subject to the same register constraints. This generally 39 * arbitrary code, subject to the same register constraints.
46 * means the code is not free to clobber any registers other than EAX. 40 * This generally means the code is not free to clobber any
47 * The patch function should return the number of bytes of code 41 * registers other than EAX. The patch function should return
48 * generated, as we nop pad the rest in generic code. 42 * the number of bytes of code generated, as we nop pad the
43 * rest in generic code.
49 */ 44 */
50 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, 45 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
51 unsigned long addr, unsigned len); 46 unsigned long addr, unsigned len);
@@ -55,29 +50,29 @@ struct paravirt_ops
55 char *(*memory_setup)(void); 50 char *(*memory_setup)(void);
56 void (*post_allocator_init)(void); 51 void (*post_allocator_init)(void);
57 52
58 void (*init_IRQ)(void);
59 void (*time_init)(void);
60
61 /*
62 * Called before/after init_mm pagetable setup. setup_start
63 * may reset %cr3, and may pre-install parts of the pagetable;
64 * pagetable setup is expected to preserve any existing
65 * mapping.
66 */
67 void (*pagetable_setup_start)(pgd_t *pgd_base);
68 void (*pagetable_setup_done)(pgd_t *pgd_base);
69
70 /* Print a banner to identify the environment */ 53 /* Print a banner to identify the environment */
71 void (*banner)(void); 54 void (*banner)(void);
55};
56
57
58struct pv_lazy_ops {
59 /* Set deferred update mode, used for batching operations. */
60 void (*enter)(void);
61 void (*leave)(void);
62};
63
64struct pv_time_ops {
65 void (*time_init)(void);
72 66
73 /* Set and set time of day */ 67 /* Set and set time of day */
74 unsigned long (*get_wallclock)(void); 68 unsigned long (*get_wallclock)(void);
75 int (*set_wallclock)(unsigned long); 69 int (*set_wallclock)(unsigned long);
76 70
77 /* cpuid emulation, mostly so that caps bits can be disabled */ 71 unsigned long long (*sched_clock)(void);
78 void (*cpuid)(unsigned int *eax, unsigned int *ebx, 72 unsigned long (*get_cpu_khz)(void);
79 unsigned int *ecx, unsigned int *edx); 73};
80 74
75struct pv_cpu_ops {
81 /* hooks for various privileged instructions */ 76 /* hooks for various privileged instructions */
82 unsigned long (*get_debugreg)(int regno); 77 unsigned long (*get_debugreg)(int regno);
83 void (*set_debugreg)(int regno, unsigned long value); 78 void (*set_debugreg)(int regno, unsigned long value);
@@ -87,41 +82,10 @@ struct paravirt_ops
87 unsigned long (*read_cr0)(void); 82 unsigned long (*read_cr0)(void);
88 void (*write_cr0)(unsigned long); 83 void (*write_cr0)(unsigned long);
89 84
90 unsigned long (*read_cr2)(void);
91 void (*write_cr2)(unsigned long);
92
93 unsigned long (*read_cr3)(void);
94 void (*write_cr3)(unsigned long);
95
96 unsigned long (*read_cr4_safe)(void); 85 unsigned long (*read_cr4_safe)(void);
97 unsigned long (*read_cr4)(void); 86 unsigned long (*read_cr4)(void);
98 void (*write_cr4)(unsigned long); 87 void (*write_cr4)(unsigned long);
99 88
100 /*
101 * Get/set interrupt state. save_fl and restore_fl are only
102 * expected to use X86_EFLAGS_IF; all other bits
103 * returned from save_fl are undefined, and may be ignored by
104 * restore_fl.
105 */
106 unsigned long (*save_fl)(void);
107 void (*restore_fl)(unsigned long);
108 void (*irq_disable)(void);
109 void (*irq_enable)(void);
110 void (*safe_halt)(void);
111 void (*halt)(void);
112
113 void (*wbinvd)(void);
114
115 /* MSR, PMC and TSR operations.
116 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
117 u64 (*read_msr)(unsigned int msr, int *err);
118 int (*write_msr)(unsigned int msr, u64 val);
119
120 u64 (*read_tsc)(void);
121 u64 (*read_pmc)(void);
122 unsigned long long (*sched_clock)(void);
123 unsigned long (*get_cpu_khz)(void);
124
125 /* Segment descriptor handling */ 89 /* Segment descriptor handling */
126 void (*load_tr_desc)(void); 90 void (*load_tr_desc)(void);
127 void (*load_gdt)(const struct Xgt_desc_struct *); 91 void (*load_gdt)(const struct Xgt_desc_struct *);
@@ -140,18 +104,47 @@ struct paravirt_ops
140 void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t); 104 void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
141 105
142 void (*set_iopl_mask)(unsigned mask); 106 void (*set_iopl_mask)(unsigned mask);
107
108 void (*wbinvd)(void);
143 void (*io_delay)(void); 109 void (*io_delay)(void);
144 110
111 /* cpuid emulation, mostly so that caps bits can be disabled */
112 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
113 unsigned int *ecx, unsigned int *edx);
114
115 /* MSR, PMC and TSR operations.
116 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
117 u64 (*read_msr)(unsigned int msr, int *err);
118 int (*write_msr)(unsigned int msr, u64 val);
119
120 u64 (*read_tsc)(void);
121 u64 (*read_pmc)(void);
122
123 /* These two are jmp to, not actually called. */
124 void (*irq_enable_sysexit)(void);
125 void (*iret)(void);
126
127 struct pv_lazy_ops lazy_mode;
128};
129
130struct pv_irq_ops {
131 void (*init_IRQ)(void);
132
145 /* 133 /*
146 * Hooks for intercepting the creation/use/destruction of an 134 * Get/set interrupt state. save_fl and restore_fl are only
147 * mm_struct. 135 * expected to use X86_EFLAGS_IF; all other bits
136 * returned from save_fl are undefined, and may be ignored by
137 * restore_fl.
148 */ 138 */
149 void (*activate_mm)(struct mm_struct *prev, 139 unsigned long (*save_fl)(void);
150 struct mm_struct *next); 140 void (*restore_fl)(unsigned long);
151 void (*dup_mmap)(struct mm_struct *oldmm, 141 void (*irq_disable)(void);
152 struct mm_struct *mm); 142 void (*irq_enable)(void);
153 void (*exit_mmap)(struct mm_struct *mm); 143 void (*safe_halt)(void);
144 void (*halt)(void);
145};
154 146
147struct pv_apic_ops {
155#ifdef CONFIG_X86_LOCAL_APIC 148#ifdef CONFIG_X86_LOCAL_APIC
156 /* 149 /*
157 * Direct APIC operations, principally for VMI. Ideally 150 * Direct APIC operations, principally for VMI. Ideally
@@ -167,6 +160,34 @@ struct paravirt_ops
167 unsigned long start_eip, 160 unsigned long start_eip,
168 unsigned long start_esp); 161 unsigned long start_esp);
169#endif 162#endif
163};
164
165struct pv_mmu_ops {
166 /*
167 * Called before/after init_mm pagetable setup. setup_start
168 * may reset %cr3, and may pre-install parts of the pagetable;
169 * pagetable setup is expected to preserve any existing
170 * mapping.
171 */
172 void (*pagetable_setup_start)(pgd_t *pgd_base);
173 void (*pagetable_setup_done)(pgd_t *pgd_base);
174
175 unsigned long (*read_cr2)(void);
176 void (*write_cr2)(unsigned long);
177
178 unsigned long (*read_cr3)(void);
179 void (*write_cr3)(unsigned long);
180
181 /*
182 * Hooks for intercepting the creation/use/destruction of an
183 * mm_struct.
184 */
185 void (*activate_mm)(struct mm_struct *prev,
186 struct mm_struct *next);
187 void (*dup_mmap)(struct mm_struct *oldmm,
188 struct mm_struct *mm);
189 void (*exit_mmap)(struct mm_struct *mm);
190
170 191
171 /* TLB operations */ 192 /* TLB operations */
172 void (*flush_tlb_user)(void); 193 void (*flush_tlb_user)(void);
@@ -191,15 +212,12 @@ struct paravirt_ops
191 void (*pte_update_defer)(struct mm_struct *mm, 212 void (*pte_update_defer)(struct mm_struct *mm,
192 unsigned long addr, pte_t *ptep); 213 unsigned long addr, pte_t *ptep);
193 214
194#ifdef CONFIG_HIGHPTE
195 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
196#endif
197
198#ifdef CONFIG_X86_PAE 215#ifdef CONFIG_X86_PAE
199 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); 216 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
200 void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); 217 void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
218 pte_t *ptep, pte_t pte);
201 void (*set_pud)(pud_t *pudp, pud_t pudval); 219 void (*set_pud)(pud_t *pudp, pud_t pudval);
202 void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 220 void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
203 void (*pmd_clear)(pmd_t *pmdp); 221 void (*pmd_clear)(pmd_t *pmdp);
204 222
205 unsigned long long (*pte_val)(pte_t); 223 unsigned long long (*pte_val)(pte_t);
@@ -217,21 +235,40 @@ struct paravirt_ops
217 pgd_t (*make_pgd)(unsigned long pgd); 235 pgd_t (*make_pgd)(unsigned long pgd);
218#endif 236#endif
219 237
220 /* Set deferred update mode, used for batching operations. */ 238#ifdef CONFIG_HIGHPTE
221 void (*set_lazy_mode)(enum paravirt_lazy_mode mode); 239 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
240#endif
222 241
223 /* These two are jmp to, not actually called. */ 242 struct pv_lazy_ops lazy_mode;
224 void (*irq_enable_sysexit)(void);
225 void (*iret)(void);
226}; 243};
227 244
228extern struct paravirt_ops paravirt_ops; 245/* This contains all the paravirt structures: we get a convenient
246 * number for each function using the offset which we use to indicate
247 * what to patch. */
248struct paravirt_patch_template
249{
250 struct pv_init_ops pv_init_ops;
251 struct pv_time_ops pv_time_ops;
252 struct pv_cpu_ops pv_cpu_ops;
253 struct pv_irq_ops pv_irq_ops;
254 struct pv_apic_ops pv_apic_ops;
255 struct pv_mmu_ops pv_mmu_ops;
256};
257
258extern struct pv_info pv_info;
259extern struct pv_init_ops pv_init_ops;
260extern struct pv_time_ops pv_time_ops;
261extern struct pv_cpu_ops pv_cpu_ops;
262extern struct pv_irq_ops pv_irq_ops;
263extern struct pv_apic_ops pv_apic_ops;
264extern struct pv_mmu_ops pv_mmu_ops;
229 265
230#define PARAVIRT_PATCH(x) \ 266#define PARAVIRT_PATCH(x) \
231 (offsetof(struct paravirt_ops, x) / sizeof(void *)) 267 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
232 268
233#define paravirt_type(type) \ 269#define paravirt_type(op) \
234 [paravirt_typenum] "i" (PARAVIRT_PATCH(type)) 270 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
271 [paravirt_opptr] "m" (op)
235#define paravirt_clobber(clobber) \ 272#define paravirt_clobber(clobber) \
236 [paravirt_clobber] "i" (clobber) 273 [paravirt_clobber] "i" (clobber)
237 274
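
Illustration (not part of the patch): the hunks above are the core of the paravirt reorganisation. The single paravirt_ops structure becomes six smaller pv_*_ops structures, and struct paravirt_patch_template lays those groups out back-to-back so PARAVIRT_PATCH() can still turn "which hook is this" into one small word-sized offset, while paravirt_type() now also passes the slot itself as a memory operand so PARAVIRT_CALL can be a plain indirect call through it. A reduced userspace model of the offset trick; the toy structure and field names are illustrative only, not the real ops.

#include <stdio.h>
#include <stddef.h>

/* Toy stand-ins for pv_cpu_ops / pv_mmu_ops. */
struct toy_cpu_ops { void (*clts)(void); unsigned long (*read_cr0)(void); };
struct toy_mmu_ops { unsigned long (*read_cr2)(void); };

/* Mirror of paravirt_patch_template: the groups laid out consecutively. */
struct toy_template {
        struct toy_cpu_ops cpu;
        struct toy_mmu_ops mmu;
};

/* Mirror of PARAVIRT_PATCH(): member offset, in pointer-sized slots. */
#define TOY_PATCH(field) \
        (offsetof(struct toy_template, field) / sizeof(void *))

int main(void)
{
        /* Each hook gets a stable small integer the patcher can record in
         * .parainstructions and later map back to a structure slot. */
        printf("clts     -> slot %zu\n", TOY_PATCH(cpu.clts));
        printf("read_cr0 -> slot %zu\n", TOY_PATCH(cpu.read_cr0));
        printf("read_cr2 -> slot %zu\n", TOY_PATCH(mmu.read_cr2));
        return 0;
}
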
@@ -258,7 +295,7 @@ unsigned paravirt_patch_call(void *insnbuf,
258 const void *target, u16 tgt_clobbers, 295 const void *target, u16 tgt_clobbers,
259 unsigned long addr, u16 site_clobbers, 296 unsigned long addr, u16 site_clobbers,
260 unsigned len); 297 unsigned len);
261unsigned paravirt_patch_jmp(const void *target, void *insnbuf, 298unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
262 unsigned long addr, unsigned len); 299 unsigned long addr, unsigned len);
263unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, 300unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
264 unsigned long addr, unsigned len); 301 unsigned long addr, unsigned len);
@@ -271,14 +308,14 @@ int paravirt_disable_iospace(void);
271/* 308/*
272 * This generates an indirect call based on the operation type number. 309 * This generates an indirect call based on the operation type number.
273 * The type number, computed in PARAVIRT_PATCH, is derived from the 310 * The type number, computed in PARAVIRT_PATCH, is derived from the
274 * offset into the paravirt_ops structure, and can therefore be freely 311 * offset into the paravirt_patch_template structure, and can therefore be
275 * converted back into a structure offset. 312 * freely converted back into a structure offset.
276 */ 313 */
277#define PARAVIRT_CALL "call *(paravirt_ops+%c[paravirt_typenum]*4);" 314#define PARAVIRT_CALL "call *%[paravirt_opptr];"
278 315
279/* 316/*
280 * These macros are intended to wrap calls into a paravirt_ops 317 * These macros are intended to wrap calls through one of the paravirt
281 * operation, so that they can be later identified and patched at 318 * ops structs, so that they can be later identified and patched at
282 * runtime. 319 * runtime.
283 * 320 *
284 * Normally, a call to a pv_op function is a simple indirect call: 321 * Normally, a call to a pv_op function is a simple indirect call:
@@ -301,7 +338,7 @@ int paravirt_disable_iospace(void);
301 * The call instruction itself is marked by placing its start address 338 * The call instruction itself is marked by placing its start address
302 * and size into the .parainstructions section, so that 339 * and size into the .parainstructions section, so that
303 * apply_paravirt() in arch/i386/kernel/alternative.c can do the 340 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
304 * appropriate patching under the control of the backend paravirt_ops 341 * appropriate patching under the control of the backend pv_init_ops
305 * implementation. 342 * implementation.
306 * 343 *
307 * Unfortunately there's no way to get gcc to generate the args setup 344 * Unfortunately there's no way to get gcc to generate the args setup
@@ -409,36 +446,36 @@ int paravirt_disable_iospace(void);
409 446
410static inline int paravirt_enabled(void) 447static inline int paravirt_enabled(void)
411{ 448{
412 return paravirt_ops.paravirt_enabled; 449 return pv_info.paravirt_enabled;
413} 450}
414 451
415static inline void load_esp0(struct tss_struct *tss, 452static inline void load_esp0(struct tss_struct *tss,
416 struct thread_struct *thread) 453 struct thread_struct *thread)
417{ 454{
418 PVOP_VCALL2(load_esp0, tss, thread); 455 PVOP_VCALL2(pv_cpu_ops.load_esp0, tss, thread);
419} 456}
420 457
421#define ARCH_SETUP paravirt_ops.arch_setup(); 458#define ARCH_SETUP pv_init_ops.arch_setup();
422static inline unsigned long get_wallclock(void) 459static inline unsigned long get_wallclock(void)
423{ 460{
424 return PVOP_CALL0(unsigned long, get_wallclock); 461 return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
425} 462}
426 463
427static inline int set_wallclock(unsigned long nowtime) 464static inline int set_wallclock(unsigned long nowtime)
428{ 465{
429 return PVOP_CALL1(int, set_wallclock, nowtime); 466 return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
430} 467}
431 468
432static inline void (*choose_time_init(void))(void) 469static inline void (*choose_time_init(void))(void)
433{ 470{
434 return paravirt_ops.time_init; 471 return pv_time_ops.time_init;
435} 472}
436 473
437/* The paravirtualized CPUID instruction. */ 474/* The paravirtualized CPUID instruction. */
438static inline void __cpuid(unsigned int *eax, unsigned int *ebx, 475static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
439 unsigned int *ecx, unsigned int *edx) 476 unsigned int *ecx, unsigned int *edx)
440{ 477{
441 PVOP_VCALL4(cpuid, eax, ebx, ecx, edx); 478 PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
442} 479}
443 480
444/* 481/*
@@ -446,87 +483,87 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
446 */ 483 */
447static inline unsigned long paravirt_get_debugreg(int reg) 484static inline unsigned long paravirt_get_debugreg(int reg)
448{ 485{
449 return PVOP_CALL1(unsigned long, get_debugreg, reg); 486 return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
450} 487}
451#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg) 488#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
452static inline void set_debugreg(unsigned long val, int reg) 489static inline void set_debugreg(unsigned long val, int reg)
453{ 490{
454 PVOP_VCALL2(set_debugreg, reg, val); 491 PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
455} 492}
456 493
457static inline void clts(void) 494static inline void clts(void)
458{ 495{
459 PVOP_VCALL0(clts); 496 PVOP_VCALL0(pv_cpu_ops.clts);
460} 497}
461 498
462static inline unsigned long read_cr0(void) 499static inline unsigned long read_cr0(void)
463{ 500{
464 return PVOP_CALL0(unsigned long, read_cr0); 501 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
465} 502}
466 503
467static inline void write_cr0(unsigned long x) 504static inline void write_cr0(unsigned long x)
468{ 505{
469 PVOP_VCALL1(write_cr0, x); 506 PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
470} 507}
471 508
472static inline unsigned long read_cr2(void) 509static inline unsigned long read_cr2(void)
473{ 510{
474 return PVOP_CALL0(unsigned long, read_cr2); 511 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
475} 512}
476 513
477static inline void write_cr2(unsigned long x) 514static inline void write_cr2(unsigned long x)
478{ 515{
479 PVOP_VCALL1(write_cr2, x); 516 PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
480} 517}
481 518
482static inline unsigned long read_cr3(void) 519static inline unsigned long read_cr3(void)
483{ 520{
484 return PVOP_CALL0(unsigned long, read_cr3); 521 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
485} 522}
486 523
487static inline void write_cr3(unsigned long x) 524static inline void write_cr3(unsigned long x)
488{ 525{
489 PVOP_VCALL1(write_cr3, x); 526 PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
490} 527}
491 528
492static inline unsigned long read_cr4(void) 529static inline unsigned long read_cr4(void)
493{ 530{
494 return PVOP_CALL0(unsigned long, read_cr4); 531 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
495} 532}
496static inline unsigned long read_cr4_safe(void) 533static inline unsigned long read_cr4_safe(void)
497{ 534{
498 return PVOP_CALL0(unsigned long, read_cr4_safe); 535 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
499} 536}
500 537
501static inline void write_cr4(unsigned long x) 538static inline void write_cr4(unsigned long x)
502{ 539{
503 PVOP_VCALL1(write_cr4, x); 540 PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
504} 541}
505 542
506static inline void raw_safe_halt(void) 543static inline void raw_safe_halt(void)
507{ 544{
508 PVOP_VCALL0(safe_halt); 545 PVOP_VCALL0(pv_irq_ops.safe_halt);
509} 546}
510 547
511static inline void halt(void) 548static inline void halt(void)
512{ 549{
513 PVOP_VCALL0(safe_halt); 550 PVOP_VCALL0(pv_irq_ops.safe_halt);
514} 551}
515 552
516static inline void wbinvd(void) 553static inline void wbinvd(void)
517{ 554{
518 PVOP_VCALL0(wbinvd); 555 PVOP_VCALL0(pv_cpu_ops.wbinvd);
519} 556}
520 557
521#define get_kernel_rpl() (paravirt_ops.kernel_rpl) 558#define get_kernel_rpl() (pv_info.kernel_rpl)
522 559
523static inline u64 paravirt_read_msr(unsigned msr, int *err) 560static inline u64 paravirt_read_msr(unsigned msr, int *err)
524{ 561{
525 return PVOP_CALL2(u64, read_msr, msr, err); 562 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
526} 563}
527static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) 564static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
528{ 565{
529 return PVOP_CALL3(int, write_msr, msr, low, high); 566 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
530} 567}
531 568
532/* These should all do BUG_ON(_err), but our headers are too tangled. */ 569/* These should all do BUG_ON(_err), but our headers are too tangled. */
@@ -560,7 +597,7 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
560 597
561static inline u64 paravirt_read_tsc(void) 598static inline u64 paravirt_read_tsc(void)
562{ 599{
563 return PVOP_CALL0(u64, read_tsc); 600 return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
564} 601}
565 602
566#define rdtscl(low) do { \ 603#define rdtscl(low) do { \
@@ -572,15 +609,15 @@ static inline u64 paravirt_read_tsc(void)
572 609
573static inline unsigned long long paravirt_sched_clock(void) 610static inline unsigned long long paravirt_sched_clock(void)
574{ 611{
575 return PVOP_CALL0(unsigned long long, sched_clock); 612 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
576} 613}
577#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz()) 614#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())
578 615
579#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 616#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
580 617
581static inline unsigned long long paravirt_read_pmc(int counter) 618static inline unsigned long long paravirt_read_pmc(int counter)
582{ 619{
583 return PVOP_CALL1(u64, read_pmc, counter); 620 return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
584} 621}
585 622
586#define rdpmc(counter,low,high) do { \ 623#define rdpmc(counter,low,high) do { \
@@ -591,61 +628,61 @@ static inline unsigned long long paravirt_read_pmc(int counter)
591 628
592static inline void load_TR_desc(void) 629static inline void load_TR_desc(void)
593{ 630{
594 PVOP_VCALL0(load_tr_desc); 631 PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
595} 632}
596static inline void load_gdt(const struct Xgt_desc_struct *dtr) 633static inline void load_gdt(const struct Xgt_desc_struct *dtr)
597{ 634{
598 PVOP_VCALL1(load_gdt, dtr); 635 PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
599} 636}
600static inline void load_idt(const struct Xgt_desc_struct *dtr) 637static inline void load_idt(const struct Xgt_desc_struct *dtr)
601{ 638{
602 PVOP_VCALL1(load_idt, dtr); 639 PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
603} 640}
604static inline void set_ldt(const void *addr, unsigned entries) 641static inline void set_ldt(const void *addr, unsigned entries)
605{ 642{
606 PVOP_VCALL2(set_ldt, addr, entries); 643 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
607} 644}
608static inline void store_gdt(struct Xgt_desc_struct *dtr) 645static inline void store_gdt(struct Xgt_desc_struct *dtr)
609{ 646{
610 PVOP_VCALL1(store_gdt, dtr); 647 PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
611} 648}
612static inline void store_idt(struct Xgt_desc_struct *dtr) 649static inline void store_idt(struct Xgt_desc_struct *dtr)
613{ 650{
614 PVOP_VCALL1(store_idt, dtr); 651 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
615} 652}
616static inline unsigned long paravirt_store_tr(void) 653static inline unsigned long paravirt_store_tr(void)
617{ 654{
618 return PVOP_CALL0(unsigned long, store_tr); 655 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
619} 656}
620#define store_tr(tr) ((tr) = paravirt_store_tr()) 657#define store_tr(tr) ((tr) = paravirt_store_tr())
621static inline void load_TLS(struct thread_struct *t, unsigned cpu) 658static inline void load_TLS(struct thread_struct *t, unsigned cpu)
622{ 659{
623 PVOP_VCALL2(load_tls, t, cpu); 660 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
624} 661}
625static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high) 662static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
626{ 663{
627 PVOP_VCALL4(write_ldt_entry, dt, entry, low, high); 664 PVOP_VCALL4(pv_cpu_ops.write_ldt_entry, dt, entry, low, high);
628} 665}
629static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high) 666static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
630{ 667{
631 PVOP_VCALL4(write_gdt_entry, dt, entry, low, high); 668 PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, low, high);
632} 669}
633static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high) 670static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
634{ 671{
635 PVOP_VCALL4(write_idt_entry, dt, entry, low, high); 672 PVOP_VCALL4(pv_cpu_ops.write_idt_entry, dt, entry, low, high);
636} 673}
637static inline void set_iopl_mask(unsigned mask) 674static inline void set_iopl_mask(unsigned mask)
638{ 675{
639 PVOP_VCALL1(set_iopl_mask, mask); 676 PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
640} 677}
641 678
642/* The paravirtualized I/O functions */ 679/* The paravirtualized I/O functions */
643static inline void slow_down_io(void) { 680static inline void slow_down_io(void) {
644 paravirt_ops.io_delay(); 681 pv_cpu_ops.io_delay();
645#ifdef REALLY_SLOW_IO 682#ifdef REALLY_SLOW_IO
646 paravirt_ops.io_delay(); 683 pv_cpu_ops.io_delay();
647 paravirt_ops.io_delay(); 684 pv_cpu_ops.io_delay();
648 paravirt_ops.io_delay(); 685 pv_cpu_ops.io_delay();
649#endif 686#endif
650} 687}
651 688
@@ -655,121 +692,120 @@ static inline void slow_down_io(void) {
655 */ 692 */
656static inline void apic_write(unsigned long reg, unsigned long v) 693static inline void apic_write(unsigned long reg, unsigned long v)
657{ 694{
658 PVOP_VCALL2(apic_write, reg, v); 695 PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
659} 696}
660 697
661static inline void apic_write_atomic(unsigned long reg, unsigned long v) 698static inline void apic_write_atomic(unsigned long reg, unsigned long v)
662{ 699{
663 PVOP_VCALL2(apic_write_atomic, reg, v); 700 PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
664} 701}
665 702
666static inline unsigned long apic_read(unsigned long reg) 703static inline unsigned long apic_read(unsigned long reg)
667{ 704{
668 return PVOP_CALL1(unsigned long, apic_read, reg); 705 return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
669} 706}
670 707
671static inline void setup_boot_clock(void) 708static inline void setup_boot_clock(void)
672{ 709{
673 PVOP_VCALL0(setup_boot_clock); 710 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
674} 711}
675 712
676static inline void setup_secondary_clock(void) 713static inline void setup_secondary_clock(void)
677{ 714{
678 PVOP_VCALL0(setup_secondary_clock); 715 PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
679} 716}
680#endif 717#endif
681 718
682static inline void paravirt_post_allocator_init(void) 719static inline void paravirt_post_allocator_init(void)
683{ 720{
684 if (paravirt_ops.post_allocator_init) 721 if (pv_init_ops.post_allocator_init)
685 (*paravirt_ops.post_allocator_init)(); 722 (*pv_init_ops.post_allocator_init)();
686} 723}
687 724
688static inline void paravirt_pagetable_setup_start(pgd_t *base) 725static inline void paravirt_pagetable_setup_start(pgd_t *base)
689{ 726{
690 if (paravirt_ops.pagetable_setup_start) 727 (*pv_mmu_ops.pagetable_setup_start)(base);
691 (*paravirt_ops.pagetable_setup_start)(base);
692} 728}
693 729
694static inline void paravirt_pagetable_setup_done(pgd_t *base) 730static inline void paravirt_pagetable_setup_done(pgd_t *base)
695{ 731{
696 if (paravirt_ops.pagetable_setup_done) 732 (*pv_mmu_ops.pagetable_setup_done)(base);
697 (*paravirt_ops.pagetable_setup_done)(base);
698} 733}
699 734
700#ifdef CONFIG_SMP 735#ifdef CONFIG_SMP
701static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, 736static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
702 unsigned long start_esp) 737 unsigned long start_esp)
703{ 738{
704 PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp); 739 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
740 phys_apicid, start_eip, start_esp);
705} 741}
706#endif 742#endif
707 743
708static inline void paravirt_activate_mm(struct mm_struct *prev, 744static inline void paravirt_activate_mm(struct mm_struct *prev,
709 struct mm_struct *next) 745 struct mm_struct *next)
710{ 746{
711 PVOP_VCALL2(activate_mm, prev, next); 747 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
712} 748}
713 749
714static inline void arch_dup_mmap(struct mm_struct *oldmm, 750static inline void arch_dup_mmap(struct mm_struct *oldmm,
715 struct mm_struct *mm) 751 struct mm_struct *mm)
716{ 752{
717 PVOP_VCALL2(dup_mmap, oldmm, mm); 753 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
718} 754}
719 755
720static inline void arch_exit_mmap(struct mm_struct *mm) 756static inline void arch_exit_mmap(struct mm_struct *mm)
721{ 757{
722 PVOP_VCALL1(exit_mmap, mm); 758 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
723} 759}
724 760
725static inline void __flush_tlb(void) 761static inline void __flush_tlb(void)
726{ 762{
727 PVOP_VCALL0(flush_tlb_user); 763 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
728} 764}
729static inline void __flush_tlb_global(void) 765static inline void __flush_tlb_global(void)
730{ 766{
731 PVOP_VCALL0(flush_tlb_kernel); 767 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
732} 768}
733static inline void __flush_tlb_single(unsigned long addr) 769static inline void __flush_tlb_single(unsigned long addr)
734{ 770{
735 PVOP_VCALL1(flush_tlb_single, addr); 771 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
736} 772}
737 773
738static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, 774static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
739 unsigned long va) 775 unsigned long va)
740{ 776{
741 PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va); 777 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
742} 778}
743 779
744static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn) 780static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
745{ 781{
746 PVOP_VCALL2(alloc_pt, mm, pfn); 782 PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
747} 783}
748static inline void paravirt_release_pt(unsigned pfn) 784static inline void paravirt_release_pt(unsigned pfn)
749{ 785{
750 PVOP_VCALL1(release_pt, pfn); 786 PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
751} 787}
752 788
753static inline void paravirt_alloc_pd(unsigned pfn) 789static inline void paravirt_alloc_pd(unsigned pfn)
754{ 790{
755 PVOP_VCALL1(alloc_pd, pfn); 791 PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
756} 792}
757 793
758static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, 794static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
759 unsigned start, unsigned count) 795 unsigned start, unsigned count)
760{ 796{
761 PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count); 797 PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
762} 798}
763static inline void paravirt_release_pd(unsigned pfn) 799static inline void paravirt_release_pd(unsigned pfn)
764{ 800{
765 PVOP_VCALL1(release_pd, pfn); 801 PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
766} 802}
767 803
768#ifdef CONFIG_HIGHPTE 804#ifdef CONFIG_HIGHPTE
769static inline void *kmap_atomic_pte(struct page *page, enum km_type type) 805static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
770{ 806{
771 unsigned long ret; 807 unsigned long ret;
772 ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type); 808 ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
773 return (void *)ret; 809 return (void *)ret;
774} 810}
775#endif 811#endif
@@ -777,162 +813,191 @@ static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
777static inline void pte_update(struct mm_struct *mm, unsigned long addr, 813static inline void pte_update(struct mm_struct *mm, unsigned long addr,
778 pte_t *ptep) 814 pte_t *ptep)
779{ 815{
780 PVOP_VCALL3(pte_update, mm, addr, ptep); 816 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
781} 817}
782 818
783static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, 819static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
784 pte_t *ptep) 820 pte_t *ptep)
785{ 821{
786 PVOP_VCALL3(pte_update_defer, mm, addr, ptep); 822 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
787} 823}
788 824
789#ifdef CONFIG_X86_PAE 825#ifdef CONFIG_X86_PAE
790static inline pte_t __pte(unsigned long long val) 826static inline pte_t __pte(unsigned long long val)
791{ 827{
792 unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte, 828 unsigned long long ret = PVOP_CALL2(unsigned long long,
829 pv_mmu_ops.make_pte,
793 val, val >> 32); 830 val, val >> 32);
794 return (pte_t) { ret, ret >> 32 }; 831 return (pte_t) { ret, ret >> 32 };
795} 832}
796 833
797static inline pmd_t __pmd(unsigned long long val) 834static inline pmd_t __pmd(unsigned long long val)
798{ 835{
799 return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) }; 836 return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
837 val, val >> 32) };
800} 838}
801 839
802static inline pgd_t __pgd(unsigned long long val) 840static inline pgd_t __pgd(unsigned long long val)
803{ 841{
804 return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) }; 842 return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
843 val, val >> 32) };
805} 844}
806 845
807static inline unsigned long long pte_val(pte_t x) 846static inline unsigned long long pte_val(pte_t x)
808{ 847{
809 return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high); 848 return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
849 x.pte_low, x.pte_high);
810} 850}
811 851
812static inline unsigned long long pmd_val(pmd_t x) 852static inline unsigned long long pmd_val(pmd_t x)
813{ 853{
814 return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32); 854 return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
855 x.pmd, x.pmd >> 32);
815} 856}
816 857
817static inline unsigned long long pgd_val(pgd_t x) 858static inline unsigned long long pgd_val(pgd_t x)
818{ 859{
819 return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32); 860 return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
861 x.pgd, x.pgd >> 32);
820} 862}
821 863
822static inline void set_pte(pte_t *ptep, pte_t pteval) 864static inline void set_pte(pte_t *ptep, pte_t pteval)
823{ 865{
824 PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high); 866 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
825} 867}
826 868
827static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 869static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
828 pte_t *ptep, pte_t pteval) 870 pte_t *ptep, pte_t pteval)
829{ 871{
830 /* 5 arg words */ 872 /* 5 arg words */
831 paravirt_ops.set_pte_at(mm, addr, ptep, pteval); 873 pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
832} 874}
833 875
834static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) 876static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
835{ 877{
836 PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high); 878 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
879 pteval.pte_low, pteval.pte_high);
837} 880}
838 881
839static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, 882static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
840 pte_t *ptep, pte_t pte) 883 pte_t *ptep, pte_t pte)
841{ 884{
842 /* 5 arg words */ 885 /* 5 arg words */
843 paravirt_ops.set_pte_present(mm, addr, ptep, pte); 886 pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
844} 887}
845 888
846static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) 889static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
847{ 890{
848 PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32); 891 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
892 pmdval.pmd, pmdval.pmd >> 32);
849} 893}
850 894
851static inline void set_pud(pud_t *pudp, pud_t pudval) 895static inline void set_pud(pud_t *pudp, pud_t pudval)
852{ 896{
853 PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32); 897 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
898 pudval.pgd.pgd, pudval.pgd.pgd >> 32);
854} 899}
855 900
856static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 901static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
857{ 902{
858 PVOP_VCALL3(pte_clear, mm, addr, ptep); 903 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
859} 904}
860 905
861static inline void pmd_clear(pmd_t *pmdp) 906static inline void pmd_clear(pmd_t *pmdp)
862{ 907{
863 PVOP_VCALL1(pmd_clear, pmdp); 908 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
864} 909}
865 910
866#else /* !CONFIG_X86_PAE */ 911#else /* !CONFIG_X86_PAE */
867 912
868static inline pte_t __pte(unsigned long val) 913static inline pte_t __pte(unsigned long val)
869{ 914{
870 return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) }; 915 return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
871} 916}
872 917
873static inline pgd_t __pgd(unsigned long val) 918static inline pgd_t __pgd(unsigned long val)
874{ 919{
875 return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) }; 920 return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
876} 921}
877 922
878static inline unsigned long pte_val(pte_t x) 923static inline unsigned long pte_val(pte_t x)
879{ 924{
880 return PVOP_CALL1(unsigned long, pte_val, x.pte_low); 925 return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
881} 926}
882 927
883static inline unsigned long pgd_val(pgd_t x) 928static inline unsigned long pgd_val(pgd_t x)
884{ 929{
885 return PVOP_CALL1(unsigned long, pgd_val, x.pgd); 930 return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
886} 931}
887 932
888static inline void set_pte(pte_t *ptep, pte_t pteval) 933static inline void set_pte(pte_t *ptep, pte_t pteval)
889{ 934{
890 PVOP_VCALL2(set_pte, ptep, pteval.pte_low); 935 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
891} 936}
892 937
893static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 938static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
894 pte_t *ptep, pte_t pteval) 939 pte_t *ptep, pte_t pteval)
895{ 940{
896 PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low); 941 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
897} 942}
898 943
899static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) 944static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
900{ 945{
901 PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd); 946 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
902} 947}
903#endif /* CONFIG_X86_PAE */ 948#endif /* CONFIG_X86_PAE */
904 949
950/* Lazy mode for batching updates / context switch */
951enum paravirt_lazy_mode {
952 PARAVIRT_LAZY_NONE,
953 PARAVIRT_LAZY_MMU,
954 PARAVIRT_LAZY_CPU,
955};
956
957enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
958void paravirt_enter_lazy_cpu(void);
959void paravirt_leave_lazy_cpu(void);
960void paravirt_enter_lazy_mmu(void);
961void paravirt_leave_lazy_mmu(void);
962void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
963
905#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE 964#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
906static inline void arch_enter_lazy_cpu_mode(void) 965static inline void arch_enter_lazy_cpu_mode(void)
907{ 966{
908 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU); 967 PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
909} 968}
910 969
911static inline void arch_leave_lazy_cpu_mode(void) 970static inline void arch_leave_lazy_cpu_mode(void)
912{ 971{
913 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE); 972 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
914} 973}
915 974
916static inline void arch_flush_lazy_cpu_mode(void) 975static inline void arch_flush_lazy_cpu_mode(void)
917{ 976{
918 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH); 977 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
978 arch_leave_lazy_cpu_mode();
979 arch_enter_lazy_cpu_mode();
980 }
919} 981}
920 982
921 983
922#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 984#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
923static inline void arch_enter_lazy_mmu_mode(void) 985static inline void arch_enter_lazy_mmu_mode(void)
924{ 986{
925 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU); 987 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
926} 988}
927 989
928static inline void arch_leave_lazy_mmu_mode(void) 990static inline void arch_leave_lazy_mmu_mode(void)
929{ 991{
930 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE); 992 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
931} 993}
932 994
933static inline void arch_flush_lazy_mmu_mode(void) 995static inline void arch_flush_lazy_mmu_mode(void)
934{ 996{
935 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH); 997 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
998 arch_leave_lazy_mmu_mode();
999 arch_enter_lazy_mmu_mode();
1000 }
936} 1001}
937 1002
938void _paravirt_nop(void); 1003void _paravirt_nop(void);
@@ -957,7 +1022,7 @@ static inline unsigned long __raw_local_save_flags(void)
957 PARAVIRT_CALL 1022 PARAVIRT_CALL
958 "popl %%edx; popl %%ecx") 1023 "popl %%edx; popl %%ecx")
959 : "=a"(f) 1024 : "=a"(f)
960 : paravirt_type(save_fl), 1025 : paravirt_type(pv_irq_ops.save_fl),
961 paravirt_clobber(CLBR_EAX) 1026 paravirt_clobber(CLBR_EAX)
962 : "memory", "cc"); 1027 : "memory", "cc");
963 return f; 1028 return f;
@@ -970,7 +1035,7 @@ static inline void raw_local_irq_restore(unsigned long f)
970 "popl %%edx; popl %%ecx") 1035 "popl %%edx; popl %%ecx")
971 : "=a"(f) 1036 : "=a"(f)
972 : "0"(f), 1037 : "0"(f),
973 paravirt_type(restore_fl), 1038 paravirt_type(pv_irq_ops.restore_fl),
974 paravirt_clobber(CLBR_EAX) 1039 paravirt_clobber(CLBR_EAX)
975 : "memory", "cc"); 1040 : "memory", "cc");
976} 1041}
@@ -981,7 +1046,7 @@ static inline void raw_local_irq_disable(void)
981 PARAVIRT_CALL 1046 PARAVIRT_CALL
982 "popl %%edx; popl %%ecx") 1047 "popl %%edx; popl %%ecx")
983 : 1048 :
984 : paravirt_type(irq_disable), 1049 : paravirt_type(pv_irq_ops.irq_disable),
985 paravirt_clobber(CLBR_EAX) 1050 paravirt_clobber(CLBR_EAX)
986 : "memory", "eax", "cc"); 1051 : "memory", "eax", "cc");
987} 1052}
@@ -992,7 +1057,7 @@ static inline void raw_local_irq_enable(void)
992 PARAVIRT_CALL 1057 PARAVIRT_CALL
993 "popl %%edx; popl %%ecx") 1058 "popl %%edx; popl %%ecx")
994 : 1059 :
995 : paravirt_type(irq_enable), 1060 : paravirt_type(pv_irq_ops.irq_enable),
996 paravirt_clobber(CLBR_EAX) 1061 paravirt_clobber(CLBR_EAX)
997 : "memory", "eax", "cc"); 1062 : "memory", "eax", "cc");
998} 1063}
@@ -1008,21 +1073,23 @@ static inline unsigned long __raw_local_irq_save(void)
1008 1073
1009#define CLI_STRING \ 1074#define CLI_STRING \
1010 _paravirt_alt("pushl %%ecx; pushl %%edx;" \ 1075 _paravirt_alt("pushl %%ecx; pushl %%edx;" \
1011 "call *paravirt_ops+%c[paravirt_cli_type]*4;" \ 1076 "call *%[paravirt_cli_opptr];" \
1012 "popl %%edx; popl %%ecx", \ 1077 "popl %%edx; popl %%ecx", \
1013 "%c[paravirt_cli_type]", "%c[paravirt_clobber]") 1078 "%c[paravirt_cli_type]", "%c[paravirt_clobber]")
1014 1079
1015#define STI_STRING \ 1080#define STI_STRING \
1016 _paravirt_alt("pushl %%ecx; pushl %%edx;" \ 1081 _paravirt_alt("pushl %%ecx; pushl %%edx;" \
1017 "call *paravirt_ops+%c[paravirt_sti_type]*4;" \ 1082 "call *%[paravirt_sti_opptr];" \
1018 "popl %%edx; popl %%ecx", \ 1083 "popl %%edx; popl %%ecx", \
1019 "%c[paravirt_sti_type]", "%c[paravirt_clobber]") 1084 "%c[paravirt_sti_type]", "%c[paravirt_clobber]")
1020 1085
1021#define CLI_STI_CLOBBERS , "%eax" 1086#define CLI_STI_CLOBBERS , "%eax"
1022#define CLI_STI_INPUT_ARGS \ 1087#define CLI_STI_INPUT_ARGS \
1023 , \ 1088 , \
1024 [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)), \ 1089 [paravirt_cli_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_disable)), \
1025 [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)), \ 1090 [paravirt_cli_opptr] "m" (pv_irq_ops.irq_disable), \
1091 [paravirt_sti_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_enable)), \
1092 [paravirt_sti_opptr] "m" (pv_irq_ops.irq_enable), \
1026 paravirt_clobber(CLBR_EAX) 1093 paravirt_clobber(CLBR_EAX)
1027 1094
1028/* Make sure as little as possible of this mess escapes. */ 1095/* Make sure as little as possible of this mess escapes. */
@@ -1042,7 +1109,7 @@ static inline unsigned long __raw_local_irq_save(void)
1042 1109
1043#else /* __ASSEMBLY__ */ 1110#else /* __ASSEMBLY__ */
1044 1111
1045#define PARA_PATCH(off) ((off) / 4) 1112#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1046 1113
1047#define PARA_SITE(ptype, clobbers, ops) \ 1114#define PARA_SITE(ptype, clobbers, ops) \
1048771:; \ 1115771:; \
@@ -1055,29 +1122,29 @@ static inline unsigned long __raw_local_irq_save(void)
1055 .short clobbers; \ 1122 .short clobbers; \
1056 .popsection 1123 .popsection
1057 1124
1058#define INTERRUPT_RETURN \ 1125#define INTERRUPT_RETURN \
1059 PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE, \ 1126 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
1060 jmp *%cs:paravirt_ops+PARAVIRT_iret) 1127 jmp *%cs:pv_cpu_ops+PV_CPU_iret)
1061 1128
1062#define DISABLE_INTERRUPTS(clobbers) \ 1129#define DISABLE_INTERRUPTS(clobbers) \
1063 PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers, \ 1130 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1064 pushl %eax; pushl %ecx; pushl %edx; \ 1131 pushl %eax; pushl %ecx; pushl %edx; \
1065 call *%cs:paravirt_ops+PARAVIRT_irq_disable; \ 1132 call *%cs:pv_irq_ops+PV_IRQ_irq_disable; \
1066 popl %edx; popl %ecx; popl %eax) \ 1133 popl %edx; popl %ecx; popl %eax) \
1067 1134
1068#define ENABLE_INTERRUPTS(clobbers) \ 1135#define ENABLE_INTERRUPTS(clobbers) \
1069 PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers, \ 1136 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1070 pushl %eax; pushl %ecx; pushl %edx; \ 1137 pushl %eax; pushl %ecx; pushl %edx; \
1071 call *%cs:paravirt_ops+PARAVIRT_irq_enable; \ 1138 call *%cs:pv_irq_ops+PV_IRQ_irq_enable; \
1072 popl %edx; popl %ecx; popl %eax) 1139 popl %edx; popl %ecx; popl %eax)
1073 1140
1074#define ENABLE_INTERRUPTS_SYSEXIT \ 1141#define ENABLE_INTERRUPTS_SYSEXIT \
1075 PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE, \ 1142 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), CLBR_NONE,\
1076 jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit) 1143 jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_sysexit)
1077 1144
1078#define GET_CR0_INTO_EAX \ 1145#define GET_CR0_INTO_EAX \
1079 push %ecx; push %edx; \ 1146 push %ecx; push %edx; \
1080 call *paravirt_ops+PARAVIRT_read_cr0; \ 1147 call *pv_cpu_ops+PV_CPU_read_cr0; \
1081 pop %edx; pop %ecx 1148 pop %edx; pop %ecx
1082 1149
1083#endif /* __ASSEMBLY__ */ 1150#endif /* __ASSEMBLY__ */
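
Everything in the hunks above moves the wrappers from the old monolithic paravirt_ops structure to the split pv_init_ops/pv_cpu_ops/pv_apic_ops/pv_irq_ops/pv_mmu_ops groups. A minimal sketch of the resulting shape, in plain C rather than the kernel's PVOP_*CALL patching macros (struct and field names are taken from the hunks, the bodies are simplified for illustration):

struct pv_mmu_ops {
	void (*flush_tlb_user)(void);
	void (*flush_tlb_single)(unsigned long addr);
	/* ... one function pointer per MMU hook ... */
};

extern struct pv_mmu_ops pv_mmu_ops;

/* What a wrapper boils down to conceptually: an indirect call through
 * the per-group struct.  The real PVOP_VCALL1() additionally records
 * the call site so it can later be patched to a direct call or inlined. */
static inline void example_flush_tlb_single(unsigned long addr)
{
	pv_mmu_ops.flush_tlb_single(addr);
}
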
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
index 2a31157349c9..019cbca24a38 100644
--- a/include/asm-x86/parport.h
+++ b/include/asm-x86/parport.h
@@ -1,5 +1,10 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_PARPORT_H
2# include "parport_32.h" 2#define _ASM_X86_PARPORT_H
3#else 3
4# include "parport_64.h" 4static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
5#endif 5static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
6{
7 return parport_pc_find_isa_ports (autoirq, autodma);
8}
9
10#endif /* _ASM_X86_PARPORT_H */
diff --git a/include/asm-x86/parport_32.h b/include/asm-x86/parport_32.h
deleted file mode 100644
index fa0e321e498e..000000000000
--- a/include/asm-x86/parport_32.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * parport.h: ia32-specific parport initialisation
3 *
4 * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
5 *
6 * This file should only be included by drivers/parport/parport_pc.c.
7 */
8
9#ifndef _ASM_I386_PARPORT_H
10#define _ASM_I386_PARPORT_H 1
11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
14{
15 return parport_pc_find_isa_ports (autoirq, autodma);
16}
17
18#endif /* !(_ASM_I386_PARPORT_H) */
diff --git a/include/asm-x86/parport_64.h b/include/asm-x86/parport_64.h
deleted file mode 100644
index 7135ef977c96..000000000000
--- a/include/asm-x86/parport_64.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * parport.h: ia32-specific parport initialisation
3 *
4 * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
5 *
6 * This file should only be included by drivers/parport/parport_pc.c.
7 */
8
9#ifndef _ASM_X8664_PARPORT_H
10#define _ASM_X8664_PARPORT_H 1
11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
14{
15 return parport_pc_find_isa_ports (autoirq, autodma);
16}
17
18#endif
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index fb49f80eb94f..35962bbe5e72 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -30,6 +30,12 @@ struct x8664_pda {
30 struct mm_struct *active_mm; 30 struct mm_struct *active_mm;
31 unsigned apic_timer_irqs; 31 unsigned apic_timer_irqs;
32 unsigned irq0_irqs; 32 unsigned irq0_irqs;
33 unsigned irq_resched_count;
34 unsigned irq_call_count;
35 unsigned irq_tlb_count;
36 unsigned irq_thermal_count;
37 unsigned irq_threshold_count;
38 unsigned irq_spurious_count;
33} ____cacheline_aligned_in_smp; 39} ____cacheline_aligned_in_smp;
34 40
35extern struct x8664_pda *_cpu_pda[]; 41extern struct x8664_pda *_cpu_pda[];
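
The new fields give x86-64 a per-CPU home for the interrupt statistics reported through /proc. A small sketch of how such a counter is typically bumped and read, assuming the add_pda()/read_pda() accessors defined elsewhere in this header (illustrative only, not a quote of the counting code added by this patch):

static inline void count_resched_ipi(void)
{
	add_pda(irq_resched_count, 1);		/* this CPU's counter */
}

static inline unsigned resched_ipis_on_this_cpu(void)
{
	return read_pda(irq_resched_count);
}
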
diff --git a/include/asm-x86/pgtable-3level-defs.h b/include/asm-x86/pgtable-3level-defs.h
index c0df89f66e8b..448ac9516314 100644
--- a/include/asm-x86/pgtable-3level-defs.h
+++ b/include/asm-x86/pgtable-3level-defs.h
@@ -2,7 +2,7 @@
2#define _I386_PGTABLE_3LEVEL_DEFS_H 2#define _I386_PGTABLE_3LEVEL_DEFS_H
3 3
4#ifdef CONFIG_PARAVIRT 4#ifdef CONFIG_PARAVIRT
5#define SHARED_KERNEL_PMD (paravirt_ops.shared_kernel_pmd) 5#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
6#else 6#else
7#define SHARED_KERNEL_PMD 1 7#define SHARED_KERNEL_PMD 1
8#endif 8#endif
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 3845fe72383e..83800e7496ee 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -595,7 +595,9 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
595 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx 595 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
596 * resulting in stale register contents being returned. 596 * resulting in stale register contents being returned.
597 */ 597 */
598static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) 598static inline void cpuid(unsigned int op,
599 unsigned int *eax, unsigned int *ebx,
600 unsigned int *ecx, unsigned int *edx)
599{ 601{
600 *eax = op; 602 *eax = op;
601 *ecx = 0; 603 *ecx = 0;
@@ -603,8 +605,9 @@ static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
603} 605}
604 606
605/* Some CPUID calls want 'count' to be placed in ecx */ 607/* Some CPUID calls want 'count' to be placed in ecx */
606static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, 608static inline void cpuid_count(unsigned int op, int count,
607 int *edx) 609 unsigned int *eax, unsigned int *ebx,
610 unsigned int *ecx, unsigned int *edx)
608{ 611{
609 *eax = op; 612 *eax = op;
610 *ecx = count; 613 *ecx = count;
@@ -674,6 +677,17 @@ static inline unsigned int cpuid_edx(unsigned int op)
674#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" 677#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
675#define K7_NOP8 K7_NOP7 ASM_NOP1 678#define K7_NOP8 K7_NOP7 ASM_NOP1
676 679
680/* P6 nops */
681/* uses eax dependencies (Intel-recommended choice) */
682#define P6_NOP1 GENERIC_NOP1
683#define P6_NOP2 ".byte 0x66,0x90\n"
684#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
685#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
686#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
687#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
688#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
689#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
690
677#ifdef CONFIG_MK8 691#ifdef CONFIG_MK8
678#define ASM_NOP1 K8_NOP1 692#define ASM_NOP1 K8_NOP1
679#define ASM_NOP2 K8_NOP2 693#define ASM_NOP2 K8_NOP2
@@ -692,6 +706,17 @@ static inline unsigned int cpuid_edx(unsigned int op)
692#define ASM_NOP6 K7_NOP6 706#define ASM_NOP6 K7_NOP6
693#define ASM_NOP7 K7_NOP7 707#define ASM_NOP7 K7_NOP7
694#define ASM_NOP8 K7_NOP8 708#define ASM_NOP8 K7_NOP8
709#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
710 defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
711 defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4)
712#define ASM_NOP1 P6_NOP1
713#define ASM_NOP2 P6_NOP2
714#define ASM_NOP3 P6_NOP3
715#define ASM_NOP4 P6_NOP4
716#define ASM_NOP5 P6_NOP5
717#define ASM_NOP6 P6_NOP6
718#define ASM_NOP7 P6_NOP7
719#define ASM_NOP8 P6_NOP8
695#else 720#else
696#define ASM_NOP1 GENERIC_NOP1 721#define ASM_NOP1 GENERIC_NOP1
697#define ASM_NOP2 GENERIC_NOP2 722#define ASM_NOP2 GENERIC_NOP2
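
The P6_NOP* strings are Intel's long-NOP encodings (opcode 0F 1F), one instruction per padding size, and the ASM_NOP* selection above simply prefers them on CPU families known to decode them cheaply. Since the macros expand to assembler text, they are spliced straight into asm statements; a minimal usage sketch (the kernel mostly feeds these to the alternatives machinery rather than open-coding this):

/* Pad exactly five bytes with a single instruction on P6-class CPUs. */
static inline void pad_five_bytes(void)
{
	asm volatile(ASM_NOP5);
}
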
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index 31f579b828f2..f422becbddd9 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -334,6 +334,16 @@ struct extended_sigtable {
334}; 334};
335 335
336 336
337#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2)
338#define ASM_NOP1 P6_NOP1
339#define ASM_NOP2 P6_NOP2
340#define ASM_NOP3 P6_NOP3
341#define ASM_NOP4 P6_NOP4
342#define ASM_NOP5 P6_NOP5
343#define ASM_NOP6 P6_NOP6
344#define ASM_NOP7 P6_NOP7
345#define ASM_NOP8 P6_NOP8
346#else
337#define ASM_NOP1 K8_NOP1 347#define ASM_NOP1 K8_NOP1
338#define ASM_NOP2 K8_NOP2 348#define ASM_NOP2 K8_NOP2
339#define ASM_NOP3 K8_NOP3 349#define ASM_NOP3 K8_NOP3
@@ -342,6 +352,7 @@ struct extended_sigtable {
342#define ASM_NOP6 K8_NOP6 352#define ASM_NOP6 K8_NOP6
343#define ASM_NOP7 K8_NOP7 353#define ASM_NOP7 K8_NOP7
344#define ASM_NOP8 K8_NOP8 354#define ASM_NOP8 K8_NOP8
355#endif
345 356
346/* Opteron nops */ 357/* Opteron nops */
347#define K8_NOP1 ".byte 0x90\n" 358#define K8_NOP1 ".byte 0x90\n"
@@ -353,6 +364,17 @@ struct extended_sigtable {
353#define K8_NOP7 K8_NOP4 K8_NOP3 364#define K8_NOP7 K8_NOP4 K8_NOP3
354#define K8_NOP8 K8_NOP4 K8_NOP4 365#define K8_NOP8 K8_NOP4 K8_NOP4
355 366
367/* P6 nops */
368/* uses eax dependencies (Intel-recommended choice) */
369#define P6_NOP1 ".byte 0x90\n"
370#define P6_NOP2 ".byte 0x66,0x90\n"
371#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
372#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
373#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
374#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
375#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
376#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
377
356#define ASM_NOP_MAX 8 378#define ASM_NOP_MAX 8
357 379
358/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ 380/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
@@ -389,11 +411,6 @@ static inline void prefetchw(void *x)
389 411
390#define cpu_relax() rep_nop() 412#define cpu_relax() rep_nop()
391 413
392static inline void serialize_cpu(void)
393{
394 __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
395}
396
397static inline void __monitor(const void *eax, unsigned long ecx, 414static inline void __monitor(const void *eax, unsigned long ecx,
398 unsigned long edx) 415 unsigned long edx)
399{ 416{
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
index 6824c49def1c..7524e1233833 100644
--- a/include/asm-x86/ptrace-abi.h
+++ b/include/asm-x86/ptrace-abi.h
@@ -1,13 +1,81 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_PTRACE_ABI_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_PTRACE_ABI_H
3# include "ptrace-abi_32.h" 3
4# else 4#ifdef __i386__
5# include "ptrace-abi_64.h" 5
6# endif 6#define EBX 0
7#define ECX 1
8#define EDX 2
9#define ESI 3
10#define EDI 4
11#define EBP 5
12#define EAX 6
13#define DS 7
14#define ES 8
15#define FS 9
16#define GS 10
17#define ORIG_EAX 11
18#define EIP 12
19#define CS 13
20#define EFL 14
21#define UESP 15
22#define SS 16
23#define FRAME_SIZE 17
24
25#else /* __i386__ */
26
27#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
28#define R15 0
29#define R14 8
30#define R13 16
31#define R12 24
32#define RBP 32
33#define RBX 40
34/* arguments: interrupts/non tracing syscalls only save upto here*/
35#define R11 48
36#define R10 56
37#define R9 64
38#define R8 72
39#define RAX 80
40#define RCX 88
41#define RDX 96
42#define RSI 104
43#define RDI 112
44#define ORIG_RAX 120 /* = ERROR */
45/* end of arguments */
46/* cpu exception frame or undefined in case of fast syscall. */
47#define RIP 128
48#define CS 136
49#define EFLAGS 144
50#define RSP 152
51#define SS 160
52#define ARGOFFSET R11
53#endif /* __ASSEMBLY__ */
54
55/* top of stack page */
56#define FRAME_SIZE 168
57
58#endif /* !__i386__ */
59
60/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
61#define PTRACE_GETREGS 12
62#define PTRACE_SETREGS 13
63#define PTRACE_GETFPREGS 14
64#define PTRACE_SETFPREGS 15
65#define PTRACE_GETFPXREGS 18
66#define PTRACE_SETFPXREGS 19
67
68#define PTRACE_OLDSETOPTIONS 21
69
70/* only useful for access 32bit programs / kernels */
71#define PTRACE_GET_THREAD_AREA 25
72#define PTRACE_SET_THREAD_AREA 26
73
74#ifdef __x86_64__
75# define PTRACE_ARCH_PRCTL 30
7#else 76#else
8# ifdef __i386__ 77# define PTRACE_SYSEMU 31
9# include "ptrace-abi_32.h" 78# define PTRACE_SYSEMU_SINGLESTEP 32
10# else 79#endif
11# include "ptrace-abi_64.h" 80
12# endif
13#endif 81#endif
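
On i386 the register names above are word indices into the saved register frame, so the usual userspace idiom is PTRACE_PEEKUSER with the index scaled by the word size. A hedged sketch of that pattern (userspace code, an assumed usage rather than anything this header mandates):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace-abi.h>	/* the EIP index defined above */

static long read_child_eip(pid_t pid)
{
	/* EIP is a word index; PTRACE_PEEKUSER wants a byte offset. */
	return ptrace(PTRACE_PEEKUSER, pid,
		      (void *)(EIP * sizeof(long)), 0);
}
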
diff --git a/include/asm-x86/ptrace-abi_32.h b/include/asm-x86/ptrace-abi_32.h
deleted file mode 100644
index a44901817a26..000000000000
--- a/include/asm-x86/ptrace-abi_32.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef I386_PTRACE_ABI_H
2#define I386_PTRACE_ABI_H
3
4#define EBX 0
5#define ECX 1
6#define EDX 2
7#define ESI 3
8#define EDI 4
9#define EBP 5
10#define EAX 6
11#define DS 7
12#define ES 8
13#define FS 9
14#define GS 10
15#define ORIG_EAX 11
16#define EIP 12
17#define CS 13
18#define EFL 14
19#define UESP 15
20#define SS 16
21#define FRAME_SIZE 17
22
23/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
24#define PTRACE_GETREGS 12
25#define PTRACE_SETREGS 13
26#define PTRACE_GETFPREGS 14
27#define PTRACE_SETFPREGS 15
28#define PTRACE_GETFPXREGS 18
29#define PTRACE_SETFPXREGS 19
30
31#define PTRACE_OLDSETOPTIONS 21
32
33#define PTRACE_GET_THREAD_AREA 25
34#define PTRACE_SET_THREAD_AREA 26
35
36#define PTRACE_SYSEMU 31
37#define PTRACE_SYSEMU_SINGLESTEP 32
38
39#endif
diff --git a/include/asm-x86/ptrace-abi_64.h b/include/asm-x86/ptrace-abi_64.h
deleted file mode 100644
index 19184b0806b1..000000000000
--- a/include/asm-x86/ptrace-abi_64.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _X86_64_PTRACE_ABI_H
2#define _X86_64_PTRACE_ABI_H
3
4#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
5#define R15 0
6#define R14 8
7#define R13 16
8#define R12 24
9#define RBP 32
10#define RBX 40
11/* arguments: interrupts/non tracing syscalls only save upto here*/
12#define R11 48
13#define R10 56
14#define R9 64
15#define R8 72
16#define RAX 80
17#define RCX 88
18#define RDX 96
19#define RSI 104
20#define RDI 112
21#define ORIG_RAX 120 /* = ERROR */
22/* end of arguments */
23/* cpu exception frame or undefined in case of fast syscall. */
24#define RIP 128
25#define CS 136
26#define EFLAGS 144
27#define RSP 152
28#define SS 160
29#define ARGOFFSET R11
30#endif /* __ASSEMBLY__ */
31
32/* top of stack page */
33#define FRAME_SIZE 168
34
35#define PTRACE_OLDSETOPTIONS 21
36
37/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
38#define PTRACE_GETREGS 12
39#define PTRACE_SETREGS 13
40#define PTRACE_GETFPREGS 14
41#define PTRACE_SETFPREGS 15
42#define PTRACE_GETFPXREGS 18
43#define PTRACE_SETFPXREGS 19
44
45/* only useful for access 32bit programs */
46#define PTRACE_GET_THREAD_AREA 25
47#define PTRACE_SET_THREAD_AREA 26
48
49#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
50
51#endif
diff --git a/include/asm-x86/resource.h b/include/asm-x86/resource.h
index 732410a8c02a..04bc4db8921b 100644
--- a/include/asm-x86/resource.h
+++ b/include/asm-x86/resource.h
@@ -1,13 +1 @@
1#ifdef __KERNEL__ #include <asm-generic/resource.h>
2# ifdef CONFIG_X86_32
3# include "resource_32.h"
4# else
5# include "resource_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "resource_32.h"
10# else
11# include "resource_64.h"
12# endif
13#endif
diff --git a/include/asm-x86/resource_32.h b/include/asm-x86/resource_32.h
deleted file mode 100644
index 6c1ea37c7718..000000000000
--- a/include/asm-x86/resource_32.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _I386_RESOURCE_H
2#define _I386_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif
diff --git a/include/asm-x86/resource_64.h b/include/asm-x86/resource_64.h
deleted file mode 100644
index f40b40623234..000000000000
--- a/include/asm-x86/resource_64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _X8664_RESOURCE_H
2#define _X8664_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif
diff --git a/include/asm-x86/rtc.h b/include/asm-x86/rtc.h
index 1f0c98eb2e38..f71c3b0ed360 100644
--- a/include/asm-x86/rtc.h
+++ b/include/asm-x86/rtc.h
@@ -1,5 +1 @@
1#ifdef CONFIG_X86_32 #include <asm-generic/rtc.h>
2# include "rtc_32.h"
3#else
4# include "rtc_64.h"
5#endif
diff --git a/include/asm-x86/rtc_32.h b/include/asm-x86/rtc_32.h
deleted file mode 100644
index ffd02109a0e5..000000000000
--- a/include/asm-x86/rtc_32.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _I386_RTC_H
2#define _I386_RTC_H
3
4/*
5 * x86 uses the default access methods for the RTC.
6 */
7
8#include <asm-generic/rtc.h>
9
10#endif
diff --git a/include/asm-x86/rtc_64.h b/include/asm-x86/rtc_64.h
deleted file mode 100644
index 18ed713ac7de..000000000000
--- a/include/asm-x86/rtc_64.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _X86_64_RTC_H
2#define _X86_64_RTC_H
3
4/*
5 * x86 uses the default access methods for the RTC.
6 */
7
8#include <asm-generic/rtc.h>
9
10#endif
diff --git a/include/asm-x86/rwlock.h b/include/asm-x86/rwlock.h
index a3be7d8364af..f2b64a429e6b 100644
--- a/include/asm-x86/rwlock.h
+++ b/include/asm-x86/rwlock.h
@@ -1,5 +1,9 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_RWLOCK_H
2# include "rwlock_32.h" 2#define _ASM_X86_RWLOCK_H
3#else 3
4# include "rwlock_64.h" 4#define RW_LOCK_BIAS 0x01000000
5#endif 5#define RW_LOCK_BIAS_STR "0x01000000"
6
7/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
8
9#endif /* _ASM_X86_RWLOCK_H */
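
RW_LOCK_BIAS is the classic x86 reader/writer counting trick: the lock word starts at the bias, each reader takes 1 and a writer takes the whole bias, so the word reaches zero only while a single writer holds the lock and goes negative under contention. A simplified C11 model of that accounting, assuming nothing beyond this header (the real code is locked asm in asm/spinlock.h and rwlock.S):

#include <stdatomic.h>
#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000

static atomic_int lock_word = RW_LOCK_BIAS;

static bool read_trylock(void)
{
	if (atomic_fetch_sub(&lock_word, 1) > 0)
		return true;			/* no writer present */
	atomic_fetch_add(&lock_word, 1);	/* back out and fail */
	return false;
}

static bool write_trylock(void)
{
	int expected = RW_LOCK_BIAS;	/* no readers, no writer */
	return atomic_compare_exchange_strong(&lock_word, &expected, 0);
}
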
diff --git a/include/asm-x86/rwlock_32.h b/include/asm-x86/rwlock_32.h
deleted file mode 100644
index c3e5db32fa48..000000000000
--- a/include/asm-x86/rwlock_32.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/* include/asm-i386/rwlock.h
2 *
3 * Helpers used by both rw spinlocks and rw semaphores.
4 *
5 * Based in part on code from semaphore.h and
6 * spinlock.h Copyright 1996 Linus Torvalds.
7 *
8 * Copyright 1999 Red Hat, Inc.
9 *
10 * Written by Benjamin LaHaise.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17#ifndef _ASM_I386_RWLOCK_H
18#define _ASM_I386_RWLOCK_H
19
20#define RW_LOCK_BIAS 0x01000000
21#define RW_LOCK_BIAS_STR "0x01000000"
22
23/* Code is in asm-i386/spinlock.h */
24
25#endif
diff --git a/include/asm-x86/rwlock_64.h b/include/asm-x86/rwlock_64.h
deleted file mode 100644
index 72aeebed920b..000000000000
--- a/include/asm-x86/rwlock_64.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* include/asm-x86_64/rwlock.h
2 *
3 * Helpers used by both rw spinlocks and rw semaphores.
4 *
5 * Based in part on code from semaphore.h and
6 * spinlock.h Copyright 1996 Linus Torvalds.
7 *
8 * Copyright 1999 Red Hat, Inc.
9 * Copyright 2001,2002 SuSE labs
10 *
11 * Written by Benjamin LaHaise.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifndef _ASM_X86_64_RWLOCK_H
19#define _ASM_X86_64_RWLOCK_H
20
21#define RW_LOCK_BIAS 0x01000000
22#define RW_LOCK_BIAS_STR "0x01000000"
23
24/* Actual code is in asm/spinlock.h or in arch/x86_64/lib/rwlock.S */
25
26#endif
diff --git a/include/asm-x86/sections.h b/include/asm-x86/sections.h
index ae6c69d9be3f..2b8c5160388f 100644
--- a/include/asm-x86/sections.h
+++ b/include/asm-x86/sections.h
@@ -1,5 +1 @@
1#ifdef CONFIG_X86_32 #include <asm-generic/sections.h>
2# include "sections_32.h"
3#else
4# include "sections_64.h"
5#endif
diff --git a/include/asm-x86/sections_32.h b/include/asm-x86/sections_32.h
deleted file mode 100644
index 2dcbb92918b2..000000000000
--- a/include/asm-x86/sections_32.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef _I386_SECTIONS_H
2#define _I386_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7#endif
diff --git a/include/asm-x86/sections_64.h b/include/asm-x86/sections_64.h
deleted file mode 100644
index c746d9f1e70c..000000000000
--- a/include/asm-x86/sections_64.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef _X8664_SECTIONS_H
2#define _X8664_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7#endif
diff --git a/include/asm-x86/sembuf.h b/include/asm-x86/sembuf.h
index e42c971e383f..ee50c801f7b7 100644
--- a/include/asm-x86/sembuf.h
+++ b/include/asm-x86/sembuf.h
@@ -1,13 +1,24 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_SEMBUF_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_SEMBUF_H
3# include "sembuf_32.h" 3
4# else 4/*
5# include "sembuf_64.h" 5 * The semid64_ds structure for x86 architecture.
6# endif 6 * Note extra padding because this structure is passed back and forth
7#else 7 * between kernel and user space.
8# ifdef __i386__ 8 *
9# include "sembuf_32.h" 9 * Pad space is left for:
10# else 10 * - 64-bit time_t to solve y2038 problem
11# include "sembuf_64.h" 11 * - 2 miscellaneous 32-bit values
12# endif 12 */
13#endif 13struct semid64_ds {
14 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
15 __kernel_time_t sem_otime; /* last semop time */
16 unsigned long __unused1;
17 __kernel_time_t sem_ctime; /* last change time */
18 unsigned long __unused2;
19 unsigned long sem_nsems; /* no. of semaphores in array */
20 unsigned long __unused3;
21 unsigned long __unused4;
22};
23
24#endif /* _ASM_X86_SEMBUF_H */
diff --git a/include/asm-x86/sembuf_32.h b/include/asm-x86/sembuf_32.h
deleted file mode 100644
index 323835166c14..000000000000
--- a/include/asm-x86/sembuf_32.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _I386_SEMBUF_H
2#define _I386_SEMBUF_H
3
4/*
5 * The semid64_ds structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct semid64_ds {
15 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
16 __kernel_time_t sem_otime; /* last semop time */
17 unsigned long __unused1;
18 __kernel_time_t sem_ctime; /* last change time */
19 unsigned long __unused2;
20 unsigned long sem_nsems; /* no. of semaphores in array */
21 unsigned long __unused3;
22 unsigned long __unused4;
23};
24
25#endif /* _I386_SEMBUF_H */
diff --git a/include/asm-x86/sembuf_64.h b/include/asm-x86/sembuf_64.h
deleted file mode 100644
index 63b52925ae2a..000000000000
--- a/include/asm-x86/sembuf_64.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _X86_64_SEMBUF_H
2#define _X86_64_SEMBUF_H
3
4/*
5 * The semid64_ds structure for x86_64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct semid64_ds {
15 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
16 __kernel_time_t sem_otime; /* last semop time */
17 unsigned long __unused1;
18 __kernel_time_t sem_ctime; /* last change time */
19 unsigned long __unused2;
20 unsigned long sem_nsems; /* no. of semaphores in array */
21 unsigned long __unused3;
22 unsigned long __unused4;
23};
24
25#endif /* _X86_64_SEMBUF_H */
diff --git a/include/asm-x86/serial.h b/include/asm-x86/serial.h
index cf1b05227b29..628c801535ea 100644
--- a/include/asm-x86/serial.h
+++ b/include/asm-x86/serial.h
@@ -1,5 +1,29 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_SERIAL_H
2# include "serial_32.h" 2#define _ASM_X86_SERIAL_H
3
4/*
5 * This assumes you have a 1.8432 MHz clock for your UART.
6 *
7 * It'd be nice if someone built a serial card with a 24.576 MHz
8 * clock, since the 16550A is capable of handling a top speed of 1.5
9 * megabits/second; but this requires the faster clock.
10 */
11#define BASE_BAUD ( 1843200 / 16 )
12
13/* Standard COM flags (except for COM4, because of the 8514 problem) */
14#ifdef CONFIG_SERIAL_DETECT_IRQ
15#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
16#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
3#else 17#else
4# include "serial_64.h" 18#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
19#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
5#endif 20#endif
21
22#define SERIAL_PORT_DFNS \
23 /* UART CLK PORT IRQ FLAGS */ \
24 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
25 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
26 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
27 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
28
29#endif /* _ASM_X86_SERIAL_H */
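
BASE_BAUD is just the assumed 1.8432 MHz UART clock divided by 16, i.e. 115200, the fastest standard rate a 16550 reaches with a divisor of 1; any other rate's divisor latch value follows by division. A small arithmetic sketch:

/* 16550 divisor for a requested rate on the 1.8432 MHz clock
 * assumed above: 115200 -> 1, 38400 -> 3, 9600 -> 12. */
static unsigned int uart16550_divisor(unsigned int baud)
{
	return (1843200 / 16) / baud;	/* == BASE_BAUD / baud */
}
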
diff --git a/include/asm-x86/serial_32.h b/include/asm-x86/serial_32.h
deleted file mode 100644
index bd67480ca109..000000000000
--- a/include/asm-x86/serial_32.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * include/asm-i386/serial.h
3 */
4
5
6/*
7 * This assumes you have a 1.8432 MHz clock for your UART.
8 *
9 * It'd be nice if someone built a serial card with a 24.576 MHz
10 * clock, since the 16550A is capable of handling a top speed of 1.5
11 * megabits/second; but this requires the faster clock.
12 */
13#define BASE_BAUD ( 1843200 / 16 )
14
15/* Standard COM flags (except for COM4, because of the 8514 problem) */
16#ifdef CONFIG_SERIAL_DETECT_IRQ
17#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
18#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
19#else
20#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
21#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
22#endif
23
24#define SERIAL_PORT_DFNS \
25 /* UART CLK PORT IRQ FLAGS */ \
26 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
27 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
28 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
29 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
diff --git a/include/asm-x86/serial_64.h b/include/asm-x86/serial_64.h
deleted file mode 100644
index b0496e0d72a6..000000000000
--- a/include/asm-x86/serial_64.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * include/asm-x86_64/serial.h
3 */
4
5
6/*
7 * This assumes you have a 1.8432 MHz clock for your UART.
8 *
9 * It'd be nice if someone built a serial card with a 24.576 MHz
10 * clock, since the 16550A is capable of handling a top speed of 1.5
11 * megabits/second; but this requires the faster clock.
12 */
13#define BASE_BAUD ( 1843200 / 16 )
14
15/* Standard COM flags (except for COM4, because of the 8514 problem) */
16#ifdef CONFIG_SERIAL_DETECT_IRQ
17#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
18#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
19#else
20#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
21#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
22#endif
23
24#define SERIAL_PORT_DFNS \
25 /* UART CLK PORT IRQ FLAGS */ \
26 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
27 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
28 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
29 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
diff --git a/include/asm-x86/shmparam.h b/include/asm-x86/shmparam.h
index 165627cc5345..0880cf0917b9 100644
--- a/include/asm-x86/shmparam.h
+++ b/include/asm-x86/shmparam.h
@@ -1,13 +1,6 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_SHMPARAM_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_SHMPARAM_H
3# include "shmparam_32.h" 3
4# else 4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5# include "shmparam_64.h" 5
6# endif 6#endif /* _ASM_X86_SHMPARAM_H */
7#else
8# ifdef __i386__
9# include "shmparam_32.h"
10# else
11# include "shmparam_64.h"
12# endif
13#endif
diff --git a/include/asm-x86/shmparam_32.h b/include/asm-x86/shmparam_32.h
deleted file mode 100644
index 786243a5b319..000000000000
--- a/include/asm-x86/shmparam_32.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASMI386_SHMPARAM_H
2#define _ASMI386_SHMPARAM_H
3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5
6#endif /* _ASMI386_SHMPARAM_H */
diff --git a/include/asm-x86/shmparam_64.h b/include/asm-x86/shmparam_64.h
deleted file mode 100644
index d7021620dcb7..000000000000
--- a/include/asm-x86/shmparam_64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASMX8664_SHMPARAM_H
2#define _ASMX8664_SHMPARAM_H
3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5
6#endif /* _ASMX8664_SHMPARAM_H */
diff --git a/include/asm-x86/siginfo.h b/include/asm-x86/siginfo.h
index 0b8e4bb47d25..a477bea0c2a1 100644
--- a/include/asm-x86/siginfo.h
+++ b/include/asm-x86/siginfo.h
@@ -1,13 +1,10 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_SIGINFO_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_SIGINFO_H
3# include "siginfo_32.h" 3
4# else 4#ifdef __x86_64__
5# include "siginfo_64.h" 5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
6# endif 6#endif
7#else 7
8# ifdef __i386__ 8#include <asm-generic/siginfo.h>
9# include "siginfo_32.h" 9
10# else
11# include "siginfo_64.h"
12# endif
13#endif 10#endif
diff --git a/include/asm-x86/siginfo_32.h b/include/asm-x86/siginfo_32.h
deleted file mode 100644
index fe18f98fccfa..000000000000
--- a/include/asm-x86/siginfo_32.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _I386_SIGINFO_H
2#define _I386_SIGINFO_H
3
4#include <asm-generic/siginfo.h>
5
6#endif
diff --git a/include/asm-x86/siginfo_64.h b/include/asm-x86/siginfo_64.h
deleted file mode 100644
index d09a1e6e7246..000000000000
--- a/include/asm-x86/siginfo_64.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _X8664_SIGINFO_H
2#define _X8664_SIGINFO_H
3
4#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
5
6#include <asm-generic/siginfo.h>
7
8#endif
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 955dd7c8538f..ee46038d126c 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -92,12 +92,9 @@ static inline void smp_send_reschedule(int cpu)
92{ 92{
93 smp_ops.smp_send_reschedule(cpu); 93 smp_ops.smp_send_reschedule(cpu);
94} 94}
95static inline int smp_call_function_mask(cpumask_t mask, 95extern int smp_call_function_mask(cpumask_t mask,
96 void (*func) (void *info), void *info, 96 void (*func) (void *info), void *info,
97 int wait) 97 int wait);
98{
99 return smp_ops.smp_call_function_mask(mask, func, info, wait);
100}
101 98
102void native_smp_prepare_boot_cpu(void); 99void native_smp_prepare_boot_cpu(void);
103void native_smp_prepare_cpus(unsigned int max_cpus); 100void native_smp_prepare_cpus(unsigned int max_cpus);
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index f5bcee1c0927..d30e9b684fdd 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -85,7 +85,6 @@ static inline int hard_smp_processor_id(void)
85 * the real APIC ID <-> CPU # mapping. 85 * the real APIC ID <-> CPU # mapping.
86 */ 86 */
87extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */ 87extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
88extern u8 x86_cpu_to_log_apicid[NR_CPUS];
89extern u8 bios_cpu_apicid[]; 88extern u8 bios_cpu_apicid[];
90 89
91static inline int cpu_present_to_apicid(int mps_cpu) 90static inline int cpu_present_to_apicid(int mps_cpu)
diff --git a/include/asm-x86/sockios.h b/include/asm-x86/sockios.h
index 5a134fc70b9c..49cc72b5d3c9 100644
--- a/include/asm-x86/sockios.h
+++ b/include/asm-x86/sockios.h
@@ -1,13 +1,13 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_SOCKIOS_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_SOCKIOS_H
3# include "sockios_32.h" 3
4# else 4/* Socket-level I/O control calls. */
5# include "sockios_64.h" 5#define FIOSETOWN 0x8901
6# endif 6#define SIOCSPGRP 0x8902
7#else 7#define FIOGETOWN 0x8903
8# ifdef __i386__ 8#define SIOCGPGRP 0x8904
9# include "sockios_32.h" 9#define SIOCATMARK 0x8905
10# else 10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11# include "sockios_64.h" 11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12# endif 12
13#endif 13#endif /* _ASM_X86_SOCKIOS_H */
diff --git a/include/asm-x86/sockios_32.h b/include/asm-x86/sockios_32.h
deleted file mode 100644
index ff528c7d255c..000000000000
--- a/include/asm-x86/sockios_32.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ARCH_I386_SOCKIOS__
2#define __ARCH_I386_SOCKIOS__
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif
diff --git a/include/asm-x86/sockios_64.h b/include/asm-x86/sockios_64.h
deleted file mode 100644
index d726ba2513e3..000000000000
--- a/include/asm-x86/sockios_64.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ARCH_X8664_SOCKIOS__
2#define __ARCH_X8664_SOCKIOS__
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif
diff --git a/include/asm-x86/stacktrace.h b/include/asm-x86/stacktrace.h
index 6f0b54594307..70dd5bae3235 100644
--- a/include/asm-x86/stacktrace.h
+++ b/include/asm-x86/stacktrace.h
@@ -15,6 +15,6 @@ struct stacktrace_ops {
15}; 15};
16 16
17void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, 17void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
18 struct stacktrace_ops *ops, void *data); 18 const struct stacktrace_ops *ops, void *data);
19 19
20#endif 20#endif
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index a9b64453bdf5..55bfa308f900 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -26,9 +26,6 @@ extern int strncmp(const char *cs, const char *ct, size_t count);
26#define __HAVE_ARCH_STRCHR 26#define __HAVE_ARCH_STRCHR
27extern char *strchr(const char *s, int c); 27extern char *strchr(const char *s, int c);
28 28
29#define __HAVE_ARCH_STRRCHR
30extern char *strrchr(const char *s, int c);
31
32#define __HAVE_ARCH_STRLEN 29#define __HAVE_ARCH_STRLEN
33extern size_t strlen(const char *s); 30extern size_t strlen(const char *s);
34 31
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index d84e593b7dfc..db6283eb5e46 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -142,7 +142,7 @@ static inline unsigned long native_read_cr4_safe(void)
142{ 142{
143 unsigned long val; 143 unsigned long val;
144 /* This could fault if %cr4 does not exist */ 144 /* This could fault if %cr4 does not exist */
145 asm("1: movl %%cr4, %0 \n" 145 asm volatile("1: movl %%cr4, %0 \n"
146 "2: \n" 146 "2: \n"
147 ".section __ex_table,\"a\" \n" 147 ".section __ex_table,\"a\" \n"
148 ".long 1b,2b \n" 148 ".long 1b,2b \n"
@@ -161,6 +161,10 @@ static inline void native_wbinvd(void)
161 asm volatile("wbinvd": : :"memory"); 161 asm volatile("wbinvd": : :"memory");
162} 162}
163 163
164static inline void clflush(volatile void *__p)
165{
166 asm volatile("clflush %0" : "+m" (*(char __force *)__p));
167}
164 168
165#ifdef CONFIG_PARAVIRT 169#ifdef CONFIG_PARAVIRT
166#include <asm/paravirt.h> 170#include <asm/paravirt.h>
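
The clflush() helper added here writes back and invalidates the cache line containing the pointed-to byte, so flushing a whole buffer means stepping through it one line at a time, normally followed by a fence to order the flushes against later stores. A sketch under the assumption of 64-byte lines (the real line size comes from CPUID, and the mfence is this example's choice, not something the header supplies):

/* Flush every cache line covering buf[0..len). */
static void flush_buffer(void *buf, unsigned long len)
{
	char *p = buf;
	unsigned long i;

	for (i = 0; i < len; i += 64)		/* assumed line size */
		clflush(p + i);
	asm volatile("mfence" ::: "memory");	/* order the flushes */
}
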
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 5022aecc333d..4cb23848d460 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -7,9 +7,6 @@
7 7
8#ifdef __KERNEL__ 8#ifdef __KERNEL__
9 9
10#define __STR(x) #x
11#define STR(x) __STR(x)
12
13#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" 10#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
14#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" 11#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
15 12
@@ -85,7 +82,7 @@ static inline void write_cr0(unsigned long val)
85static inline unsigned long read_cr2(void) 82static inline unsigned long read_cr2(void)
86{ 83{
87 unsigned long cr2; 84 unsigned long cr2;
88 asm("movq %%cr2,%0" : "=r" (cr2)); 85 asm volatile("movq %%cr2,%0" : "=r" (cr2));
89 return cr2; 86 return cr2;
90} 87}
91 88
@@ -97,7 +94,7 @@ static inline void write_cr2(unsigned long val)
97static inline unsigned long read_cr3(void) 94static inline unsigned long read_cr3(void)
98{ 95{
99 unsigned long cr3; 96 unsigned long cr3;
100 asm("movq %%cr3,%0" : "=r" (cr3)); 97 asm volatile("movq %%cr3,%0" : "=r" (cr3));
101 return cr3; 98 return cr3;
102} 99}
103 100
@@ -109,7 +106,7 @@ static inline void write_cr3(unsigned long val)
109static inline unsigned long read_cr4(void) 106static inline unsigned long read_cr4(void)
110{ 107{
111 unsigned long cr4; 108 unsigned long cr4;
112 asm("movq %%cr4,%0" : "=r" (cr4)); 109 asm volatile("movq %%cr4,%0" : "=r" (cr4));
113 return cr4; 110 return cr4;
114} 111}
115 112
@@ -121,7 +118,7 @@ static inline void write_cr4(unsigned long val)
121static inline unsigned long read_cr8(void) 118static inline unsigned long read_cr8(void)
122{ 119{
123 unsigned long cr8; 120 unsigned long cr8;
124 asm("movq %%cr8,%0" : "=r" (cr8)); 121 asm volatile("movq %%cr8,%0" : "=r" (cr8));
125 return cr8; 122 return cr8;
126} 123}
127 124
@@ -137,6 +134,11 @@ static inline void write_cr8(unsigned long val)
137 134
138#endif /* __KERNEL__ */ 135#endif /* __KERNEL__ */
139 136
137static inline void clflush(volatile void *__p)
138{
139 asm volatile("clflush %0" : "+m" (*(char __force *)__p));
140}
141
140#define nop() __asm__ __volatile__ ("nop") 142#define nop() __asm__ __volatile__ ("nop")
141 143
142#ifdef CONFIG_SMP 144#ifdef CONFIG_SMP
diff --git a/include/asm-x86/termbits.h b/include/asm-x86/termbits.h
index 69f3080e2a1d..af1b70ea440f 100644
--- a/include/asm-x86/termbits.h
+++ b/include/asm-x86/termbits.h
@@ -1,13 +1,198 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_TERMBITS_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_TERMBITS_H
3# include "termbits_32.h" 3
4# else 4#include <linux/posix_types.h>
5# include "termbits_64.h" 5
6# endif 6typedef unsigned char cc_t;
7#else 7typedef unsigned int speed_t;
8# ifdef __i386__ 8typedef unsigned int tcflag_t;
9# include "termbits_32.h" 9
10# else 10#define NCCS 19
11# include "termbits_64.h" 11struct termios {
12# endif 12 tcflag_t c_iflag; /* input mode flags */
13#endif 13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000 /* non standard rate */
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000 /* input baud rate */
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif /* _ASM_X86_TERMBITS_H */
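A minimal userspace sketch (not part of this patch) of what the struct termios2 / BOTHER definitions above are for: selecting a non-standard baud rate. It assumes the TCGETS2/TCSETS2 ioctls are available from <asm/ioctls.h>; set_custom_baud() is a hypothetical helper name:

#include <sys/ioctl.h>
#include <asm/termbits.h>	/* struct termios2, CBAUD, BOTHER */

/* Hypothetical sketch: ask the tty driver for an arbitrary baud rate. */
static int set_custom_baud(int fd, unsigned int baud)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~CBAUD;		/* drop the legacy Bxxxx code */
	tio.c_cflag |= BOTHER;		/* use c_ispeed/c_ospeed directly */
	tio.c_ispeed = baud;
	tio.c_ospeed = baud;
	return ioctl(fd, TCSETS2, &tio);
}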
diff --git a/include/asm-x86/termbits_32.h b/include/asm-x86/termbits_32.h
deleted file mode 100644
index a21700352e7b..000000000000
--- a/include/asm-x86/termbits_32.h
+++ /dev/null
@@ -1,198 +0,0 @@
1#ifndef __ARCH_I386_TERMBITS_H__
2#define __ARCH_I386_TERMBITS_H__
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif
diff --git a/include/asm-x86/termbits_64.h b/include/asm-x86/termbits_64.h
deleted file mode 100644
index 7405756dd41b..000000000000
--- a/include/asm-x86/termbits_64.h
+++ /dev/null
@@ -1,198 +0,0 @@
1#ifndef __ARCH_X8664_TERMBITS_H__
2#define __ARCH_X8664_TERMBITS_H__
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000 /* non standard rate */
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000 /* input baud rate */
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif
diff --git a/include/asm-x86/termios.h b/include/asm-x86/termios.h
index a4f4ae20a591..d501748700d2 100644
--- a/include/asm-x86/termios.h
+++ b/include/asm-x86/termios.h
@@ -1,13 +1,97 @@
1#ifndef _ASM_X86_TERMIOS_H
2#define _ASM_X86_TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
1#ifdef __KERNEL__ 42#ifdef __KERNEL__
2# ifdef CONFIG_X86_32 43
3# include "termios_32.h" 44/* intr=^C quit=^\ erase=del kill=^U
4# else 45 eof=^D vtime=\0 vmin=\1 sxtc=\0
5# include "termios_64.h" 46 start=^Q stop=^S susp=^Z eol=\0
6# endif 47 reprint=^R discard=^U werase=^W lnext=^V
7#else 48 eol2=\0
8# ifdef __i386__ 49*/
9# include "termios_32.h" 50#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
10# else 51
11# include "termios_64.h" 52/*
12# endif 53 * Translate a "termio" structure into a "termios". Ugh.
13#endif 54 */
55#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
56 unsigned short __tmp; \
57 get_user(__tmp,&(termio)->x); \
58 *(unsigned short *) &(termios)->x = __tmp; \
59}
60
61#define user_termio_to_kernel_termios(termios, termio) \
62({ \
63 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
64 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
65 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
66 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
67 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
68})
69
70/*
71 * Translate a "termios" structure into a "termio". Ugh.
72 */
73#define kernel_termios_to_user_termio(termio, termios) \
74({ \
75 put_user((termios)->c_iflag, &(termio)->c_iflag); \
76 put_user((termios)->c_oflag, &(termio)->c_oflag); \
77 put_user((termios)->c_cflag, &(termio)->c_cflag); \
78 put_user((termios)->c_lflag, &(termio)->c_lflag); \
79 put_user((termios)->c_line, &(termio)->c_line); \
80 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
81})
82
83#define user_termios_to_kernel_termios(k, u) \
84 copy_from_user(k, u, sizeof(struct termios2))
85
86#define kernel_termios_to_user_termios(u, k) \
87 copy_to_user(u, k, sizeof(struct termios2))
88
89#define user_termios_to_kernel_termios_1(k, u) \
90 copy_from_user(k, u, sizeof(struct termios))
91
92#define kernel_termios_to_user_termios_1(u, k) \
93 copy_to_user(u, k, sizeof(struct termios))
94
95#endif /* __KERNEL__ */
96
97#endif /* _ASM_X86_TERMIOS_H */
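A hypothetical sketch (not part of this patch) of how a legacy-termio ioctl path might use the kernel_termios_to_user_termio() macro kept in the unified header above; get_termio_example() is an invented name, the real caller lives in the tty core:

#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/termios.h>

/* Hypothetical sketch: hand a trimmed struct termio back to user space. */
static int get_termio_example(struct ktermios *kt, struct termio __user *utermio)
{
	/* copies the low 16 bits of each flag word plus the first NCC chars */
	if (kernel_termios_to_user_termio(utermio, kt))
		return -EFAULT;
	return 0;
}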
diff --git a/include/asm-x86/termios_32.h b/include/asm-x86/termios_32.h
deleted file mode 100644
index 6fdb2c841b73..000000000000
--- a/include/asm-x86/termios_32.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef _I386_TERMIOS_H
2#define _I386_TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
42#ifdef __KERNEL__
43
44/* intr=^C quit=^\ erase=del kill=^U
45 eof=^D vtime=\0 vmin=\1 sxtc=\0
46 start=^Q stop=^S susp=^Z eol=\0
47 reprint=^R discard=^U werase=^W lnext=^V
48 eol2=\0
49*/
50#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
51
52/*
53 * Translate a "termio" structure into a "termios". Ugh.
54 */
55#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
56 unsigned short __tmp; \
57 get_user(__tmp,&(termio)->x); \
58 *(unsigned short *) &(termios)->x = __tmp; \
59}
60
61#define user_termio_to_kernel_termios(termios, termio) \
62({ \
63 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
64 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
65 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
66 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
67 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
68})
69
70/*
71 * Translate a "termios" structure into a "termio". Ugh.
72 */
73#define kernel_termios_to_user_termio(termio, termios) \
74({ \
75 put_user((termios)->c_iflag, &(termio)->c_iflag); \
76 put_user((termios)->c_oflag, &(termio)->c_oflag); \
77 put_user((termios)->c_cflag, &(termio)->c_cflag); \
78 put_user((termios)->c_lflag, &(termio)->c_lflag); \
79 put_user((termios)->c_line, &(termio)->c_line); \
80 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
81})
82
83#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
84#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
85#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
86#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
87
88#endif /* __KERNEL__ */
89
90#endif /* _I386_TERMIOS_H */
diff --git a/include/asm-x86/termios_64.h b/include/asm-x86/termios_64.h
deleted file mode 100644
index 35ee59b78329..000000000000
--- a/include/asm-x86/termios_64.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef _X8664_TERMIOS_H
2#define _X8664_TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
42#ifdef __KERNEL__
43
44/* intr=^C quit=^\ erase=del kill=^U
45 eof=^D vtime=\0 vmin=\1 sxtc=\0
46 start=^Q stop=^S susp=^Z eol=\0
47 reprint=^R discard=^U werase=^W lnext=^V
48 eol2=\0
49*/
50#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
51
52/*
53 * Translate a "termio" structure into a "termios". Ugh.
54 */
55#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
56 unsigned short __tmp; \
57 get_user(__tmp,&(termio)->x); \
58 *(unsigned short *) &(termios)->x = __tmp; \
59}
60
61#define user_termio_to_kernel_termios(termios, termio) \
62({ \
63 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
64 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
65 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
66 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
67 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
68})
69
70/*
71 * Translate a "termios" structure into a "termio". Ugh.
72 */
73#define kernel_termios_to_user_termio(termio, termios) \
74({ \
75 put_user((termios)->c_iflag, &(termio)->c_iflag); \
76 put_user((termios)->c_oflag, &(termio)->c_oflag); \
77 put_user((termios)->c_cflag, &(termio)->c_cflag); \
78 put_user((termios)->c_lflag, &(termio)->c_lflag); \
79 put_user((termios)->c_line, &(termio)->c_line); \
80 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
81})
82
83#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
84#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
85#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
86#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
87
88#endif /* __KERNEL__ */
89
90#endif /* _X8664_TERMIOS_H */
diff --git a/include/asm-x86/tlb.h b/include/asm-x86/tlb.h
index 7d55c3762b43..e4e9e2d07a93 100644
--- a/include/asm-x86/tlb.h
+++ b/include/asm-x86/tlb.h
@@ -1,5 +1,11 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_TLB_H
2# include "tlb_32.h" 2#define _ASM_X86_TLB_H
3#else 3
4# include "tlb_64.h" 4#define tlb_start_vma(tlb, vma) do { } while (0)
5#define tlb_end_vma(tlb, vma) do { } while (0)
6#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
7#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
8
9#include <asm-generic/tlb.h>
10
5#endif 11#endif
diff --git a/include/asm-x86/tlb_32.h b/include/asm-x86/tlb_32.h
deleted file mode 100644
index c006c5c92bea..000000000000
--- a/include/asm-x86/tlb_32.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _I386_TLB_H
2#define _I386_TLB_H
3
4/*
5 * x86 doesn't need any special per-pte or
6 * per-vma handling..
7 */
8#define tlb_start_vma(tlb, vma) do { } while (0)
9#define tlb_end_vma(tlb, vma) do { } while (0)
10#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
11
12/*
13 * .. because we flush the whole mm when it
14 * fills up.
15 */
16#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
17
18#include <asm-generic/tlb.h>
19
20#endif
diff --git a/include/asm-x86/tlb_64.h b/include/asm-x86/tlb_64.h
deleted file mode 100644
index cd4c3c590a0e..000000000000
--- a/include/asm-x86/tlb_64.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef TLB_H
2#define TLB_H 1
3
4
5#define tlb_start_vma(tlb, vma) do { } while (0)
6#define tlb_end_vma(tlb, vma) do { } while (0)
7#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
8
9#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
10
11#include <asm-generic/tlb.h>
12
13#endif
diff --git a/include/asm-x86/types.h b/include/asm-x86/types.h
index a777a9b83974..63733f315688 100644
--- a/include/asm-x86/types.h
+++ b/include/asm-x86/types.h
@@ -1,13 +1,70 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_TYPES_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_TYPES_H
3# include "types_32.h" 3
4# else 4#ifndef __ASSEMBLY__
5# include "types_64.h" 5
6typedef unsigned short umode_t;
7
8/*
9 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
10 * header files exported to user space
11 */
12
13typedef __signed__ char __s8;
14typedef unsigned char __u8;
15
16typedef __signed__ short __s16;
17typedef unsigned short __u16;
18
19typedef __signed__ int __s32;
20typedef unsigned int __u32;
21
22#ifdef __i386__
23# ifdef __GNUC__
24__extension__ typedef __signed__ long long __s64;
25__extension__ typedef unsigned long long __u64;
6# endif 26# endif
7#else 27#else
8# ifdef __i386__ 28typedef __signed__ long long __s64;
9# include "types_32.h" 29typedef unsigned long long __u64;
10# else 30#endif
11# include "types_64.h" 31
12# endif 32#endif /* __ASSEMBLY__ */
33
34/*
35 * These aren't exported outside the kernel to avoid name space clashes
36 */
37#ifdef __KERNEL__
38
39#ifdef CONFIG_X86_32
40# define BITS_PER_LONG 32
41#else
42# define BITS_PER_LONG 64
43#endif
44
45#ifndef __ASSEMBLY__
46
47typedef signed char s8;
48typedef unsigned char u8;
49
50typedef signed short s16;
51typedef unsigned short u16;
52
53typedef signed int s32;
54typedef unsigned int u32;
55
56typedef signed long long s64;
57typedef unsigned long long u64;
58
59typedef u64 dma64_addr_t;
60#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G)
61/* DMA addresses come in 32-bit and 64-bit flavours. */
62typedef u64 dma_addr_t;
63#else
64typedef u32 dma_addr_t;
65#endif
66
67#endif /* __ASSEMBLY__ */
68#endif /* __KERNEL__ */
69
13#endif 70#endif
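Since the unified <asm/types.h> above makes dma_addr_t 64-bit under CONFIG_X86_64 or CONFIG_HIGHMEM64G and 32-bit otherwise, code printing bus addresses has to cope with either width. A minimal sketch, not from this patch, with report_dma_addr() as a hypothetical name:

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical sketch: print a dma_addr_t regardless of its width. */
static void report_dma_addr(dma_addr_t bus)
{
	printk(KERN_DEBUG "mapped at bus address %#llx\n",
	       (unsigned long long)bus);
}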
diff --git a/include/asm-x86/types_32.h b/include/asm-x86/types_32.h
deleted file mode 100644
index faca1922c4c3..000000000000
--- a/include/asm-x86/types_32.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef _I386_TYPES_H
2#define _I386_TYPES_H
3
4#ifndef __ASSEMBLY__
5
6typedef unsigned short umode_t;
7
8/*
9 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
10 * header files exported to user space
11 */
12
13typedef __signed__ char __s8;
14typedef unsigned char __u8;
15
16typedef __signed__ short __s16;
17typedef unsigned short __u16;
18
19typedef __signed__ int __s32;
20typedef unsigned int __u32;
21
22#if defined(__GNUC__)
23__extension__ typedef __signed__ long long __s64;
24__extension__ typedef unsigned long long __u64;
25#endif
26
27#endif /* __ASSEMBLY__ */
28
29/*
30 * These aren't exported outside the kernel to avoid name space clashes
31 */
32#ifdef __KERNEL__
33
34#define BITS_PER_LONG 32
35
36#ifndef __ASSEMBLY__
37
38
39typedef signed char s8;
40typedef unsigned char u8;
41
42typedef signed short s16;
43typedef unsigned short u16;
44
45typedef signed int s32;
46typedef unsigned int u32;
47
48typedef signed long long s64;
49typedef unsigned long long u64;
50
51/* DMA addresses come in generic and 64-bit flavours. */
52
53#ifdef CONFIG_HIGHMEM64G
54typedef u64 dma_addr_t;
55#else
56typedef u32 dma_addr_t;
57#endif
58typedef u64 dma64_addr_t;
59
60#endif /* __ASSEMBLY__ */
61
62#endif /* __KERNEL__ */
63
64#endif
diff --git a/include/asm-x86/types_64.h b/include/asm-x86/types_64.h
deleted file mode 100644
index 2d4491aae281..000000000000
--- a/include/asm-x86/types_64.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _X86_64_TYPES_H
2#define _X86_64_TYPES_H
3
4#ifndef __ASSEMBLY__
5
6typedef unsigned short umode_t;
7
8/*
9 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
10 * header files exported to user space
11 */
12
13typedef __signed__ char __s8;
14typedef unsigned char __u8;
15
16typedef __signed__ short __s16;
17typedef unsigned short __u16;
18
19typedef __signed__ int __s32;
20typedef unsigned int __u32;
21
22typedef __signed__ long long __s64;
23typedef unsigned long long __u64;
24
25#endif /* __ASSEMBLY__ */
26
27/*
28 * These aren't exported outside the kernel to avoid name space clashes
29 */
30#ifdef __KERNEL__
31
32#define BITS_PER_LONG 64
33
34#ifndef __ASSEMBLY__
35
36typedef signed char s8;
37typedef unsigned char u8;
38
39typedef signed short s16;
40typedef unsigned short u16;
41
42typedef signed int s32;
43typedef unsigned int u32;
44
45typedef signed long long s64;
46typedef unsigned long long u64;
47
48typedef u64 dma64_addr_t;
49typedef u64 dma_addr_t;
50
51#endif /* __ASSEMBLY__ */
52
53#endif /* __KERNEL__ */
54
55#endif
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h
index 175c8cb59731..50a79f7fcde9 100644
--- a/include/asm-x86/ucontext.h
+++ b/include/asm-x86/ucontext.h
@@ -1,13 +1,12 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_UCONTEXT_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_UCONTEXT_H
3# include "ucontext_32.h" 3
4# else 4struct ucontext {
5# include "ucontext_64.h" 5 unsigned long uc_flags;
6# endif 6 struct ucontext *uc_link;
7#else 7 stack_t uc_stack;
8# ifdef __i386__ 8 struct sigcontext uc_mcontext;
9# include "ucontext_32.h" 9 sigset_t uc_sigmask; /* mask last for extensibility */
10# else 10};
11# include "ucontext_64.h" 11
12# endif 12#endif /* _ASM_X86_UCONTEXT_H */
13#endif
diff --git a/include/asm-x86/ucontext_32.h b/include/asm-x86/ucontext_32.h
deleted file mode 100644
index b0db36925f55..000000000000
--- a/include/asm-x86/ucontext_32.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASMi386_UCONTEXT_H
2#define _ASMi386_UCONTEXT_H
3
4struct ucontext {
5 unsigned long uc_flags;
6 struct ucontext *uc_link;
7 stack_t uc_stack;
8 struct sigcontext uc_mcontext;
9 sigset_t uc_sigmask; /* mask last for extensibility */
10};
11
12#endif /* !_ASMi386_UCONTEXT_H */
diff --git a/include/asm-x86/ucontext_64.h b/include/asm-x86/ucontext_64.h
deleted file mode 100644
index 159a3da9e112..000000000000
--- a/include/asm-x86/ucontext_64.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASMX8664_UCONTEXT_H
2#define _ASMX8664_UCONTEXT_H
3
4struct ucontext {
5 unsigned long uc_flags;
6 struct ucontext *uc_link;
7 stack_t uc_stack;
8 struct sigcontext uc_mcontext;
9 sigset_t uc_sigmask; /* mask last for extensibility */
10};
11
12#endif
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
index 68067150fbcb..913598d4f761 100644
--- a/include/asm-x86/unaligned.h
+++ b/include/asm-x86/unaligned.h
@@ -1,5 +1,37 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_UNALIGNED_H
2# include "unaligned_32.h" 2#define _ASM_X86_UNALIGNED_H
3#else 3
4# include "unaligned_64.h" 4/*
5#endif 5 * The x86 can do unaligned accesses itself.
6 *
7 * The strange macros are there to make sure these can't
8 * be misused in a way that makes them not work on other
9 * architectures where unaligned accesses aren't as simple.
10 */
11
12/**
13 * get_unaligned - get value from possibly mis-aligned location
14 * @ptr: pointer to value
15 *
16 * This macro should be used for accessing values larger in size than
17 * single bytes at locations that are expected to be improperly aligned,
18 * e.g. retrieving a u16 value from a location not u16-aligned.
19 *
20 * Note that unaligned accesses can be very expensive on some architectures.
21 */
22#define get_unaligned(ptr) (*(ptr))
23
24/**
25 * put_unaligned - put value to a possibly mis-aligned location
26 * @val: value to place
27 * @ptr: pointer to location
28 *
29 * This macro should be used for placing values larger in size than
30 * single bytes at locations that are expected to be improperly aligned,
31 * e.g. writing a u16 value to a location not u16-aligned.
32 *
33 * Note that unaligned accesses can be very expensive on some architectures.
34 */
35#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
36
37#endif /* _ASM_X86_UNALIGNED_H */
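A short sketch (not part of this patch) of the intended use of the get_unaligned() macro defined above: pulling a multi-byte value out of a byte buffer at an arbitrary offset. On x86 it is a plain dereference, but the macro keeps such code portable; read_field() is a hypothetical name:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical sketch: read a u32 that may not be 4-byte aligned. */
static u32 read_field(const u8 *buf, unsigned int offset)
{
	return get_unaligned((const u32 *)(buf + offset));
}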
diff --git a/include/asm-x86/unaligned_32.h b/include/asm-x86/unaligned_32.h
deleted file mode 100644
index 7acd7957621e..000000000000
--- a/include/asm-x86/unaligned_32.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef __I386_UNALIGNED_H
2#define __I386_UNALIGNED_H
3
4/*
5 * The i386 can do unaligned accesses itself.
6 *
7 * The strange macros are there to make sure these can't
8 * be misused in a way that makes them not work on other
9 * architectures where unaligned accesses aren't as simple.
10 */
11
12/**
13 * get_unaligned - get value from possibly mis-aligned location
14 * @ptr: pointer to value
15 *
16 * This macro should be used for accessing values larger in size than
17 * single bytes at locations that are expected to be improperly aligned,
18 * e.g. retrieving a u16 value from a location not u16-aligned.
19 *
20 * Note that unaligned accesses can be very expensive on some architectures.
21 */
22#define get_unaligned(ptr) (*(ptr))
23
24/**
25 * put_unaligned - put value to a possibly mis-aligned location
26 * @val: value to place
27 * @ptr: pointer to location
28 *
29 * This macro should be used for placing values larger in size than
30 * single bytes at locations that are expected to be improperly aligned,
31 * e.g. writing a u16 value to a location not u16-aligned.
32 *
33 * Note that unaligned accesses can be very expensive on some architectures.
34 */
35#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
36
37#endif
diff --git a/include/asm-x86/unaligned_64.h b/include/asm-x86/unaligned_64.h
deleted file mode 100644
index d4bf78dc6f39..000000000000
--- a/include/asm-x86/unaligned_64.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef __X8664_UNALIGNED_H
2#define __X8664_UNALIGNED_H
3
4/*
5 * The x86-64 can do unaligned accesses itself.
6 *
7 * The strange macros are there to make sure these can't
8 * be misused in a way that makes them not work on other
9 * architectures where unaligned accesses aren't as simple.
10 */
11
12/**
13 * get_unaligned - get value from possibly mis-aligned location
14 * @ptr: pointer to value
15 *
16 * This macro should be used for accessing values larger in size than
17 * single bytes at locations that are expected to be improperly aligned,
18 * e.g. retrieving a u16 value from a location not u16-aligned.
19 *
20 * Note that unaligned accesses can be very expensive on some architectures.
21 */
22#define get_unaligned(ptr) (*(ptr))
23
24/**
25 * put_unaligned - put value to a possibly mis-aligned location
26 * @val: value to place
27 * @ptr: pointer to location
28 *
29 * This macro should be used for placing values larger in size than
30 * single bytes at locations that are expected to be improperly aligned,
31 * e.g. writing a u16 value to a location not u16-aligned.
32 *
33 * Note that unaligned accesses can be very expensive on some architectures.
34 */
35#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
36
37#endif
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index fc4e73f5f1fa..5ff4d3e24c34 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -2,635 +2,638 @@
2#define _ASM_X86_64_UNISTD_H_ 2#define _ASM_X86_64_UNISTD_H_
3 3
4#ifndef __SYSCALL 4#ifndef __SYSCALL
5#define __SYSCALL(a,b) 5#define __SYSCALL(a,b)
6#endif 6#endif
7 7
8/* 8/*
9 * This file contains the system call numbers. 9 * This file contains the system call numbers.
10 * 10 *
11 * Note: holes are not allowed. 11 * Note: holes are not allowed.
12 */ 12 */
13 13
14/* at least 8 syscall per cacheline */ 14/* at least 8 syscall per cacheline */
15#define __NR_read 0 15#define __NR_read 0
16__SYSCALL(__NR_read, sys_read) 16__SYSCALL(__NR_read, sys_read)
17#define __NR_write 1 17#define __NR_write 1
18__SYSCALL(__NR_write, sys_write) 18__SYSCALL(__NR_write, sys_write)
19#define __NR_open 2 19#define __NR_open 2
20__SYSCALL(__NR_open, sys_open) 20__SYSCALL(__NR_open, sys_open)
21#define __NR_close 3 21#define __NR_close 3
22__SYSCALL(__NR_close, sys_close) 22__SYSCALL(__NR_close, sys_close)
23#define __NR_stat 4 23#define __NR_stat 4
24__SYSCALL(__NR_stat, sys_newstat) 24__SYSCALL(__NR_stat, sys_newstat)
25#define __NR_fstat 5 25#define __NR_fstat 5
26__SYSCALL(__NR_fstat, sys_newfstat) 26__SYSCALL(__NR_fstat, sys_newfstat)
27#define __NR_lstat 6 27#define __NR_lstat 6
28__SYSCALL(__NR_lstat, sys_newlstat) 28__SYSCALL(__NR_lstat, sys_newlstat)
29#define __NR_poll 7 29#define __NR_poll 7
30__SYSCALL(__NR_poll, sys_poll) 30__SYSCALL(__NR_poll, sys_poll)
31 31
32#define __NR_lseek 8 32#define __NR_lseek 8
33__SYSCALL(__NR_lseek, sys_lseek) 33__SYSCALL(__NR_lseek, sys_lseek)
34#define __NR_mmap 9 34#define __NR_mmap 9
35__SYSCALL(__NR_mmap, sys_mmap) 35__SYSCALL(__NR_mmap, sys_mmap)
36#define __NR_mprotect 10 36#define __NR_mprotect 10
37__SYSCALL(__NR_mprotect, sys_mprotect) 37__SYSCALL(__NR_mprotect, sys_mprotect)
38#define __NR_munmap 11 38#define __NR_munmap 11
39__SYSCALL(__NR_munmap, sys_munmap) 39__SYSCALL(__NR_munmap, sys_munmap)
40#define __NR_brk 12 40#define __NR_brk 12
41__SYSCALL(__NR_brk, sys_brk) 41__SYSCALL(__NR_brk, sys_brk)
42#define __NR_rt_sigaction 13 42#define __NR_rt_sigaction 13
43__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) 43__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction)
44#define __NR_rt_sigprocmask 14 44#define __NR_rt_sigprocmask 14
45__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask) 45__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
46#define __NR_rt_sigreturn 15 46#define __NR_rt_sigreturn 15
47__SYSCALL(__NR_rt_sigreturn, stub_rt_sigreturn) 47__SYSCALL(__NR_rt_sigreturn, stub_rt_sigreturn)
48 48
49#define __NR_ioctl 16 49#define __NR_ioctl 16
50__SYSCALL(__NR_ioctl, sys_ioctl) 50__SYSCALL(__NR_ioctl, sys_ioctl)
51#define __NR_pread64 17 51#define __NR_pread64 17
52__SYSCALL(__NR_pread64, sys_pread64) 52__SYSCALL(__NR_pread64, sys_pread64)
53#define __NR_pwrite64 18 53#define __NR_pwrite64 18
54__SYSCALL(__NR_pwrite64, sys_pwrite64) 54__SYSCALL(__NR_pwrite64, sys_pwrite64)
55#define __NR_readv 19 55#define __NR_readv 19
56__SYSCALL(__NR_readv, sys_readv) 56__SYSCALL(__NR_readv, sys_readv)
57#define __NR_writev 20 57#define __NR_writev 20
58__SYSCALL(__NR_writev, sys_writev) 58__SYSCALL(__NR_writev, sys_writev)
59#define __NR_access 21 59#define __NR_access 21
60__SYSCALL(__NR_access, sys_access) 60__SYSCALL(__NR_access, sys_access)
61#define __NR_pipe 22 61#define __NR_pipe 22
62__SYSCALL(__NR_pipe, sys_pipe) 62__SYSCALL(__NR_pipe, sys_pipe)
63#define __NR_select 23 63#define __NR_select 23
64__SYSCALL(__NR_select, sys_select) 64__SYSCALL(__NR_select, sys_select)
65 65
66#define __NR_sched_yield 24 66#define __NR_sched_yield 24
67__SYSCALL(__NR_sched_yield, sys_sched_yield) 67__SYSCALL(__NR_sched_yield, sys_sched_yield)
68#define __NR_mremap 25 68#define __NR_mremap 25
69__SYSCALL(__NR_mremap, sys_mremap) 69__SYSCALL(__NR_mremap, sys_mremap)
70#define __NR_msync 26 70#define __NR_msync 26
71__SYSCALL(__NR_msync, sys_msync) 71__SYSCALL(__NR_msync, sys_msync)
72#define __NR_mincore 27 72#define __NR_mincore 27
73__SYSCALL(__NR_mincore, sys_mincore) 73__SYSCALL(__NR_mincore, sys_mincore)
74#define __NR_madvise 28 74#define __NR_madvise 28
75__SYSCALL(__NR_madvise, sys_madvise) 75__SYSCALL(__NR_madvise, sys_madvise)
76#define __NR_shmget 29 76#define __NR_shmget 29
77__SYSCALL(__NR_shmget, sys_shmget) 77__SYSCALL(__NR_shmget, sys_shmget)
78#define __NR_shmat 30 78#define __NR_shmat 30
79__SYSCALL(__NR_shmat, sys_shmat) 79__SYSCALL(__NR_shmat, sys_shmat)
80#define __NR_shmctl 31 80#define __NR_shmctl 31
81__SYSCALL(__NR_shmctl, sys_shmctl) 81__SYSCALL(__NR_shmctl, sys_shmctl)
82 82
83#define __NR_dup 32 83#define __NR_dup 32
84__SYSCALL(__NR_dup, sys_dup) 84__SYSCALL(__NR_dup, sys_dup)
85#define __NR_dup2 33 85#define __NR_dup2 33
86__SYSCALL(__NR_dup2, sys_dup2) 86__SYSCALL(__NR_dup2, sys_dup2)
87#define __NR_pause 34 87#define __NR_pause 34
88__SYSCALL(__NR_pause, sys_pause) 88__SYSCALL(__NR_pause, sys_pause)
89#define __NR_nanosleep 35 89#define __NR_nanosleep 35
90__SYSCALL(__NR_nanosleep, sys_nanosleep) 90__SYSCALL(__NR_nanosleep, sys_nanosleep)
91#define __NR_getitimer 36 91#define __NR_getitimer 36
92__SYSCALL(__NR_getitimer, sys_getitimer) 92__SYSCALL(__NR_getitimer, sys_getitimer)
93#define __NR_alarm 37 93#define __NR_alarm 37
94__SYSCALL(__NR_alarm, sys_alarm) 94__SYSCALL(__NR_alarm, sys_alarm)
95#define __NR_setitimer 38 95#define __NR_setitimer 38
96__SYSCALL(__NR_setitimer, sys_setitimer) 96__SYSCALL(__NR_setitimer, sys_setitimer)
97#define __NR_getpid 39 97#define __NR_getpid 39
98__SYSCALL(__NR_getpid, sys_getpid) 98__SYSCALL(__NR_getpid, sys_getpid)
99 99
100#define __NR_sendfile 40 100#define __NR_sendfile 40
101__SYSCALL(__NR_sendfile, sys_sendfile64) 101__SYSCALL(__NR_sendfile, sys_sendfile64)
102#define __NR_socket 41 102#define __NR_socket 41
103__SYSCALL(__NR_socket, sys_socket) 103__SYSCALL(__NR_socket, sys_socket)
104#define __NR_connect 42 104#define __NR_connect 42
105__SYSCALL(__NR_connect, sys_connect) 105__SYSCALL(__NR_connect, sys_connect)
106#define __NR_accept 43 106#define __NR_accept 43
107__SYSCALL(__NR_accept, sys_accept) 107__SYSCALL(__NR_accept, sys_accept)
108#define __NR_sendto 44 108#define __NR_sendto 44
109__SYSCALL(__NR_sendto, sys_sendto) 109__SYSCALL(__NR_sendto, sys_sendto)
110#define __NR_recvfrom 45 110#define __NR_recvfrom 45
111__SYSCALL(__NR_recvfrom, sys_recvfrom) 111__SYSCALL(__NR_recvfrom, sys_recvfrom)
112#define __NR_sendmsg 46 112#define __NR_sendmsg 46
113__SYSCALL(__NR_sendmsg, sys_sendmsg) 113__SYSCALL(__NR_sendmsg, sys_sendmsg)
114#define __NR_recvmsg 47 114#define __NR_recvmsg 47
115__SYSCALL(__NR_recvmsg, sys_recvmsg) 115__SYSCALL(__NR_recvmsg, sys_recvmsg)
116 116
117#define __NR_shutdown 48 117#define __NR_shutdown 48
118__SYSCALL(__NR_shutdown, sys_shutdown) 118__SYSCALL(__NR_shutdown, sys_shutdown)
119#define __NR_bind 49 119#define __NR_bind 49
120__SYSCALL(__NR_bind, sys_bind) 120__SYSCALL(__NR_bind, sys_bind)
121#define __NR_listen 50 121#define __NR_listen 50
122__SYSCALL(__NR_listen, sys_listen) 122__SYSCALL(__NR_listen, sys_listen)
123#define __NR_getsockname 51 123#define __NR_getsockname 51
124__SYSCALL(__NR_getsockname, sys_getsockname) 124__SYSCALL(__NR_getsockname, sys_getsockname)
125#define __NR_getpeername 52 125#define __NR_getpeername 52
126__SYSCALL(__NR_getpeername, sys_getpeername) 126__SYSCALL(__NR_getpeername, sys_getpeername)
127#define __NR_socketpair 53 127#define __NR_socketpair 53
128__SYSCALL(__NR_socketpair, sys_socketpair) 128__SYSCALL(__NR_socketpair, sys_socketpair)
129#define __NR_setsockopt 54 129#define __NR_setsockopt 54
130__SYSCALL(__NR_setsockopt, sys_setsockopt) 130__SYSCALL(__NR_setsockopt, sys_setsockopt)
131#define __NR_getsockopt 55 131#define __NR_getsockopt 55
132__SYSCALL(__NR_getsockopt, sys_getsockopt) 132__SYSCALL(__NR_getsockopt, sys_getsockopt)
133 133
134#define __NR_clone 56 134#define __NR_clone 56
135__SYSCALL(__NR_clone, stub_clone) 135__SYSCALL(__NR_clone, stub_clone)
136#define __NR_fork 57 136#define __NR_fork 57
137__SYSCALL(__NR_fork, stub_fork) 137__SYSCALL(__NR_fork, stub_fork)
138#define __NR_vfork 58 138#define __NR_vfork 58
139__SYSCALL(__NR_vfork, stub_vfork) 139__SYSCALL(__NR_vfork, stub_vfork)
140#define __NR_execve 59 140#define __NR_execve 59
141__SYSCALL(__NR_execve, stub_execve) 141__SYSCALL(__NR_execve, stub_execve)
142#define __NR_exit 60 142#define __NR_exit 60
143__SYSCALL(__NR_exit, sys_exit) 143__SYSCALL(__NR_exit, sys_exit)
144#define __NR_wait4 61 144#define __NR_wait4 61
145__SYSCALL(__NR_wait4, sys_wait4) 145__SYSCALL(__NR_wait4, sys_wait4)
146#define __NR_kill 62 146#define __NR_kill 62
147__SYSCALL(__NR_kill, sys_kill) 147__SYSCALL(__NR_kill, sys_kill)
148#define __NR_uname 63 148#define __NR_uname 63
149__SYSCALL(__NR_uname, sys_uname) 149__SYSCALL(__NR_uname, sys_uname)
150 150
151#define __NR_semget 64 151#define __NR_semget 64
152__SYSCALL(__NR_semget, sys_semget) 152__SYSCALL(__NR_semget, sys_semget)
153#define __NR_semop 65 153#define __NR_semop 65
154__SYSCALL(__NR_semop, sys_semop) 154__SYSCALL(__NR_semop, sys_semop)
155#define __NR_semctl 66 155#define __NR_semctl 66
156__SYSCALL(__NR_semctl, sys_semctl) 156__SYSCALL(__NR_semctl, sys_semctl)
157#define __NR_shmdt 67 157#define __NR_shmdt 67
158__SYSCALL(__NR_shmdt, sys_shmdt) 158__SYSCALL(__NR_shmdt, sys_shmdt)
159#define __NR_msgget 68 159#define __NR_msgget 68
160__SYSCALL(__NR_msgget, sys_msgget) 160__SYSCALL(__NR_msgget, sys_msgget)
161#define __NR_msgsnd 69 161#define __NR_msgsnd 69
162__SYSCALL(__NR_msgsnd, sys_msgsnd) 162__SYSCALL(__NR_msgsnd, sys_msgsnd)
163#define __NR_msgrcv 70 163#define __NR_msgrcv 70
164__SYSCALL(__NR_msgrcv, sys_msgrcv) 164__SYSCALL(__NR_msgrcv, sys_msgrcv)
165#define __NR_msgctl 71 165#define __NR_msgctl 71
166__SYSCALL(__NR_msgctl, sys_msgctl) 166__SYSCALL(__NR_msgctl, sys_msgctl)
167 167
168#define __NR_fcntl 72 168#define __NR_fcntl 72
169__SYSCALL(__NR_fcntl, sys_fcntl) 169__SYSCALL(__NR_fcntl, sys_fcntl)
170#define __NR_flock 73 170#define __NR_flock 73
171__SYSCALL(__NR_flock, sys_flock) 171__SYSCALL(__NR_flock, sys_flock)
172#define __NR_fsync 74 172#define __NR_fsync 74
173__SYSCALL(__NR_fsync, sys_fsync) 173__SYSCALL(__NR_fsync, sys_fsync)
174#define __NR_fdatasync 75 174#define __NR_fdatasync 75
175__SYSCALL(__NR_fdatasync, sys_fdatasync) 175__SYSCALL(__NR_fdatasync, sys_fdatasync)
176#define __NR_truncate 76 176#define __NR_truncate 76
177__SYSCALL(__NR_truncate, sys_truncate) 177__SYSCALL(__NR_truncate, sys_truncate)
178#define __NR_ftruncate 77 178#define __NR_ftruncate 77
179__SYSCALL(__NR_ftruncate, sys_ftruncate) 179__SYSCALL(__NR_ftruncate, sys_ftruncate)
180#define __NR_getdents 78 180#define __NR_getdents 78
181__SYSCALL(__NR_getdents, sys_getdents) 181__SYSCALL(__NR_getdents, sys_getdents)
182#define __NR_getcwd 79 182#define __NR_getcwd 79
183__SYSCALL(__NR_getcwd, sys_getcwd) 183__SYSCALL(__NR_getcwd, sys_getcwd)
184 184
185#define __NR_chdir 80 185#define __NR_chdir 80
186__SYSCALL(__NR_chdir, sys_chdir) 186__SYSCALL(__NR_chdir, sys_chdir)
187#define __NR_fchdir 81 187#define __NR_fchdir 81
188__SYSCALL(__NR_fchdir, sys_fchdir) 188__SYSCALL(__NR_fchdir, sys_fchdir)
189#define __NR_rename 82 189#define __NR_rename 82
190__SYSCALL(__NR_rename, sys_rename) 190__SYSCALL(__NR_rename, sys_rename)
191#define __NR_mkdir 83 191#define __NR_mkdir 83
192__SYSCALL(__NR_mkdir, sys_mkdir) 192__SYSCALL(__NR_mkdir, sys_mkdir)
193#define __NR_rmdir 84 193#define __NR_rmdir 84
194__SYSCALL(__NR_rmdir, sys_rmdir) 194__SYSCALL(__NR_rmdir, sys_rmdir)
195#define __NR_creat 85 195#define __NR_creat 85
196__SYSCALL(__NR_creat, sys_creat) 196__SYSCALL(__NR_creat, sys_creat)
197#define __NR_link 86 197#define __NR_link 86
198__SYSCALL(__NR_link, sys_link) 198__SYSCALL(__NR_link, sys_link)
199#define __NR_unlink 87 199#define __NR_unlink 87
200__SYSCALL(__NR_unlink, sys_unlink) 200__SYSCALL(__NR_unlink, sys_unlink)
201 201
202#define __NR_symlink 88 202#define __NR_symlink 88
203__SYSCALL(__NR_symlink, sys_symlink) 203__SYSCALL(__NR_symlink, sys_symlink)
204#define __NR_readlink 89 204#define __NR_readlink 89
205__SYSCALL(__NR_readlink, sys_readlink) 205__SYSCALL(__NR_readlink, sys_readlink)
206#define __NR_chmod 90 206#define __NR_chmod 90
207__SYSCALL(__NR_chmod, sys_chmod) 207__SYSCALL(__NR_chmod, sys_chmod)
208#define __NR_fchmod 91 208#define __NR_fchmod 91
209__SYSCALL(__NR_fchmod, sys_fchmod) 209__SYSCALL(__NR_fchmod, sys_fchmod)
210#define __NR_chown 92 210#define __NR_chown 92
211__SYSCALL(__NR_chown, sys_chown) 211__SYSCALL(__NR_chown, sys_chown)
212#define __NR_fchown 93 212#define __NR_fchown 93
213__SYSCALL(__NR_fchown, sys_fchown) 213__SYSCALL(__NR_fchown, sys_fchown)
214#define __NR_lchown 94 214#define __NR_lchown 94
215__SYSCALL(__NR_lchown, sys_lchown) 215__SYSCALL(__NR_lchown, sys_lchown)
216#define __NR_umask 95 216#define __NR_umask 95
217__SYSCALL(__NR_umask, sys_umask) 217__SYSCALL(__NR_umask, sys_umask)
218 218
219#define __NR_gettimeofday 96 219#define __NR_gettimeofday 96
220__SYSCALL(__NR_gettimeofday, sys_gettimeofday) 220__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
221#define __NR_getrlimit 97 221#define __NR_getrlimit 97
222__SYSCALL(__NR_getrlimit, sys_getrlimit) 222__SYSCALL(__NR_getrlimit, sys_getrlimit)
223#define __NR_getrusage 98 223#define __NR_getrusage 98
224__SYSCALL(__NR_getrusage, sys_getrusage) 224__SYSCALL(__NR_getrusage, sys_getrusage)
225#define __NR_sysinfo 99 225#define __NR_sysinfo 99
226__SYSCALL(__NR_sysinfo, sys_sysinfo) 226__SYSCALL(__NR_sysinfo, sys_sysinfo)
227#define __NR_times 100 227#define __NR_times 100
228__SYSCALL(__NR_times, sys_times) 228__SYSCALL(__NR_times, sys_times)
229#define __NR_ptrace 101 229#define __NR_ptrace 101
230__SYSCALL(__NR_ptrace, sys_ptrace) 230__SYSCALL(__NR_ptrace, sys_ptrace)
231#define __NR_getuid 102 231#define __NR_getuid 102
232__SYSCALL(__NR_getuid, sys_getuid) 232__SYSCALL(__NR_getuid, sys_getuid)
233#define __NR_syslog 103 233#define __NR_syslog 103
234__SYSCALL(__NR_syslog, sys_syslog) 234__SYSCALL(__NR_syslog, sys_syslog)
235 235
236/* at the very end the stuff that never runs during the benchmarks */ 236/* at the very end the stuff that never runs during the benchmarks */
237#define __NR_getgid 104 237#define __NR_getgid 104
238__SYSCALL(__NR_getgid, sys_getgid) 238__SYSCALL(__NR_getgid, sys_getgid)
239#define __NR_setuid 105 239#define __NR_setuid 105
240__SYSCALL(__NR_setuid, sys_setuid) 240__SYSCALL(__NR_setuid, sys_setuid)
241#define __NR_setgid 106 241#define __NR_setgid 106
242__SYSCALL(__NR_setgid, sys_setgid) 242__SYSCALL(__NR_setgid, sys_setgid)
243#define __NR_geteuid 107 243#define __NR_geteuid 107
244__SYSCALL(__NR_geteuid, sys_geteuid) 244__SYSCALL(__NR_geteuid, sys_geteuid)
245#define __NR_getegid 108 245#define __NR_getegid 108
246__SYSCALL(__NR_getegid, sys_getegid) 246__SYSCALL(__NR_getegid, sys_getegid)
247#define __NR_setpgid 109 247#define __NR_setpgid 109
248__SYSCALL(__NR_setpgid, sys_setpgid) 248__SYSCALL(__NR_setpgid, sys_setpgid)
249#define __NR_getppid 110 249#define __NR_getppid 110
250__SYSCALL(__NR_getppid, sys_getppid) 250__SYSCALL(__NR_getppid, sys_getppid)
251#define __NR_getpgrp 111 251#define __NR_getpgrp 111
252__SYSCALL(__NR_getpgrp, sys_getpgrp) 252__SYSCALL(__NR_getpgrp, sys_getpgrp)
253 253
254#define __NR_setsid 112 254#define __NR_setsid 112
255__SYSCALL(__NR_setsid, sys_setsid) 255__SYSCALL(__NR_setsid, sys_setsid)
256#define __NR_setreuid 113 256#define __NR_setreuid 113
257__SYSCALL(__NR_setreuid, sys_setreuid) 257__SYSCALL(__NR_setreuid, sys_setreuid)
258#define __NR_setregid 114 258#define __NR_setregid 114
259__SYSCALL(__NR_setregid, sys_setregid) 259__SYSCALL(__NR_setregid, sys_setregid)
260#define __NR_getgroups 115 260#define __NR_getgroups 115
261__SYSCALL(__NR_getgroups, sys_getgroups) 261__SYSCALL(__NR_getgroups, sys_getgroups)
262#define __NR_setgroups 116 262#define __NR_setgroups 116
263__SYSCALL(__NR_setgroups, sys_setgroups) 263__SYSCALL(__NR_setgroups, sys_setgroups)
264#define __NR_setresuid 117 264#define __NR_setresuid 117
265__SYSCALL(__NR_setresuid, sys_setresuid) 265__SYSCALL(__NR_setresuid, sys_setresuid)
266#define __NR_getresuid 118 266#define __NR_getresuid 118
267__SYSCALL(__NR_getresuid, sys_getresuid) 267__SYSCALL(__NR_getresuid, sys_getresuid)
268#define __NR_setresgid 119 268#define __NR_setresgid 119
269__SYSCALL(__NR_setresgid, sys_setresgid) 269__SYSCALL(__NR_setresgid, sys_setresgid)
270 270
271#define __NR_getresgid 120 271#define __NR_getresgid 120
272__SYSCALL(__NR_getresgid, sys_getresgid) 272__SYSCALL(__NR_getresgid, sys_getresgid)
273#define __NR_getpgid 121 273#define __NR_getpgid 121
274__SYSCALL(__NR_getpgid, sys_getpgid) 274__SYSCALL(__NR_getpgid, sys_getpgid)
275#define __NR_setfsuid 122 275#define __NR_setfsuid 122
276__SYSCALL(__NR_setfsuid, sys_setfsuid) 276__SYSCALL(__NR_setfsuid, sys_setfsuid)
277#define __NR_setfsgid 123 277#define __NR_setfsgid 123
278__SYSCALL(__NR_setfsgid, sys_setfsgid) 278__SYSCALL(__NR_setfsgid, sys_setfsgid)
279#define __NR_getsid 124 279#define __NR_getsid 124
280__SYSCALL(__NR_getsid, sys_getsid) 280__SYSCALL(__NR_getsid, sys_getsid)
281#define __NR_capget 125 281#define __NR_capget 125
282__SYSCALL(__NR_capget, sys_capget) 282__SYSCALL(__NR_capget, sys_capget)
283#define __NR_capset 126 283#define __NR_capset 126
284__SYSCALL(__NR_capset, sys_capset) 284__SYSCALL(__NR_capset, sys_capset)
285 285
286#define __NR_rt_sigpending 127 286#define __NR_rt_sigpending 127
287__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending) 287__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
288#define __NR_rt_sigtimedwait 128 288#define __NR_rt_sigtimedwait 128
289__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait) 289__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
290#define __NR_rt_sigqueueinfo 129 290#define __NR_rt_sigqueueinfo 129
291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) 291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
292#define __NR_rt_sigsuspend 130 292#define __NR_rt_sigsuspend 130
293__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend) 293__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend)
294#define __NR_sigaltstack 131 294#define __NR_sigaltstack 131
295__SYSCALL(__NR_sigaltstack, stub_sigaltstack) 295__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
296#define __NR_utime 132 296#define __NR_utime 132
297__SYSCALL(__NR_utime, sys_utime) 297__SYSCALL(__NR_utime, sys_utime)
298#define __NR_mknod 133 298#define __NR_mknod 133
299__SYSCALL(__NR_mknod, sys_mknod) 299__SYSCALL(__NR_mknod, sys_mknod)
300 300
301/* Only needed for a.out */ 301/* Only needed for a.out */
302#define __NR_uselib 134 302#define __NR_uselib 134
303__SYSCALL(__NR_uselib, sys_ni_syscall) 303__SYSCALL(__NR_uselib, sys_ni_syscall)
304#define __NR_personality 135 304#define __NR_personality 135
305__SYSCALL(__NR_personality, sys_personality) 305__SYSCALL(__NR_personality, sys_personality)
306 306
307#define __NR_ustat 136 307#define __NR_ustat 136
308__SYSCALL(__NR_ustat, sys_ustat) 308__SYSCALL(__NR_ustat, sys_ustat)
309#define __NR_statfs 137 309#define __NR_statfs 137
310__SYSCALL(__NR_statfs, sys_statfs) 310__SYSCALL(__NR_statfs, sys_statfs)
311#define __NR_fstatfs 138 311#define __NR_fstatfs 138
312__SYSCALL(__NR_fstatfs, sys_fstatfs) 312__SYSCALL(__NR_fstatfs, sys_fstatfs)
313#define __NR_sysfs 139 313#define __NR_sysfs 139
314__SYSCALL(__NR_sysfs, sys_sysfs) 314__SYSCALL(__NR_sysfs, sys_sysfs)
315 315
316#define __NR_getpriority 140 316#define __NR_getpriority 140
317__SYSCALL(__NR_getpriority, sys_getpriority) 317__SYSCALL(__NR_getpriority, sys_getpriority)
318#define __NR_setpriority 141 318#define __NR_setpriority 141
319__SYSCALL(__NR_setpriority, sys_setpriority) 319__SYSCALL(__NR_setpriority, sys_setpriority)
320#define __NR_sched_setparam 142 320#define __NR_sched_setparam 142
321__SYSCALL(__NR_sched_setparam, sys_sched_setparam) 321__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
322#define __NR_sched_getparam 143 322#define __NR_sched_getparam 143
323__SYSCALL(__NR_sched_getparam, sys_sched_getparam) 323__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
324#define __NR_sched_setscheduler 144 324#define __NR_sched_setscheduler 144
325__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler) 325__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
326#define __NR_sched_getscheduler 145 326#define __NR_sched_getscheduler 145
327__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler) 327__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
328#define __NR_sched_get_priority_max 146 328#define __NR_sched_get_priority_max 146
329__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) 329__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
330#define __NR_sched_get_priority_min 147 330#define __NR_sched_get_priority_min 147
331__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) 331__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
332#define __NR_sched_rr_get_interval 148 332#define __NR_sched_rr_get_interval 148
333__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval) 333__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
334 334
335#define __NR_mlock 149 335#define __NR_mlock 149
336__SYSCALL(__NR_mlock, sys_mlock) 336__SYSCALL(__NR_mlock, sys_mlock)
337#define __NR_munlock 150 337#define __NR_munlock 150
338__SYSCALL(__NR_munlock, sys_munlock) 338__SYSCALL(__NR_munlock, sys_munlock)
339#define __NR_mlockall 151 339#define __NR_mlockall 151
340__SYSCALL(__NR_mlockall, sys_mlockall) 340__SYSCALL(__NR_mlockall, sys_mlockall)
341#define __NR_munlockall 152 341#define __NR_munlockall 152
342__SYSCALL(__NR_munlockall, sys_munlockall) 342__SYSCALL(__NR_munlockall, sys_munlockall)
343 343
344#define __NR_vhangup 153 344#define __NR_vhangup 153
345__SYSCALL(__NR_vhangup, sys_vhangup) 345__SYSCALL(__NR_vhangup, sys_vhangup)
346 346
347#define __NR_modify_ldt 154 347#define __NR_modify_ldt 154
348__SYSCALL(__NR_modify_ldt, sys_modify_ldt) 348__SYSCALL(__NR_modify_ldt, sys_modify_ldt)
349 349
350#define __NR_pivot_root 155 350#define __NR_pivot_root 155
351__SYSCALL(__NR_pivot_root, sys_pivot_root) 351__SYSCALL(__NR_pivot_root, sys_pivot_root)
352 352
353#define __NR__sysctl 156 353#define __NR__sysctl 156
354__SYSCALL(__NR__sysctl, sys_sysctl) 354__SYSCALL(__NR__sysctl, sys_sysctl)
355 355
356#define __NR_prctl 157 356#define __NR_prctl 157
357__SYSCALL(__NR_prctl, sys_prctl) 357__SYSCALL(__NR_prctl, sys_prctl)
358#define __NR_arch_prctl 158 358#define __NR_arch_prctl 158
359__SYSCALL(__NR_arch_prctl, sys_arch_prctl) 359__SYSCALL(__NR_arch_prctl, sys_arch_prctl)
360 360
361#define __NR_adjtimex 159 361#define __NR_adjtimex 159
362__SYSCALL(__NR_adjtimex, sys_adjtimex) 362__SYSCALL(__NR_adjtimex, sys_adjtimex)
363 363
364#define __NR_setrlimit 160 364#define __NR_setrlimit 160
365__SYSCALL(__NR_setrlimit, sys_setrlimit) 365__SYSCALL(__NR_setrlimit, sys_setrlimit)
366 366
367#define __NR_chroot 161 367#define __NR_chroot 161
368__SYSCALL(__NR_chroot, sys_chroot) 368__SYSCALL(__NR_chroot, sys_chroot)
369 369
370#define __NR_sync 162 370#define __NR_sync 162
371__SYSCALL(__NR_sync, sys_sync) 371__SYSCALL(__NR_sync, sys_sync)
372 372
373#define __NR_acct 163 373#define __NR_acct 163
374__SYSCALL(__NR_acct, sys_acct) 374__SYSCALL(__NR_acct, sys_acct)
375 375
376#define __NR_settimeofday 164 376#define __NR_settimeofday 164
377__SYSCALL(__NR_settimeofday, sys_settimeofday) 377__SYSCALL(__NR_settimeofday, sys_settimeofday)
378 378
379#define __NR_mount 165 379#define __NR_mount 165
380__SYSCALL(__NR_mount, sys_mount) 380__SYSCALL(__NR_mount, sys_mount)
381#define __NR_umount2 166 381#define __NR_umount2 166
382__SYSCALL(__NR_umount2, sys_umount) 382__SYSCALL(__NR_umount2, sys_umount)
383 383
384#define __NR_swapon 167 384#define __NR_swapon 167
385__SYSCALL(__NR_swapon, sys_swapon) 385__SYSCALL(__NR_swapon, sys_swapon)
386#define __NR_swapoff 168 386#define __NR_swapoff 168
387__SYSCALL(__NR_swapoff, sys_swapoff) 387__SYSCALL(__NR_swapoff, sys_swapoff)
388 388
389#define __NR_reboot 169 389#define __NR_reboot 169
390__SYSCALL(__NR_reboot, sys_reboot) 390__SYSCALL(__NR_reboot, sys_reboot)
391 391
392#define __NR_sethostname 170 392#define __NR_sethostname 170
393__SYSCALL(__NR_sethostname, sys_sethostname) 393__SYSCALL(__NR_sethostname, sys_sethostname)
394#define __NR_setdomainname 171 394#define __NR_setdomainname 171
395__SYSCALL(__NR_setdomainname, sys_setdomainname) 395__SYSCALL(__NR_setdomainname, sys_setdomainname)
396 396
397#define __NR_iopl 172 397#define __NR_iopl 172
398__SYSCALL(__NR_iopl, stub_iopl) 398__SYSCALL(__NR_iopl, stub_iopl)
399#define __NR_ioperm 173 399#define __NR_ioperm 173
400__SYSCALL(__NR_ioperm, sys_ioperm) 400__SYSCALL(__NR_ioperm, sys_ioperm)
401 401
402#define __NR_create_module 174 402#define __NR_create_module 174
403__SYSCALL(__NR_create_module, sys_ni_syscall) 403__SYSCALL(__NR_create_module, sys_ni_syscall)
404#define __NR_init_module 175 404#define __NR_init_module 175
405__SYSCALL(__NR_init_module, sys_init_module) 405__SYSCALL(__NR_init_module, sys_init_module)
406#define __NR_delete_module 176 406#define __NR_delete_module 176
407__SYSCALL(__NR_delete_module, sys_delete_module) 407__SYSCALL(__NR_delete_module, sys_delete_module)
408#define __NR_get_kernel_syms 177 408#define __NR_get_kernel_syms 177
409__SYSCALL(__NR_get_kernel_syms, sys_ni_syscall) 409__SYSCALL(__NR_get_kernel_syms, sys_ni_syscall)
410#define __NR_query_module 178 410#define __NR_query_module 178
411__SYSCALL(__NR_query_module, sys_ni_syscall) 411__SYSCALL(__NR_query_module, sys_ni_syscall)
412 412
413#define __NR_quotactl 179 413#define __NR_quotactl 179
414__SYSCALL(__NR_quotactl, sys_quotactl) 414__SYSCALL(__NR_quotactl, sys_quotactl)
415 415
416#define __NR_nfsservctl 180 416#define __NR_nfsservctl 180
417__SYSCALL(__NR_nfsservctl, sys_nfsservctl) 417__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
418 418
419#define __NR_getpmsg 181 /* reserved for LiS/STREAMS */ 419/* reserved for LiS/STREAMS */
420#define __NR_getpmsg 181
420__SYSCALL(__NR_getpmsg, sys_ni_syscall) 421__SYSCALL(__NR_getpmsg, sys_ni_syscall)
421#define __NR_putpmsg 182 /* reserved for LiS/STREAMS */ 422#define __NR_putpmsg 182
422__SYSCALL(__NR_putpmsg, sys_ni_syscall) 423__SYSCALL(__NR_putpmsg, sys_ni_syscall)
423 424
424#define __NR_afs_syscall 183 /* reserved for AFS */ 425/* reserved for AFS */
426#define __NR_afs_syscall 183
425__SYSCALL(__NR_afs_syscall, sys_ni_syscall) 427__SYSCALL(__NR_afs_syscall, sys_ni_syscall)
426 428
427#define __NR_tuxcall 184 /* reserved for tux */ 429/* reserved for tux */
430#define __NR_tuxcall 184
428__SYSCALL(__NR_tuxcall, sys_ni_syscall) 431__SYSCALL(__NR_tuxcall, sys_ni_syscall)
429 432
430#define __NR_security 185 433#define __NR_security 185
431__SYSCALL(__NR_security, sys_ni_syscall) 434__SYSCALL(__NR_security, sys_ni_syscall)
432 435
433#define __NR_gettid 186 436#define __NR_gettid 186
434__SYSCALL(__NR_gettid, sys_gettid) 437__SYSCALL(__NR_gettid, sys_gettid)
435 438
436#define __NR_readahead 187 439#define __NR_readahead 187
437__SYSCALL(__NR_readahead, sys_readahead) 440__SYSCALL(__NR_readahead, sys_readahead)
438#define __NR_setxattr 188 441#define __NR_setxattr 188
439__SYSCALL(__NR_setxattr, sys_setxattr) 442__SYSCALL(__NR_setxattr, sys_setxattr)
440#define __NR_lsetxattr 189 443#define __NR_lsetxattr 189
441__SYSCALL(__NR_lsetxattr, sys_lsetxattr) 444__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
442#define __NR_fsetxattr 190 445#define __NR_fsetxattr 190
443__SYSCALL(__NR_fsetxattr, sys_fsetxattr) 446__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
444#define __NR_getxattr 191 447#define __NR_getxattr 191
445__SYSCALL(__NR_getxattr, sys_getxattr) 448__SYSCALL(__NR_getxattr, sys_getxattr)
446#define __NR_lgetxattr 192 449#define __NR_lgetxattr 192
447__SYSCALL(__NR_lgetxattr, sys_lgetxattr) 450__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
448#define __NR_fgetxattr 193 451#define __NR_fgetxattr 193
449__SYSCALL(__NR_fgetxattr, sys_fgetxattr) 452__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
450#define __NR_listxattr 194 453#define __NR_listxattr 194
451__SYSCALL(__NR_listxattr, sys_listxattr) 454__SYSCALL(__NR_listxattr, sys_listxattr)
452#define __NR_llistxattr 195 455#define __NR_llistxattr 195
453__SYSCALL(__NR_llistxattr, sys_llistxattr) 456__SYSCALL(__NR_llistxattr, sys_llistxattr)
454#define __NR_flistxattr 196 457#define __NR_flistxattr 196
455__SYSCALL(__NR_flistxattr, sys_flistxattr) 458__SYSCALL(__NR_flistxattr, sys_flistxattr)
456#define __NR_removexattr 197 459#define __NR_removexattr 197
457__SYSCALL(__NR_removexattr, sys_removexattr) 460__SYSCALL(__NR_removexattr, sys_removexattr)
458#define __NR_lremovexattr 198 461#define __NR_lremovexattr 198
459__SYSCALL(__NR_lremovexattr, sys_lremovexattr) 462__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
460#define __NR_fremovexattr 199 463#define __NR_fremovexattr 199
461__SYSCALL(__NR_fremovexattr, sys_fremovexattr) 464__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
462#define __NR_tkill 200 465#define __NR_tkill 200
463__SYSCALL(__NR_tkill, sys_tkill) 466__SYSCALL(__NR_tkill, sys_tkill)
464#define __NR_time 201 467#define __NR_time 201
465__SYSCALL(__NR_time, sys_time) 468__SYSCALL(__NR_time, sys_time)
466#define __NR_futex 202 469#define __NR_futex 202
467__SYSCALL(__NR_futex, sys_futex) 470__SYSCALL(__NR_futex, sys_futex)
468#define __NR_sched_setaffinity 203 471#define __NR_sched_setaffinity 203
469__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity) 472__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
470#define __NR_sched_getaffinity 204 473#define __NR_sched_getaffinity 204
471__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity) 474__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
472#define __NR_set_thread_area 205 475#define __NR_set_thread_area 205
473__SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */ 476__SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */
474#define __NR_io_setup 206 477#define __NR_io_setup 206
475__SYSCALL(__NR_io_setup, sys_io_setup) 478__SYSCALL(__NR_io_setup, sys_io_setup)
476#define __NR_io_destroy 207 479#define __NR_io_destroy 207
477__SYSCALL(__NR_io_destroy, sys_io_destroy) 480__SYSCALL(__NR_io_destroy, sys_io_destroy)
478#define __NR_io_getevents 208 481#define __NR_io_getevents 208
479__SYSCALL(__NR_io_getevents, sys_io_getevents) 482__SYSCALL(__NR_io_getevents, sys_io_getevents)
480#define __NR_io_submit 209 483#define __NR_io_submit 209
481__SYSCALL(__NR_io_submit, sys_io_submit) 484__SYSCALL(__NR_io_submit, sys_io_submit)
482#define __NR_io_cancel 210 485#define __NR_io_cancel 210
483__SYSCALL(__NR_io_cancel, sys_io_cancel) 486__SYSCALL(__NR_io_cancel, sys_io_cancel)
484#define __NR_get_thread_area 211 487#define __NR_get_thread_area 211
485__SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */ 488__SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */
486#define __NR_lookup_dcookie 212 489#define __NR_lookup_dcookie 212
487__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie) 490__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
488#define __NR_epoll_create 213 491#define __NR_epoll_create 213
489__SYSCALL(__NR_epoll_create, sys_epoll_create) 492__SYSCALL(__NR_epoll_create, sys_epoll_create)
490#define __NR_epoll_ctl_old 214 493#define __NR_epoll_ctl_old 214
491__SYSCALL(__NR_epoll_ctl_old, sys_ni_syscall) 494__SYSCALL(__NR_epoll_ctl_old, sys_ni_syscall)
492#define __NR_epoll_wait_old 215 495#define __NR_epoll_wait_old 215
493__SYSCALL(__NR_epoll_wait_old, sys_ni_syscall) 496__SYSCALL(__NR_epoll_wait_old, sys_ni_syscall)
494#define __NR_remap_file_pages 216 497#define __NR_remap_file_pages 216
495__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) 498__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
496#define __NR_getdents64 217 499#define __NR_getdents64 217
497__SYSCALL(__NR_getdents64, sys_getdents64) 500__SYSCALL(__NR_getdents64, sys_getdents64)
498#define __NR_set_tid_address 218 501#define __NR_set_tid_address 218
499__SYSCALL(__NR_set_tid_address, sys_set_tid_address) 502__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
500#define __NR_restart_syscall 219 503#define __NR_restart_syscall 219
501__SYSCALL(__NR_restart_syscall, sys_restart_syscall) 504__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
502#define __NR_semtimedop 220 505#define __NR_semtimedop 220
503__SYSCALL(__NR_semtimedop, sys_semtimedop) 506__SYSCALL(__NR_semtimedop, sys_semtimedop)
504#define __NR_fadvise64 221 507#define __NR_fadvise64 221
505__SYSCALL(__NR_fadvise64, sys_fadvise64) 508__SYSCALL(__NR_fadvise64, sys_fadvise64)
506#define __NR_timer_create 222 509#define __NR_timer_create 222
507__SYSCALL(__NR_timer_create, sys_timer_create) 510__SYSCALL(__NR_timer_create, sys_timer_create)
508#define __NR_timer_settime 223 511#define __NR_timer_settime 223
509__SYSCALL(__NR_timer_settime, sys_timer_settime) 512__SYSCALL(__NR_timer_settime, sys_timer_settime)
510#define __NR_timer_gettime 224 513#define __NR_timer_gettime 224
511__SYSCALL(__NR_timer_gettime, sys_timer_gettime) 514__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
512#define __NR_timer_getoverrun 225 515#define __NR_timer_getoverrun 225
513__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) 516__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
514#define __NR_timer_delete 226 517#define __NR_timer_delete 226
515__SYSCALL(__NR_timer_delete, sys_timer_delete) 518__SYSCALL(__NR_timer_delete, sys_timer_delete)
516#define __NR_clock_settime 227 519#define __NR_clock_settime 227
517__SYSCALL(__NR_clock_settime, sys_clock_settime) 520__SYSCALL(__NR_clock_settime, sys_clock_settime)
518#define __NR_clock_gettime 228 521#define __NR_clock_gettime 228
519__SYSCALL(__NR_clock_gettime, sys_clock_gettime) 522__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
520#define __NR_clock_getres 229 523#define __NR_clock_getres 229
521__SYSCALL(__NR_clock_getres, sys_clock_getres) 524__SYSCALL(__NR_clock_getres, sys_clock_getres)
522#define __NR_clock_nanosleep 230 525#define __NR_clock_nanosleep 230
523__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep) 526__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
524#define __NR_exit_group 231 527#define __NR_exit_group 231
525__SYSCALL(__NR_exit_group, sys_exit_group) 528__SYSCALL(__NR_exit_group, sys_exit_group)
526#define __NR_epoll_wait 232 529#define __NR_epoll_wait 232
527__SYSCALL(__NR_epoll_wait, sys_epoll_wait) 530__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
528#define __NR_epoll_ctl 233 531#define __NR_epoll_ctl 233
529__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) 532__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
530#define __NR_tgkill 234 533#define __NR_tgkill 234
531__SYSCALL(__NR_tgkill, sys_tgkill) 534__SYSCALL(__NR_tgkill, sys_tgkill)
532#define __NR_utimes 235 535#define __NR_utimes 235
533__SYSCALL(__NR_utimes, sys_utimes) 536__SYSCALL(__NR_utimes, sys_utimes)
534#define __NR_vserver 236 537#define __NR_vserver 236
535__SYSCALL(__NR_vserver, sys_ni_syscall) 538__SYSCALL(__NR_vserver, sys_ni_syscall)
536#define __NR_mbind 237 539#define __NR_mbind 237
537__SYSCALL(__NR_mbind, sys_mbind) 540__SYSCALL(__NR_mbind, sys_mbind)
538#define __NR_set_mempolicy 238 541#define __NR_set_mempolicy 238
539__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) 542__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
540#define __NR_get_mempolicy 239 543#define __NR_get_mempolicy 239
541__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) 544__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
542#define __NR_mq_open 240 545#define __NR_mq_open 240
543__SYSCALL(__NR_mq_open, sys_mq_open) 546__SYSCALL(__NR_mq_open, sys_mq_open)
544#define __NR_mq_unlink 241 547#define __NR_mq_unlink 241
545__SYSCALL(__NR_mq_unlink, sys_mq_unlink) 548__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
546#define __NR_mq_timedsend 242 549#define __NR_mq_timedsend 242
547__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend) 550__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
548#define __NR_mq_timedreceive 243 551#define __NR_mq_timedreceive 243
549__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive) 552__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
550#define __NR_mq_notify 244 553#define __NR_mq_notify 244
551__SYSCALL(__NR_mq_notify, sys_mq_notify) 554__SYSCALL(__NR_mq_notify, sys_mq_notify)
552#define __NR_mq_getsetattr 245 555#define __NR_mq_getsetattr 245
553__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr) 556__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
554#define __NR_kexec_load 246 557#define __NR_kexec_load 246
555__SYSCALL(__NR_kexec_load, sys_kexec_load) 558__SYSCALL(__NR_kexec_load, sys_kexec_load)
556#define __NR_waitid 247 559#define __NR_waitid 247
557__SYSCALL(__NR_waitid, sys_waitid) 560__SYSCALL(__NR_waitid, sys_waitid)
558#define __NR_add_key 248 561#define __NR_add_key 248
559__SYSCALL(__NR_add_key, sys_add_key) 562__SYSCALL(__NR_add_key, sys_add_key)
560#define __NR_request_key 249 563#define __NR_request_key 249
561__SYSCALL(__NR_request_key, sys_request_key) 564__SYSCALL(__NR_request_key, sys_request_key)
562#define __NR_keyctl 250 565#define __NR_keyctl 250
563__SYSCALL(__NR_keyctl, sys_keyctl) 566__SYSCALL(__NR_keyctl, sys_keyctl)
564#define __NR_ioprio_set 251 567#define __NR_ioprio_set 251
565__SYSCALL(__NR_ioprio_set, sys_ioprio_set) 568__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
566#define __NR_ioprio_get 252 569#define __NR_ioprio_get 252
567__SYSCALL(__NR_ioprio_get, sys_ioprio_get) 570__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
568#define __NR_inotify_init 253 571#define __NR_inotify_init 253
569__SYSCALL(__NR_inotify_init, sys_inotify_init) 572__SYSCALL(__NR_inotify_init, sys_inotify_init)
570#define __NR_inotify_add_watch 254 573#define __NR_inotify_add_watch 254
571__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch) 574__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
572#define __NR_inotify_rm_watch 255 575#define __NR_inotify_rm_watch 255
573__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) 576__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
574#define __NR_migrate_pages 256 577#define __NR_migrate_pages 256
575__SYSCALL(__NR_migrate_pages, sys_migrate_pages) 578__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
576#define __NR_openat 257 579#define __NR_openat 257
577__SYSCALL(__NR_openat, sys_openat) 580__SYSCALL(__NR_openat, sys_openat)
578#define __NR_mkdirat 258 581#define __NR_mkdirat 258
579__SYSCALL(__NR_mkdirat, sys_mkdirat) 582__SYSCALL(__NR_mkdirat, sys_mkdirat)
580#define __NR_mknodat 259 583#define __NR_mknodat 259
581__SYSCALL(__NR_mknodat, sys_mknodat) 584__SYSCALL(__NR_mknodat, sys_mknodat)
582#define __NR_fchownat 260 585#define __NR_fchownat 260
583__SYSCALL(__NR_fchownat, sys_fchownat) 586__SYSCALL(__NR_fchownat, sys_fchownat)
584#define __NR_futimesat 261 587#define __NR_futimesat 261
585__SYSCALL(__NR_futimesat, sys_futimesat) 588__SYSCALL(__NR_futimesat, sys_futimesat)
586#define __NR_newfstatat 262 589#define __NR_newfstatat 262
587__SYSCALL(__NR_newfstatat, sys_newfstatat) 590__SYSCALL(__NR_newfstatat, sys_newfstatat)
588#define __NR_unlinkat 263 591#define __NR_unlinkat 263
589__SYSCALL(__NR_unlinkat, sys_unlinkat) 592__SYSCALL(__NR_unlinkat, sys_unlinkat)
590#define __NR_renameat 264 593#define __NR_renameat 264
591__SYSCALL(__NR_renameat, sys_renameat) 594__SYSCALL(__NR_renameat, sys_renameat)
592#define __NR_linkat 265 595#define __NR_linkat 265
593__SYSCALL(__NR_linkat, sys_linkat) 596__SYSCALL(__NR_linkat, sys_linkat)
594#define __NR_symlinkat 266 597#define __NR_symlinkat 266
595__SYSCALL(__NR_symlinkat, sys_symlinkat) 598__SYSCALL(__NR_symlinkat, sys_symlinkat)
596#define __NR_readlinkat 267 599#define __NR_readlinkat 267
597__SYSCALL(__NR_readlinkat, sys_readlinkat) 600__SYSCALL(__NR_readlinkat, sys_readlinkat)
598#define __NR_fchmodat 268 601#define __NR_fchmodat 268
599__SYSCALL(__NR_fchmodat, sys_fchmodat) 602__SYSCALL(__NR_fchmodat, sys_fchmodat)
600#define __NR_faccessat 269 603#define __NR_faccessat 269
601__SYSCALL(__NR_faccessat, sys_faccessat) 604__SYSCALL(__NR_faccessat, sys_faccessat)
602#define __NR_pselect6 270 605#define __NR_pselect6 270
603__SYSCALL(__NR_pselect6, sys_pselect6) 606__SYSCALL(__NR_pselect6, sys_pselect6)
604#define __NR_ppoll 271 607#define __NR_ppoll 271
605__SYSCALL(__NR_ppoll, sys_ppoll) 608__SYSCALL(__NR_ppoll, sys_ppoll)
606#define __NR_unshare 272 609#define __NR_unshare 272
607__SYSCALL(__NR_unshare, sys_unshare) 610__SYSCALL(__NR_unshare, sys_unshare)
608#define __NR_set_robust_list 273 611#define __NR_set_robust_list 273
609__SYSCALL(__NR_set_robust_list, sys_set_robust_list) 612__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
610#define __NR_get_robust_list 274 613#define __NR_get_robust_list 274
611__SYSCALL(__NR_get_robust_list, sys_get_robust_list) 614__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
612#define __NR_splice 275 615#define __NR_splice 275
613__SYSCALL(__NR_splice, sys_splice) 616__SYSCALL(__NR_splice, sys_splice)
614#define __NR_tee 276 617#define __NR_tee 276
615__SYSCALL(__NR_tee, sys_tee) 618__SYSCALL(__NR_tee, sys_tee)
616#define __NR_sync_file_range 277 619#define __NR_sync_file_range 277
617__SYSCALL(__NR_sync_file_range, sys_sync_file_range) 620__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
618#define __NR_vmsplice 278 621#define __NR_vmsplice 278
619__SYSCALL(__NR_vmsplice, sys_vmsplice) 622__SYSCALL(__NR_vmsplice, sys_vmsplice)
620#define __NR_move_pages 279 623#define __NR_move_pages 279
621__SYSCALL(__NR_move_pages, sys_move_pages) 624__SYSCALL(__NR_move_pages, sys_move_pages)
622#define __NR_utimensat 280 625#define __NR_utimensat 280
623__SYSCALL(__NR_utimensat, sys_utimensat) 626__SYSCALL(__NR_utimensat, sys_utimensat)
624#define __IGNORE_getcpu /* implemented as a vsyscall */ 627#define __IGNORE_getcpu /* implemented as a vsyscall */
625#define __NR_epoll_pwait 281 628#define __NR_epoll_pwait 281
626__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait) 629__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
627#define __NR_signalfd 282 630#define __NR_signalfd 282
628__SYSCALL(__NR_signalfd, sys_signalfd) 631__SYSCALL(__NR_signalfd, sys_signalfd)
629#define __NR_timerfd 283 632#define __NR_timerfd 283
630__SYSCALL(__NR_timerfd, sys_timerfd) 633__SYSCALL(__NR_timerfd, sys_timerfd)
631#define __NR_eventfd 284 634#define __NR_eventfd 284
632__SYSCALL(__NR_eventfd, sys_eventfd) 635__SYSCALL(__NR_eventfd, sys_eventfd)
633#define __NR_fallocate 285 636#define __NR_fallocate 285
634__SYSCALL(__NR_fallocate, sys_fallocate) 637__SYSCALL(__NR_fallocate, sys_fallocate)
635 638
636#ifndef __NO_STUBS 639#ifndef __NO_STUBS
@@ -656,26 +659,9 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
656#define __ARCH_WANT_SYS_RT_SIGSUSPEND 659#define __ARCH_WANT_SYS_RT_SIGSUSPEND
657#define __ARCH_WANT_SYS_TIME 660#define __ARCH_WANT_SYS_TIME
658#define __ARCH_WANT_COMPAT_SYS_TIME 661#define __ARCH_WANT_COMPAT_SYS_TIME
659
660#ifdef __KERNEL__
661#ifndef __ASSEMBLY__
662
663#include <linux/linkage.h>
664#include <linux/compiler.h>
665#include <linux/types.h>
666#include <asm/ptrace.h>
667
668asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs);
669struct sigaction;
670asmlinkage long sys_rt_sigaction(int sig,
671 const struct sigaction __user *act,
672 struct sigaction __user *oact,
673 size_t sigsetsize);
674
675#endif /* __ASSEMBLY__ */
676#endif /* __KERNEL__ */
677#endif /* __NO_STUBS */ 662#endif /* __NO_STUBS */
678 663
664#ifdef __KERNEL__
679/* 665/*
680 * "Conditional" syscalls 666 * "Conditional" syscalls
681 * 667 *
@@ -683,5 +669,6 @@ asmlinkage long sys_rt_sigaction(int sig,
683 * but it doesn't work on all toolchains, so we just do it by hand 669 * but it doesn't work on all toolchains, so we just do it by hand
684 */ 670 */
685#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 671#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
672#endif /* __KERNEL__ */
686 673
687#endif /* _ASM_X86_64_UNISTD_H_ */ 674#endif /* _ASM_X86_64_UNISTD_H_ */
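
The long run of __NR_* / __SYSCALL() pairs above is an X-macro list: with __SYSCALL() redefined by each consumer, the same header can emit either plain constants or the entries of the 64-bit dispatch table, and cond_syscall() weak-aliases any handler a configuration leaves out to sys_ni_syscall. Below is a minimal user-space sketch of the table-building expansion, assuming GCC's designated-initializer range extension; the names my_sys_call_table and NR_DEMO_SYSCALLS are illustrative, not the kernel's actual syscall table code.

/* Sketch of the X-macro trick behind __SYSCALL(): one list, re-expanded
 * per consumer.  Unlisted slots default to sys_ni_syscall, mirroring the
 * real table's behaviour. */
typedef long (*sys_call_ptr_t)(void);

static long sys_ni_syscall(void) { return -38; }   /* -ENOSYS */
static long sys_gettid(void)     { return 0;   }   /* stand-in body */

#define NR_DEMO_SYSCALLS 287

#define __SYSCALL(nr, entry) [nr] = (sys_call_ptr_t)(entry),
static const sys_call_ptr_t my_sys_call_table[NR_DEMO_SYSCALLS] = {
	[0 ... NR_DEMO_SYSCALLS - 1] = (sys_call_ptr_t)sys_ni_syscall,
	__SYSCALL(186, sys_gettid)          /* __NR_gettid from the list above */
};
#undef __SYSCALL
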
diff --git a/include/asm-x86/unwind.h b/include/asm-x86/unwind.h
index 7e4d7ad55208..8b064bd9c553 100644
--- a/include/asm-x86/unwind.h
+++ b/include/asm-x86/unwind.h
@@ -1,5 +1,13 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_UNWIND_H
2# include "unwind_32.h" 2#define _ASM_X86_UNWIND_H
3#else 3
4# include "unwind_64.h" 4#define UNW_PC(frame) ((void)(frame), 0UL)
5#endif 5#define UNW_SP(frame) ((void)(frame), 0UL)
6#define UNW_FP(frame) ((void)(frame), 0UL)
7
8static inline int arch_unw_user_mode(const void *info)
9{
10 return 0;
11}
12
13#endif /* _ASM_X86_UNWIND_H */
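
The consolidated header keeps only the stub unwinder interface: the ((void)(frame), 0UL) comma expression evaluates the argument, so call sites keep compiling without unused-variable warnings, yet every query yields 0 and the generic code falls back to the frame-pointer stack dumper. A minimal user-space sketch of that behaviour; struct demo_frame is a hypothetical stand-in for the kernel's unwind state.

/* Sketch: what callers of the stub unwind macros observe.  The macro
 * bodies are copied from the header above; demo_frame is illustrative. */
#include <stdio.h>

#define UNW_PC(frame) ((void)(frame), 0UL)
#define UNW_SP(frame) ((void)(frame), 0UL)
#define UNW_FP(frame) ((void)(frame), 0UL)

struct demo_frame { unsigned long ip, sp, bp; };

int main(void)
{
	struct demo_frame f = { 0x1000, 0x2000, 0x3000 };

	/* The comma operator evaluates 'frame' and discards it, so every
	 * query is 0UL regardless of the frame's contents. */
	printf("pc=%#lx sp=%#lx fp=%#lx\n",
	       UNW_PC(&f), UNW_SP(&f), UNW_FP(&f));
	return 0;
}
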
diff --git a/include/asm-x86/unwind_32.h b/include/asm-x86/unwind_32.h
deleted file mode 100644
index 43c70c3de2f9..000000000000
--- a/include/asm-x86/unwind_32.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef _ASM_I386_UNWIND_H
2#define _ASM_I386_UNWIND_H
3
4#define UNW_PC(frame) ((void)(frame), 0)
5#define UNW_SP(frame) ((void)(frame), 0)
6#define UNW_FP(frame) ((void)(frame), 0)
7
8static inline int arch_unw_user_mode(const void *info)
9{
10 return 0;
11}
12
13#endif /* _ASM_I386_UNWIND_H */
diff --git a/include/asm-x86/unwind_64.h b/include/asm-x86/unwind_64.h
deleted file mode 100644
index 02710f6a4560..000000000000
--- a/include/asm-x86/unwind_64.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_64_UNWIND_H
2#define _ASM_X86_64_UNWIND_H
3
4#define UNW_PC(frame) ((void)(frame), 0UL)
5#define UNW_SP(frame) ((void)(frame), 0UL)
6
7static inline int arch_unw_user_mode(const void *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_X86_64_UNWIND_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 09306d47ff5e..ea1bf5ba092f 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -19,5 +19,11 @@
19#define SDIO_CLASS_WLAN 0x07 /* WLAN interface */ 19#define SDIO_CLASS_WLAN 0x07 /* WLAN interface */
20#define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */ 20#define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */
21 21
22/*
23 * Vendors and devices. Sort key: vendor first, device next.
24 */
25
26#define SDIO_VENDOR_ID_MARVELL 0x02df
27#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
22 28
23#endif 29#endif
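
The new Marvell IDs are meant to be consumed through a driver's sdio_device_id match table. A minimal sketch, assuming the SDIO_DEVICE() helper and the sdio_driver registration API from the SDIO core; the demo_* names and probe body are illustrative, not the in-tree libertas driver.

/* Sketch: matching on the vendor/device pair defined above. */
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct sdio_device_id demo_sdio_ids[] = {
	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_LIBERTAS) },
	{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, demo_sdio_ids);

static int demo_sdio_probe(struct sdio_func *func,
			   const struct sdio_device_id *id)
{
	dev_info(&func->dev, "found Marvell Libertas SDIO function\n");
	return 0;
}

static void demo_sdio_remove(struct sdio_func *func)
{
}

static struct sdio_driver demo_sdio_driver = {
	.name		= "demo_sdio",
	.id_table	= demo_sdio_ids,
	.probe		= demo_sdio_probe,
	.remove		= demo_sdio_remove,
};

static int __init demo_sdio_init(void)
{
	return sdio_register_driver(&demo_sdio_driver);
}
module_init(demo_sdio_init);

static void __exit demo_sdio_exit(void)
{
	sdio_unregister_driver(&demo_sdio_driver);
}
module_exit(demo_sdio_exit);

MODULE_LICENSE("GPL");
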
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index ff61ea365997..b05d8a6d9143 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -160,8 +160,9 @@ struct vcpu_set_singleshot_timer {
160 */ 160 */
161#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */ 161#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */
162struct vcpu_register_vcpu_info { 162struct vcpu_register_vcpu_info {
163 uint32_t mfn; /* mfn of page to place vcpu_info */ 163 uint64_t mfn; /* mfn of page to place vcpu_info */
164 uint32_t offset; /* offset within page */ 164 uint32_t offset; /* offset within page */
165 uint32_t rsvd; /* unused */
165}; 166};
166 167
167#endif /* __XEN_PUBLIC_VCPU_H__ */ 168#endif /* __XEN_PUBLIC_VCPU_H__ */
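
Widening mfn to 64 bits (with an explicit rsvd pad so the structure layout stays unambiguous) is visible to the guest-side registration path. A minimal sketch of how a pv guest registers its per-vcpu info area, assuming the HYPERVISOR_vcpu_op() wrapper from <asm/xen/hypercall.h>; the function name and the way the mfn/offset were obtained are illustrative, not the in-tree enlighten.c code.

/* Sketch: registering a vcpu_info area with the widened 64-bit mfn. */
#include <xen/interface/vcpu.h>
#include <asm/xen/hypercall.h>

static int demo_register_vcpu_info(int cpu, uint64_t mfn, uint32_t offset)
{
	struct vcpu_register_vcpu_info info = {
		.mfn    = mfn,      /* now 64-bit wide */
		.offset = offset,
		.rsvd   = 0,        /* keep the new padding field clear */
	};

	/* Hypervisors without this op return -ENOSYS; callers then fall
	 * back to the vcpu_info slots embedded in the shared_info page. */
	return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
}
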
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index fab9dd8bbd6b..8cfb8b2ce773 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -222,20 +222,8 @@ static void tick_do_broadcast_on_off(void *why)
222 if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP)) 222 if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
223 goto out; 223 goto out;
224 224
225 /* 225 if (!tick_device_is_functional(dev))
226 * Defect device ? 226 goto out;
227 */
228 if (!tick_device_is_functional(dev)) {
229 /*
230 * AMD C1E wreckage fixup:
231 *
232 * Device was registered functional in the first
233 * place. Now the secondary CPU detected the C1E
234 * misfeature and notifies us to fix it up
235 */
236 if (*reason != CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
237 goto out;
238 }
239 227
240 switch (*reason) { 228 switch (*reason) {
241 case CLOCK_EVT_NOTIFY_BROADCAST_ON: 229 case CLOCK_EVT_NOTIFY_BROADCAST_ON:
@@ -246,6 +234,8 @@ static void tick_do_broadcast_on_off(void *why)
246 clockevents_set_mode(dev, 234 clockevents_set_mode(dev,
247 CLOCK_EVT_MODE_SHUTDOWN); 235 CLOCK_EVT_MODE_SHUTDOWN);
248 } 236 }
237 if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
238 dev->features |= CLOCK_EVT_FEAT_DUMMY;
249 break; 239 break;
250 case CLOCK_EVT_NOTIFY_BROADCAST_OFF: 240 case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
251 if (cpu_isset(cpu, tick_broadcast_mask)) { 241 if (cpu_isset(cpu, tick_broadcast_mask)) {
diff --git a/mm/Kconfig b/mm/Kconfig
index 1cc6cada2bbf..b1f03b0eb7f1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -155,7 +155,6 @@ config SPLIT_PTLOCK_CPUS
155 int 155 int
156 default "4096" if ARM && !CPU_CACHE_VIPT 156 default "4096" if ARM && !CPU_CACHE_VIPT
157 default "4096" if PARISC && !PA20 157 default "4096" if PARISC && !PA20
158 default "4096" if XEN
159 default "4" 158 default "4"
160 159
161# 160#