author    Linus Torvalds <torvalds@linux-foundation.org>  2008-10-10 11:07:53 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-10 11:28:58 -0400
commit    d403a6484f0341bf0624d17ece46f24f741b6a92
tree      be1c2ec69a3caa9f437e4b87ca9cac80e57fbc4d
parent    ed458df4d2470adc02762a87a9ad665d0b1a2bd4
parent    e496e3d645c93206faf61ff6005995ebd08cc39c
Merge phase #1 of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
This merges phase 1 of the x86 tree, which is a collection of branches:

  x86/alternatives, x86/cleanups, x86/commandline, x86/crashdump,
  x86/debug, x86/defconfig, x86/doc, x86/exports, x86/fpu, x86/gart,
  x86/idle, x86/mm, x86/mtrr, x86/nmi-watchdog, x86/oprofile,
  x86/paravirt, x86/reboot, x86/sparse-fixes, x86/tsc, x86/urgent and
  x86/vmalloc

and as Ingo says: "these are the easiest, purely independent x86 topics
with no conflicts, in one nice Octopus merge".

* 'x86-v28-for-linus-phase1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (147 commits)
  x86: mtrr_cleanup: treat WRPROT as UNCACHEABLE
  x86: mtrr_cleanup: first 1M may be covered in var mtrrs
  x86: mtrr_cleanup: print out correct type v2
  x86: trivial printk fix in efi.c
  x86, debug: mtrr_cleanup print out var mtrr before change it
  x86: mtrr_cleanup try gran_size to less than 1M, v3
  x86: mtrr_cleanup try gran_size to less than 1M, cleanup
  x86: change MTRR_SANITIZER to def_bool y
  x86, debug printouts: IOMMU setup failures should not be KERN_ERR
  x86: export set_memory_ro and set_memory_rw
  x86: mtrr_cleanup try gran_size to less than 1M
  x86: mtrr_cleanup prepare to make gran_size to less 1M
  x86: mtrr_cleanup safe to get more spare regs now
  x86_64: be less annoying on boot, v2
  x86: mtrr_cleanup hole size should be less than half of chunk_size, v2
  x86: add mtrr_cleanup_debug command line
  x86: mtrr_cleanup optimization, v2
  x86: don't need to go to chunksize to 4G
  x86_64: be less annoying on boot
  x86, olpc: fix endian bug in openfirmware workaround
  ...
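For reference, a pull of this kind is reproduced with plain git; the URL and branch name below are the ones named in the message above, while the command itself is only an illustrative sketch, not a record of what was actually typed:

    git pull git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip \
            x86-v28-for-linus-phase1

The "Octopus merge" Ingo refers to happened on the tip side, where git merge was given all of the topic branches at once (git merge accepts any number of heads and records each of them as a parent of a single merge commit).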
-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/kernel-parameters.txt12
-rw-r--r--Documentation/x86/00-INDEX4
-rw-r--r--Documentation/x86/boot.txt (renamed from Documentation/x86/i386/boot.txt)2
-rw-r--r--Documentation/x86/mtrr.txt (renamed from Documentation/mtrr.txt)4
-rw-r--r--Documentation/x86/pat.txt54
-rw-r--r--Documentation/x86/usb-legacy-support.txt (renamed from Documentation/x86/i386/usb-legacy-support.txt)0
-rw-r--r--Documentation/x86/x86_64/boot-options.txt4
-rw-r--r--Documentation/x86/zero-page.txt (renamed from Documentation/x86/i386/zero-page.txt)0
-rw-r--r--arch/x86/Kconfig75
-rw-r--r--arch/x86/Kconfig.cpu18
-rw-r--r--arch/x86/boot/compressed/head_32.S5
-rw-r--r--arch/x86/boot/compressed/misc.c12
-rw-r--r--arch/x86/boot/header.S1
-rw-r--r--arch/x86/configs/i386_defconfig19
-rw-r--r--arch/x86/configs/x86_64_defconfig29
-rw-r--r--arch/x86/ia32/ia32_aout.c11
-rw-r--r--arch/x86/ia32/ia32_signal.c21
-rw-r--r--arch/x86/ia32/sys_ia32.c9
-rw-r--r--arch/x86/kernel/acpi/boot.c5
-rw-r--r--arch/x86/kernel/alternative.c8
-rw-r--r--arch/x86/kernel/aperture_64.c6
-rw-r--r--arch/x86/kernel/apm_32.c1
-rw-r--r--arch/x86/kernel/asm-offsets_64.c2
-rw-r--r--arch/x86/kernel/bios_uv.c10
-rw-r--r--arch/x86/kernel/cpu/common_64.c51
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c7
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c4
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c274
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c86
-rw-r--r--arch/x86/kernel/cpuid.c1
-rw-r--r--arch/x86/kernel/crash_dump_64.c13
-rw-r--r--arch/x86/kernel/ds.c954
-rw-r--r--arch/x86/kernel/efi.c6
-rw-r--r--arch/x86/kernel/entry_64.S4
-rw-r--r--arch/x86/kernel/head64.c5
-rw-r--r--arch/x86/kernel/ioport.c1
-rw-r--r--arch/x86/kernel/ipi.c3
-rw-r--r--arch/x86/kernel/irq_32.c2
-rw-r--r--arch/x86/kernel/irq_64.c2
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/ldt.c1
-rw-r--r--arch/x86/kernel/nmi.c11
-rw-r--r--arch/x86/kernel/olpc.c6
-rw-r--r--arch/x86/kernel/paravirt.c1
-rw-r--r--arch/x86/kernel/paravirt_patch_32.c2
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/pci-gart_64.c27
-rw-r--r--arch/x86/kernel/pcspeaker.c13
-rw-r--r--arch/x86/kernel/process.c3
-rw-r--r--arch/x86/kernel/process_32.c62
-rw-r--r--arch/x86/kernel/process_64.c170
-rw-r--r--arch/x86/kernel/ptrace.c480
-rw-r--r--arch/x86/kernel/reboot.c6
-rw-r--r--arch/x86/kernel/setup.c16
-rw-r--r--arch/x86/kernel/setup_percpu.c9
-rw-r--r--arch/x86/kernel/sigframe.h5
-rw-r--r--arch/x86/kernel/signal_32.c12
-rw-r--r--arch/x86/kernel/signal_64.c112
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kernel/sys_i386_32.c2
-rw-r--r--arch/x86/kernel/sys_x86_64.c44
-rw-r--r--arch/x86/kernel/syscall_64.c4
-rw-r--r--arch/x86/kernel/time_32.c1
-rw-r--r--arch/x86/kernel/tls.c1
-rw-r--r--arch/x86/kernel/traps_64.c66
-rw-r--r--arch/x86/kernel/tsc.c290
-rw-r--r--arch/x86/kernel/visws_quirks.c16
-rw-r--r--arch/x86/kernel/vm86_32.c1
-rw-r--r--arch/x86/kernel/vmi_32.c10
-rw-r--r--arch/x86/lib/msr-on-cpu.c78
-rw-r--r--arch/x86/lib/string_32.c42
-rw-r--r--arch/x86/lib/strstr_32.c6
-rw-r--r--arch/x86/mach-default/setup.c4
-rw-r--r--arch/x86/mm/discontig_32.c2
-rw-r--r--arch/x86/mm/dump_pagetables.c4
-rw-r--r--arch/x86/mm/fault.c3
-rw-r--r--arch/x86/mm/init_32.c1
-rw-r--r--arch/x86/mm/init_64.c8
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/numa_64.c10
-rw-r--r--arch/x86/mm/pageattr.c4
-rw-r--r--arch/x86/mm/pgtable.c6
-rw-r--r--arch/x86/mm/pgtable_32.c3
-rw-r--r--arch/x86/oprofile/op_model_p4.c175
-rw-r--r--arch/x86/pci/amd_bus.c2
-rw-r--r--arch/x86/pci/irq.c67
-rw-r--r--arch/x86/power/hibernate_asm_32.S14
-rw-r--r--arch/x86/xen/enlighten.c20
-rw-r--r--include/asm-x86/a.out-core.h6
-rw-r--r--include/asm-x86/a.out.h6
-rw-r--r--include/asm-x86/acpi.h6
-rw-r--r--include/asm-x86/agp.h6
-rw-r--r--include/asm-x86/alternative.h6
-rw-r--r--include/asm-x86/amd_iommu.h6
-rw-r--r--include/asm-x86/amd_iommu_types.h6
-rw-r--r--include/asm-x86/apic.h15
-rw-r--r--include/asm-x86/apicdef.h6
-rw-r--r--include/asm-x86/arch_hooks.h6
-rw-r--r--include/asm-x86/asm.h13
-rw-r--r--include/asm-x86/atomic_32.h6
-rw-r--r--include/asm-x86/atomic_64.h6
-rw-r--r--include/asm-x86/auxvec.h6
-rw-r--r--include/asm-x86/bios_ebda.h6
-rw-r--r--include/asm-x86/bitops.h6
-rw-r--r--include/asm-x86/boot.h6
-rw-r--r--include/asm-x86/bootparam.h6
-rw-r--r--include/asm-x86/bug.h6
-rw-r--r--include/asm-x86/bugs.h6
-rw-r--r--include/asm-x86/byteorder.h6
-rw-r--r--include/asm-x86/cache.h6
-rw-r--r--include/asm-x86/cacheflush.h6
-rw-r--r--include/asm-x86/calgary.h6
-rw-r--r--include/asm-x86/checksum_32.h6
-rw-r--r--include/asm-x86/checksum_64.h6
-rw-r--r--include/asm-x86/cmpxchg_32.h6
-rw-r--r--include/asm-x86/cmpxchg_64.h6
-rw-r--r--include/asm-x86/compat.h6
-rw-r--r--include/asm-x86/cpu.h6
-rw-r--r--include/asm-x86/cpufeature.h6
-rw-r--r--include/asm-x86/current.h6
-rw-r--r--include/asm-x86/debugreg.h6
-rw-r--r--include/asm-x86/delay.h6
-rw-r--r--include/asm-x86/desc.h6
-rw-r--r--include/asm-x86/desc_defs.h6
-rw-r--r--include/asm-x86/device.h6
-rw-r--r--include/asm-x86/div64.h6
-rw-r--r--include/asm-x86/dma-mapping.h6
-rw-r--r--include/asm-x86/dma.h6
-rw-r--r--include/asm-x86/dmi.h6
-rw-r--r--include/asm-x86/ds.h266
-rw-r--r--include/asm-x86/dwarf2.h6
-rw-r--r--include/asm-x86/e820.h7
-rw-r--r--include/asm-x86/edac.h6
-rw-r--r--include/asm-x86/efi.h6
-rw-r--r--include/asm-x86/elf.h11
-rw-r--r--include/asm-x86/emergency-restart.h6
-rw-r--r--include/asm-x86/fb.h6
-rw-r--r--include/asm-x86/fixmap.h6
-rw-r--r--include/asm-x86/fixmap_32.h6
-rw-r--r--include/asm-x86/fixmap_64.h6
-rw-r--r--include/asm-x86/floppy.h6
-rw-r--r--include/asm-x86/ftrace.h6
-rw-r--r--include/asm-x86/futex.h12
-rw-r--r--include/asm-x86/gart.h12
-rw-r--r--include/asm-x86/genapic_32.h6
-rw-r--r--include/asm-x86/genapic_64.h6
-rw-r--r--include/asm-x86/geode.h6
-rw-r--r--include/asm-x86/gpio.h2
-rw-r--r--include/asm-x86/hardirq_32.h6
-rw-r--r--include/asm-x86/hardirq_64.h6
-rw-r--r--include/asm-x86/highmem.h6
-rw-r--r--include/asm-x86/hpet.h6
-rw-r--r--include/asm-x86/hugetlb.h6
-rw-r--r--include/asm-x86/hw_irq.h26
-rw-r--r--include/asm-x86/hypertransport.h6
-rw-r--r--include/asm-x86/i387.h7
-rw-r--r--include/asm-x86/i8253.h6
-rw-r--r--include/asm-x86/i8259.h6
-rw-r--r--include/asm-x86/ia32.h6
-rw-r--r--include/asm-x86/ia32_unistd.h6
-rw-r--r--include/asm-x86/idle.h6
-rw-r--r--include/asm-x86/intel_arch_perfmon.h6
-rw-r--r--include/asm-x86/io.h8
-rw-r--r--include/asm-x86/io_32.h6
-rw-r--r--include/asm-x86/io_64.h7
-rw-r--r--include/asm-x86/io_apic.h6
-rw-r--r--include/asm-x86/ioctls.h6
-rw-r--r--include/asm-x86/iommu.h6
-rw-r--r--include/asm-x86/ipcbuf.h6
-rw-r--r--include/asm-x86/ipi.h6
-rw-r--r--include/asm-x86/irq.h6
-rw-r--r--include/asm-x86/irq_regs_32.h6
-rw-r--r--include/asm-x86/irq_vectors.h6
-rw-r--r--include/asm-x86/ist.h6
-rw-r--r--include/asm-x86/k8.h6
-rw-r--r--include/asm-x86/kdebug.h6
-rw-r--r--include/asm-x86/kexec.h6
-rw-r--r--include/asm-x86/kgdb.h6
-rw-r--r--include/asm-x86/kmap_types.h6
-rw-r--r--include/asm-x86/kprobes.h6
-rw-r--r--include/asm-x86/kvm.h6
-rw-r--r--include/asm-x86/kvm_host.h8
-rw-r--r--include/asm-x86/kvm_para.h6
-rw-r--r--include/asm-x86/kvm_x86_emulate.h6
-rw-r--r--include/asm-x86/ldt.h6
-rw-r--r--include/asm-x86/lguest.h6
-rw-r--r--include/asm-x86/lguest_hcall.h6
-rw-r--r--include/asm-x86/linkage.h6
-rw-r--r--include/asm-x86/local.h6
-rw-r--r--include/asm-x86/mach-bigsmp/mach_apic.h6
-rw-r--r--include/asm-x86/mach-bigsmp/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-bigsmp/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-default/apm.h6
-rw-r--r--include/asm-x86/mach-default/mach_apic.h6
-rw-r--r--include/asm-x86/mach-default/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-default/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-default/mach_mpparse.h6
-rw-r--r--include/asm-x86/mach-default/mach_mpspec.h6
-rw-r--r--include/asm-x86/mach-default/mach_timer.h6
-rw-r--r--include/asm-x86/mach-default/mach_traps.h6
-rw-r--r--include/asm-x86/mach-default/mach_wakecpu.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_apic.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_mpparse.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_wakecpu.h6
-rw-r--r--include/asm-x86/mach-generic/gpio.h6
-rw-r--r--include/asm-x86/mach-generic/irq_vectors_limits.h6
-rw-r--r--include/asm-x86/mach-generic/mach_apic.h6
-rw-r--r--include/asm-x86/mach-generic/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-generic/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-generic/mach_mpparse.h6
-rw-r--r--include/asm-x86/mach-generic/mach_mpspec.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_apic.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_mpparse.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_wakecpu.h6
-rw-r--r--include/asm-x86/mach-rdc321x/gpio.h9
-rw-r--r--include/asm-x86/mach-summit/irq_vectors_limits.h6
-rw-r--r--include/asm-x86/mach-summit/mach_apic.h6
-rw-r--r--include/asm-x86/mach-summit/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-summit/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-summit/mach_mpparse.h6
-rw-r--r--include/asm-x86/math_emu.h6
-rw-r--r--include/asm-x86/mc146818rtc.h6
-rw-r--r--include/asm-x86/mca.h6
-rw-r--r--include/asm-x86/mca_dma.h6
-rw-r--r--include/asm-x86/mce.h6
-rw-r--r--include/asm-x86/mman.h6
-rw-r--r--include/asm-x86/mmconfig.h6
-rw-r--r--include/asm-x86/mmu.h11
-rw-r--r--include/asm-x86/mmu_context.h6
-rw-r--r--include/asm-x86/mmu_context_32.h6
-rw-r--r--include/asm-x86/mmu_context_64.h6
-rw-r--r--include/asm-x86/mmx.h6
-rw-r--r--include/asm-x86/mmzone_32.h6
-rw-r--r--include/asm-x86/mmzone_64.h6
-rw-r--r--include/asm-x86/module.h6
-rw-r--r--include/asm-x86/mpspec.h6
-rw-r--r--include/asm-x86/mpspec_def.h6
-rw-r--r--include/asm-x86/msgbuf.h6
-rw-r--r--include/asm-x86/msidef.h6
-rw-r--r--include/asm-x86/msr-index.h6
-rw-r--r--include/asm-x86/msr.h29
-rw-r--r--include/asm-x86/mtrr.h6
-rw-r--r--include/asm-x86/mutex_32.h6
-rw-r--r--include/asm-x86/mutex_64.h6
-rw-r--r--include/asm-x86/nmi.h7
-rw-r--r--include/asm-x86/nops.h6
-rw-r--r--include/asm-x86/numa_32.h6
-rw-r--r--include/asm-x86/numa_64.h6
-rw-r--r--include/asm-x86/numaq.h6
-rw-r--r--include/asm-x86/olpc.h6
-rw-r--r--include/asm-x86/page.h6
-rw-r--r--include/asm-x86/page_32.h10
-rw-r--r--include/asm-x86/page_64.h7
-rw-r--r--include/asm-x86/param.h6
-rw-r--r--include/asm-x86/paravirt.h48
-rw-r--r--include/asm-x86/parport.h6
-rw-r--r--include/asm-x86/pat.h6
-rw-r--r--include/asm-x86/pci-direct.h6
-rw-r--r--include/asm-x86/pci.h6
-rw-r--r--include/asm-x86/pci_32.h6
-rw-r--r--include/asm-x86/pci_64.h6
-rw-r--r--include/asm-x86/pda.h6
-rw-r--r--include/asm-x86/percpu.h6
-rw-r--r--include/asm-x86/pgalloc.h6
-rw-r--r--include/asm-x86/pgtable-2level-defs.h6
-rw-r--r--include/asm-x86/pgtable-2level.h8
-rw-r--r--include/asm-x86/pgtable-3level-defs.h6
-rw-r--r--include/asm-x86/pgtable-3level.h13
-rw-r--r--include/asm-x86/pgtable.h15
-rw-r--r--include/asm-x86/pgtable_32.h12
-rw-r--r--include/asm-x86/pgtable_64.h8
-rw-r--r--include/asm-x86/posix_types_32.h6
-rw-r--r--include/asm-x86/posix_types_64.h6
-rw-r--r--include/asm-x86/prctl.h6
-rw-r--r--include/asm-x86/processor-flags.h6
-rw-r--r--include/asm-x86/processor.h22
-rw-r--r--include/asm-x86/proto.h6
-rw-r--r--include/asm-x86/ptrace-abi.h20
-rw-r--r--include/asm-x86/ptrace.h52
-rw-r--r--include/asm-x86/pvclock-abi.h6
-rw-r--r--include/asm-x86/pvclock.h6
-rw-r--r--include/asm-x86/reboot.h6
-rw-r--r--include/asm-x86/reboot_fixups.h6
-rw-r--r--include/asm-x86/required-features.h6
-rw-r--r--include/asm-x86/resume-trace.h8
-rw-r--r--include/asm-x86/rio.h6
-rw-r--r--include/asm-x86/rwlock.h6
-rw-r--r--include/asm-x86/rwsem.h6
-rw-r--r--include/asm-x86/scatterlist.h6
-rw-r--r--include/asm-x86/seccomp_32.h6
-rw-r--r--include/asm-x86/seccomp_64.h6
-rw-r--r--include/asm-x86/segment.h6
-rw-r--r--include/asm-x86/sembuf.h6
-rw-r--r--include/asm-x86/serial.h6
-rw-r--r--include/asm-x86/setup.h7
-rw-r--r--include/asm-x86/shmbuf.h6
-rw-r--r--include/asm-x86/shmparam.h6
-rw-r--r--include/asm-x86/sigcontext.h6
-rw-r--r--include/asm-x86/sigcontext32.h6
-rw-r--r--include/asm-x86/siginfo.h6
-rw-r--r--include/asm-x86/signal.h9
-rw-r--r--include/asm-x86/smp.h10
-rw-r--r--include/asm-x86/socket.h6
-rw-r--r--include/asm-x86/sockios.h6
-rw-r--r--include/asm-x86/sparsemem.h6
-rw-r--r--include/asm-x86/spinlock.h12
-rw-r--r--include/asm-x86/spinlock_types.h6
-rw-r--r--include/asm-x86/srat.h6
-rw-r--r--include/asm-x86/stacktrace.h6
-rw-r--r--include/asm-x86/stat.h6
-rw-r--r--include/asm-x86/statfs.h6
-rw-r--r--include/asm-x86/string_32.h6
-rw-r--r--include/asm-x86/string_64.h6
-rw-r--r--include/asm-x86/suspend_32.h6
-rw-r--r--include/asm-x86/suspend_64.h6
-rw-r--r--include/asm-x86/swiotlb.h6
-rw-r--r--include/asm-x86/sync_bitops.h6
-rw-r--r--include/asm-x86/syscall.h211
-rw-r--r--include/asm-x86/syscalls.h93
-rw-r--r--include/asm-x86/system.h6
-rw-r--r--include/asm-x86/system_64.h6
-rw-r--r--include/asm-x86/tce.h6
-rw-r--r--include/asm-x86/termbits.h6
-rw-r--r--include/asm-x86/termios.h6
-rw-r--r--include/asm-x86/therm_throt.h6
-rw-r--r--include/asm-x86/thread_info.h10
-rw-r--r--include/asm-x86/time.h8
-rw-r--r--include/asm-x86/timer.h11
-rw-r--r--include/asm-x86/timex.h6
-rw-r--r--include/asm-x86/tlb.h6
-rw-r--r--include/asm-x86/tlbflush.h6
-rw-r--r--include/asm-x86/topology.h6
-rw-r--r--include/asm-x86/trampoline.h6
-rw-r--r--include/asm-x86/traps.h10
-rw-r--r--include/asm-x86/tsc.h6
-rw-r--r--include/asm-x86/types.h6
-rw-r--r--include/asm-x86/uaccess.h6
-rw-r--r--include/asm-x86/uaccess_32.h6
-rw-r--r--include/asm-x86/uaccess_64.h6
-rw-r--r--include/asm-x86/ucontext.h6
-rw-r--r--include/asm-x86/unaligned.h6
-rw-r--r--include/asm-x86/unistd_32.h6
-rw-r--r--include/asm-x86/unistd_64.h6
-rw-r--r--include/asm-x86/unwind.h6
-rw-r--r--include/asm-x86/user32.h6
-rw-r--r--include/asm-x86/user_32.h6
-rw-r--r--include/asm-x86/user_64.h6
-rw-r--r--include/asm-x86/uv/bios.h6
-rw-r--r--include/asm-x86/uv/uv_bau.h6
-rw-r--r--include/asm-x86/uv/uv_hub.h6
-rw-r--r--include/asm-x86/uv/uv_mmrs.h6
-rw-r--r--include/asm-x86/vdso.h6
-rw-r--r--include/asm-x86/vga.h6
-rw-r--r--include/asm-x86/vgtod.h6
-rw-r--r--include/asm-x86/visws/cobalt.h6
-rw-r--r--include/asm-x86/visws/lithium.h6
-rw-r--r--include/asm-x86/visws/piix4.h6
-rw-r--r--include/asm-x86/vm86.h6
-rw-r--r--include/asm-x86/vmi_time.h6
-rw-r--r--include/asm-x86/vsyscall.h6
-rw-r--r--include/asm-x86/xen/events.h6
-rw-r--r--include/asm-x86/xen/grant_table.h6
-rw-r--r--include/asm-x86/xen/hypercall.h6
-rw-r--r--include/asm-x86/xen/hypervisor.h6
-rw-r--r--include/asm-x86/xen/interface.h6
-rw-r--r--include/asm-x86/xen/interface_32.h6
-rw-r--r--include/asm-x86/xen/interface_64.h6
-rw-r--r--include/asm-x86/xen/page.h6
374 files changed, 3870 insertions, 2221 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 5b5aba404aa..73060819ed9 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -251,8 +251,6 @@ mono.txt
   - how to execute Mono-based .NET binaries with the help of BINFMT_MISC.
 moxa-smartio
   - file with info on installing/using Moxa multiport serial driver.
-mtrr.txt
-  - how to use PPro Memory Type Range Registers to increase performance.
 mutex-design.txt
   - info on the generic mutex subsystem.
 namespaces/
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1150444a21a..329dcabe4c5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -463,12 +463,6 @@ and is between 256 and 4096 characters. It is defined in the file
 			Range: 0 - 8192
 			Default: 64
 
-	disable_8254_timer
-	enable_8254_timer
-			[IA32/X86_64] Disable/Enable interrupt 0 timer routing
-			over the 8254 in addition to over the IO-APIC. The
-			kernel tries to set a sensible default.
-
 	hpet=		[X86-32,HPET] option to control HPET usage
 			Format: { enable (default) | disable | force }
 			disable: disable HPET and use PIT instead
@@ -1882,6 +1876,12 @@ and is between 256 and 4096 characters. It is defined in the file
 	shapers=	[NET]
 			Maximal number of shapers.
 
+	show_msr=	[x86] show boot-time MSR settings
+			Format: { <integer> }
+			Show boot-time (BIOS-initialized) MSR settings.
+			The parameter means the number of CPUs to show,
+			for example 1 means boot CPU only.
+
 	sim710=		[SCSI,HW]
 			See header of drivers/scsi/sim710.c.
 
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
new file mode 100644
index 00000000000..dbe3377754a
--- /dev/null
+++ b/Documentation/x86/00-INDEX
@@ -0,0 +1,4 @@
+00-INDEX
+  - this file
+mtrr.txt
+  - how to use x86 Memory Type Range Registers to increase performance
diff --git a/Documentation/x86/i386/boot.txt b/Documentation/x86/boot.txt
index 147bfe511cd..83c0033ee9e 100644
--- a/Documentation/x86/i386/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -308,7 +308,7 @@ Protocol: 2.00+
 
 Field name:	start_sys
 Type:		read
-Offset/size:	0x20c/4
+Offset/size:	0x20c/2
 Protocol:	2.00+
 
   The load low segment (0x1000). Obsolete.
diff --git a/Documentation/mtrr.txt b/Documentation/x86/mtrr.txt
index c39ac395970..cc071dc333c 100644
--- a/Documentation/mtrr.txt
+++ b/Documentation/x86/mtrr.txt
@@ -18,7 +18,7 @@ Richard Gooch
   The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
   MTRRs. These are supported. The AMD Athlon family provide 8 Intel
   style MTRRs.
-  
+
   The Centaur C6 (WinChip) has 8 MCRs, allowing write-combining. These
   are supported.
 
@@ -87,7 +87,7 @@ reg00: base=0x00000000 ( 0MB), size= 64MB: write-back, count=1
 reg01: base=0xfb000000 (4016MB), size=  16MB: write-combining, count=1
 reg02: base=0xfb000000 (4016MB), size=   4kB: uncachable, count=1
 
-Some cards (especially Voodoo Graphics boards) need this 4 kB area 
+Some cards (especially Voodoo Graphics boards) need this 4 kB area
 excluded from the beginning of the region because it is used for
 registers.
 
diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt
index 17965f927c1..c93ff5f4c0d 100644
--- a/Documentation/x86/pat.txt
+++ b/Documentation/x86/pat.txt
@@ -14,6 +14,10 @@ PAT allows for different types of memory attributes. The most commonly used
 ones that will be supported at this time are Write-back, Uncached,
 Write-combined and Uncached Minus.
 
+
+PAT APIs
+--------
+
 There are many different APIs in the kernel that allows setting of memory
 attributes at the page level. In order to avoid aliasing, these interfaces
 should be used thoughtfully. Below is a table of interfaces available,
@@ -26,38 +30,38 @@ address range to avoid any aliasing.
 API                    |    RAM   |  ACPI,...  |  Reserved/Holes  |
 -----------------------|----------|------------|------------------|
                        |          |            |                  |
-ioremap                |    --    |    UC      |       UC         |
+ioremap                |    --    |    UC-     |       UC-        |
                        |          |            |                  |
 ioremap_cache          |    --    |    WB      |       WB         |
                        |          |            |                  |
-ioremap_nocache        |    --    |    UC      |       UC         |
+ioremap_nocache        |    --    |    UC-     |       UC-        |
                        |          |            |                  |
 ioremap_wc             |    --    |    --      |       WC         |
                        |          |            |                  |
-set_memory_uc          |    UC    |    --      |       --         |
+set_memory_uc          |    UC-   |    --      |       --         |
  set_memory_wb         |          |            |                  |
                        |          |            |                  |
 set_memory_wc          |    WC    |    --      |       --         |
  set_memory_wb         |          |            |                  |
                        |          |            |                  |
-pci sysfs resource     |    --    |    --      |       UC         |
+pci sysfs resource     |    --    |    --      |       UC-        |
                        |          |            |                  |
 pci sysfs resource_wc  |    --    |    --      |       WC         |
  is IORESOURCE_PREFETCH|          |            |                  |
                        |          |            |                  |
-pci proc               |    --    |    --      |       UC         |
+pci proc               |    --    |    --      |       UC-        |
  !PCIIOC_WRITE_COMBINE |          |            |                  |
                        |          |            |                  |
 pci proc               |    --    |    --      |       WC         |
  PCIIOC_WRITE_COMBINE  |          |            |                  |
                        |          |            |                  |
-/dev/mem               |    --    |    UC      |       UC         |
+/dev/mem               |    --    | WB/WC/UC-  |    WB/WC/UC-     |
  read-write            |          |            |                  |
                        |          |            |                  |
-/dev/mem               |    --    |    UC      |       UC         |
+/dev/mem               |    --    |    UC-     |       UC-        |
  mmap SYNC flag        |          |            |                  |
                        |          |            |                  |
-/dev/mem               |    --    | WB/WC/UC   |     WB/WC/UC     |
+/dev/mem               |    --    | WB/WC/UC-  |    WB/WC/UC-     |
  mmap !SYNC flag       |          |(from exist-| (from exist-     |
  and                   |          | ing alias) |  ing alias)      |
  any alias to this area|          |            |                  |
@@ -68,7 +72,7 @@ pci proc | -- | -- | WC |
  and                   |          |            |                  |
  MTRR says WB          |          |            |                  |
                        |          |            |                  |
-/dev/mem               |    --    |    --      |     UC_MINUS     |
+/dev/mem               |    --    |    --      |       UC-        |
  mmap !SYNC flag       |          |            |                  |
  no alias to this area |          |            |                  |
  and                   |          |            |                  |
@@ -98,3 +102,35 @@ types.
 
 Drivers should use set_memory_[uc|wc] to set access type for RAM ranges.
 
+
+PAT debugging
+-------------
+
+With CONFIG_DEBUG_FS enabled, PAT memtype list can be examined by
+
+# mount -t debugfs debugfs /sys/kernel/debug
+# cat /sys/kernel/debug/x86/pat_memtype_list
+PAT memtype list:
+uncached-minus @ 0x7fadf000-0x7fae0000
+uncached-minus @ 0x7fb19000-0x7fb1a000
+uncached-minus @ 0x7fb1a000-0x7fb1b000
+uncached-minus @ 0x7fb1b000-0x7fb1c000
+uncached-minus @ 0x7fb1c000-0x7fb1d000
+uncached-minus @ 0x7fb1d000-0x7fb1e000
+uncached-minus @ 0x7fb1e000-0x7fb25000
+uncached-minus @ 0x7fb25000-0x7fb26000
+uncached-minus @ 0x7fb26000-0x7fb27000
+uncached-minus @ 0x7fb27000-0x7fb28000
+uncached-minus @ 0x7fb28000-0x7fb2e000
+uncached-minus @ 0x7fb2e000-0x7fb2f000
+uncached-minus @ 0x7fb2f000-0x7fb30000
+uncached-minus @ 0x7fb31000-0x7fb32000
+uncached-minus @ 0x80000000-0x90000000
+
+This list shows physical address ranges and various PAT settings used to
+access those physical address ranges.
+
+Another, more verbose way of getting PAT related debug messages is with
+"debugpat" boot parameter. With this parameter, various debug messages are
+printed to dmesg log.
+
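The table above is a policy summary; as a concrete illustration (a sketch of mine, not an excerpt from the kernel tree — the function name map_prefetch_bar_wc and the choice of BAR 1 are made up), a driver mapping a prefetchable PCI BAR write-combined under these rules would look roughly like:

    #include <linux/io.h>
    #include <linux/pci.h>

    /* Map BAR 1 write-combined: reserved/hole range + ioremap_wc => WC per the table. */
    static void __iomem *map_prefetch_bar_wc(struct pci_dev *pdev)
    {
            resource_size_t start = pci_resource_start(pdev, 1);
            resource_size_t len   = pci_resource_len(pdev, 1);

            return ioremap_wc(start, len);
    }

For RAM pages, as the document's last line says, the same effect is obtained with set_memory_wc()/set_memory_wb() instead of ioremap variants.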
diff --git a/Documentation/x86/i386/usb-legacy-support.txt b/Documentation/x86/usb-legacy-support.txt
index 1894cdfc69d..1894cdfc69d 100644
--- a/Documentation/x86/i386/usb-legacy-support.txt
+++ b/Documentation/x86/usb-legacy-support.txt
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index b0c7b6c4abd..72ffb5373ec 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -54,10 +54,6 @@ APICs
    apicmaintimer. Useful when your PIT timer is totally
    broken.
 
-   disable_8254_timer / enable_8254_timer
-   Enable interrupt 0 timer routing over the 8254 in addition to over
-   the IO-APIC. The kernel tries to set a sensible default.
-
 Early Console
 
    syntax: earlyprintk=vga
diff --git a/Documentation/x86/i386/zero-page.txt b/Documentation/x86/zero-page.txt
index 169ad423a3d..169ad423a3d 100644
--- a/Documentation/x86/i386/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ed92864d132..97f0d2b6dc0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86
 	select HAVE_FTRACE
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
+	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 
@@ -1020,7 +1021,7 @@ config HAVE_ARCH_ALLOC_REMAP
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
-	depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA
+	depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA
 
 config ARCH_DISCONTIGMEM_ENABLE
 	def_bool y
@@ -1036,7 +1037,7 @@ config ARCH_SPARSEMEM_DEFAULT
 
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
-	depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC)
+	depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
 	select SPARSEMEM_STATIC if X86_32
 	select SPARSEMEM_VMEMMAP_ENABLE if X86_64
 
@@ -1117,10 +1118,10 @@ config MTRR
 	  You can safely say Y even if your machine doesn't have MTRRs, you'll
 	  just add about 9 KB to your kernel.
 
-	  See <file:Documentation/mtrr.txt> for more information.
+	  See <file:Documentation/x86/mtrr.txt> for more information.
 
 config MTRR_SANITIZER
-	bool
+	def_bool y
 	prompt "MTRR cleanup support"
 	depends on MTRR
 	help
@@ -1131,7 +1132,7 @@ config MTRR_SANITIZER
 	  The largest mtrr entry size for a continous block can be set with
 	  mtrr_chunk_size.
 
-	  If unsure, say N.
+	  If unsure, say Y.
 
 config MTRR_SANITIZER_ENABLE_DEFAULT
 	int "MTRR cleanup enable value (0-1)"
@@ -1191,7 +1192,6 @@ config IRQBALANCE
 config SECCOMP
 	def_bool y
 	prompt "Enable seccomp to safely compute untrusted bytecode"
-	depends on PROC_FS
 	help
 	  This kernel feature is useful for number crunching applications
 	  that may need to compute untrusted bytecode during their
@@ -1199,7 +1199,7 @@ config SECCOMP
 	  the process as file descriptors supporting the read/write
 	  syscalls, it's possible to isolate those applications in
 	  their own address space using seccomp. Once seccomp is
-	  enabled via /proc/<pid>/seccomp, it cannot be disabled
+	  enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
 	  and the task is only allowed to execute a few safe syscalls
 	  defined by each seccomp mode.
 
@@ -1356,14 +1356,14 @@ config PHYSICAL_ALIGN
 	  Don't change this unless you know what you are doing.
 
 config HOTPLUG_CPU
-	bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)"
-	depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP && HOTPLUG && !X86_VOYAGER
 	---help---
-	  Say Y here to experiment with turning CPUs off and on, and to
-	  enable suspend on SMP systems. CPUs can be controlled through
-	  /sys/devices/system/cpu.
-	  Say N if you want to disable CPU hotplug and don't need to
-	  suspend.
+	  Say Y here to allow turning CPUs off and on. CPUs can be
+	  controlled through /sys/devices/system/cpu.
+	  ( Note: power management support will enable this option
+	    automatically on SMP systems. )
+	  Say N if you want to disable CPU hotplug.
 
 config COMPAT_VDSO
 	def_bool y
@@ -1378,6 +1378,51 @@ config COMPAT_VDSO
 
 	  If unsure, say Y.
 
+config CMDLINE_BOOL
+	bool "Built-in kernel command line"
+	default n
+	help
+	  Allow for specifying boot arguments to the kernel at
+	  build time. On some systems (e.g. embedded ones), it is
+	  necessary or convenient to provide some or all of the
+	  kernel boot arguments with the kernel itself (that is,
+	  to not rely on the boot loader to provide them.)
+
+	  To compile command line arguments into the kernel,
+	  set this option to 'Y', then fill in the
+	  the boot arguments in CONFIG_CMDLINE.
+
+	  Systems with fully functional boot loaders (i.e. non-embedded)
+	  should leave this option set to 'N'.
+
+config CMDLINE
+	string "Built-in kernel command string"
+	depends on CMDLINE_BOOL
+	default ""
+	help
+	  Enter arguments here that should be compiled into the kernel
+	  image and used at boot time. If the boot loader provides a
+	  command line at boot time, it is appended to this string to
+	  form the full kernel command line, when the system boots.
+
+	  However, you can use the CONFIG_CMDLINE_OVERRIDE option to
+	  change this behavior.
+
+	  In most cases, the command line (whether built-in or provided
+	  by the boot loader) should specify the device for the root
+	  file system.
+
+config CMDLINE_OVERRIDE
+	bool "Built-in command line overrides boot loader arguments"
+	default n
+	depends on CMDLINE_BOOL
+	help
+	  Set this option to 'Y' to have the kernel ignore the boot loader
+	  command line, and use ONLY the built-in command line.
+
+	  This is used to work around broken boot loaders. This should
+	  be set to 'N' under normal conditions.
+
 endmenu
 
 config ARCH_ENABLE_MEMORY_HOTPLUG
@@ -1773,7 +1818,7 @@ config COMPAT_FOR_U64_ALIGNMENT
 
 config SYSVIPC_COMPAT
 	def_bool y
-	depends on X86_64 && COMPAT && SYSVIPC
+	depends on COMPAT && SYSVIPC
 
 endmenu
 
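To illustrate the three options added in the hunk above, a built-in command line ends up in .config as something like the fragment below (the console= and root= values are made-up examples, not defaults from this patch):

    CONFIG_CMDLINE_BOOL=y
    CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/sda1 ro"
    # CONFIG_CMDLINE_OVERRIDE is not set

With CMDLINE_OVERRIDE left off, anything the boot loader passes is appended to the built-in string; turning it on makes the kernel ignore the boot loader arguments entirely.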
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b225219c448..60a85768cfc 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -418,3 +418,21 @@ config X86_MINIMUM_CPU_FAMILY
 config X86_DEBUGCTLMSR
 	def_bool y
 	depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
+
+config X86_DS
+	bool "Debug Store support"
+	default y
+	help
+	  Add support for Debug Store.
+	  This allows the kernel to provide a memory buffer to the hardware
+	  to store various profiling and tracing events.
+
+config X86_PTRACE_BTS
+	bool "ptrace interface to Branch Trace Store"
+	default y
+	depends on (X86_DS && X86_DEBUGCTLMSR)
+	help
+	  Add a ptrace interface to allow collecting an execution trace
+	  of the traced task.
+	  This collects control flow changes in a (cyclic) buffer and allows
+	  debuggers to fill in the gaps and show an execution trace of the debuggee.
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index ba7736cf2ec..29c5fbf0839 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -137,14 +137,15 @@ relocated:
  */
 	movl output_len(%ebx), %eax
 	pushl %eax
+				# push arguments for decompress_kernel:
 	pushl %ebp		# output address
 	movl input_len(%ebx), %eax
 	pushl %eax		# input_len
 	leal input_data(%ebx), %eax
 	pushl %eax		# input_data
 	leal boot_heap(%ebx), %eax
-	pushl %eax		# heap area as third argument
-	pushl %esi		# real mode pointer as second arg
+	pushl %eax		# heap area
+	pushl %esi		# real mode pointer
 	call decompress_kernel
 	addl $20, %esp
 	popl %ecx
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 9fea7370647..5780d361105 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -16,7 +16,7 @@
  */
 #undef CONFIG_PARAVIRT
 #ifdef CONFIG_X86_32
-#define _ASM_DESC_H_ 1
+#define ASM_X86__DESC_H 1
 #endif
 
 #ifdef CONFIG_X86_64
@@ -27,7 +27,7 @@
 #include <linux/linkage.h>
 #include <linux/screen_info.h>
 #include <linux/elf.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>
 #include <asm/boot.h>
 #include <asm/bootparam.h>
@@ -251,7 +251,7 @@ static void __putstr(int error, const char *s)
 				y--;
 			}
 	} else {
-		vidmem [(x + cols * y) * 2] = c;
+		vidmem[(x + cols * y) * 2] = c;
 		if (++x >= cols) {
 			x = 0;
 			if (++y >= lines) {
@@ -277,7 +277,8 @@ static void *memset(void *s, int c, unsigned n)
 	int i;
 	char *ss = s;
 
-	for (i = 0; i < n; i++) ss[i] = c;
+	for (i = 0; i < n; i++)
+		ss[i] = c;
 	return s;
 }
 
@@ -287,7 +288,8 @@ static void *memcpy(void *dest, const void *src, unsigned n)
 	const char *s = src;
 	char *d = dest;
 
-	for (i = 0; i < n; i++) d[i] = s[i];
+	for (i = 0; i < n; i++)
+		d[i] = s[i];
 	return dest;
 }
 
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index af86e431acf..b993062e9a5 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -30,7 +30,6 @@ SYSSEG = DEF_SYSSEG /* system loaded at 0x10000 (65536) */
 SYSSIZE		= DEF_SYSSIZE	/* system size: # of 16-byte clicks */
 				/* to be loaded */
 ROOT_DEV	= 0		/* ROOT_DEV is now written by "build" */
-SWAP_DEV	= 0		/* SWAP_DEV is now written by "build" */
 
 #ifndef SVGA_MODE
 #define SVGA_MODE ASK_VGA
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 104275e191a..ef9a52005ec 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc4
-# Mon Aug 25 15:04:00 2008
+# Linux kernel version: 2.6.27-rc5
+# Wed Sep 3 17:23:09 2008
 #
 # CONFIG_64BIT is not set
 CONFIG_X86_32=y
@@ -202,7 +202,7 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
 # CONFIG_M586 is not set
 # CONFIG_M586TSC is not set
 # CONFIG_M586MMX is not set
-# CONFIG_M686 is not set
+CONFIG_M686=y
 # CONFIG_MPENTIUMII is not set
 # CONFIG_MPENTIUMIII is not set
 # CONFIG_MPENTIUMM is not set
@@ -221,13 +221,14 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
 # CONFIG_MVIAC3_2 is not set
 # CONFIG_MVIAC7 is not set
 # CONFIG_MPSC is not set
-CONFIG_MCORE2=y
+# CONFIG_MCORE2 is not set
 # CONFIG_GENERIC_CPU is not set
 CONFIG_X86_GENERIC=y
 CONFIG_X86_CPU=y
 CONFIG_X86_CMPXCHG=y
 CONFIG_X86_L1_CACHE_SHIFT=7
 CONFIG_X86_XADD=y
+# CONFIG_X86_PPRO_FENCE is not set
 CONFIG_X86_WP_WORKS_OK=y
 CONFIG_X86_INVLPG=y
 CONFIG_X86_BSWAP=y
@@ -235,14 +236,15 @@ CONFIG_X86_POPAD_OK=y
 CONFIG_X86_INTEL_USERCOPY=y
 CONFIG_X86_USE_PPRO_CHECKSUM=y
 CONFIG_X86_TSC=y
+CONFIG_X86_CMOV=y
 CONFIG_X86_MINIMUM_CPU_FAMILY=4
 CONFIG_X86_DEBUGCTLMSR=y
 CONFIG_HPET_TIMER=y
 CONFIG_HPET_EMULATE_RTC=y
 CONFIG_DMI=y
 # CONFIG_IOMMU_HELPER is not set
-CONFIG_NR_CPUS=4
-# CONFIG_SCHED_SMT is not set
+CONFIG_NR_CPUS=64
+CONFIG_SCHED_SMT=y
 CONFIG_SCHED_MC=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
@@ -254,7 +256,8 @@ CONFIG_VM86=y
 # CONFIG_TOSHIBA is not set
 # CONFIG_I8K is not set
 CONFIG_X86_REBOOTFIXUPS=y
-# CONFIG_MICROCODE is not set
+CONFIG_MICROCODE=y
+CONFIG_MICROCODE_OLD_INTERFACE=y
 CONFIG_X86_MSR=y
 CONFIG_X86_CPUID=y
 # CONFIG_NOHIGHMEM is not set
@@ -2115,7 +2118,7 @@ CONFIG_IO_DELAY_0X80=y
 CONFIG_DEFAULT_IO_DELAY_TYPE=0
 CONFIG_DEBUG_BOOT_PARAMS=y
 # CONFIG_CPA_DEBUG is not set
-# CONFIG_OPTIMIZE_INLINING is not set
+CONFIG_OPTIMIZE_INLINING=y
 
 #
 # Security options
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 678c8acefe0..e620ea6e2a7 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc4
-# Mon Aug 25 14:40:46 2008
+# Linux kernel version: 2.6.27-rc5
+# Wed Sep 3 17:13:39 2008
 #
 CONFIG_64BIT=y
 # CONFIG_X86_32 is not set
@@ -218,17 +218,14 @@ CONFIG_X86_PC=y
 # CONFIG_MVIAC3_2 is not set
 # CONFIG_MVIAC7 is not set
 # CONFIG_MPSC is not set
-CONFIG_MCORE2=y
-# CONFIG_GENERIC_CPU is not set
+# CONFIG_MCORE2 is not set
+CONFIG_GENERIC_CPU=y
 CONFIG_X86_CPU=y
-CONFIG_X86_L1_CACHE_BYTES=64
-CONFIG_X86_INTERNODE_CACHE_BYTES=64
+CONFIG_X86_L1_CACHE_BYTES=128
+CONFIG_X86_INTERNODE_CACHE_BYTES=128
 CONFIG_X86_CMPXCHG=y
-CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=7
 CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-CONFIG_X86_P6_NOP=y
 CONFIG_X86_TSC=y
 CONFIG_X86_CMPXCHG64=y
 CONFIG_X86_CMOV=y
@@ -243,9 +240,8 @@ CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
 CONFIG_AMD_IOMMU=y
 CONFIG_SWIOTLB=y
 CONFIG_IOMMU_HELPER=y
-# CONFIG_MAXSMP is not set
-CONFIG_NR_CPUS=4
-# CONFIG_SCHED_SMT is not set
+CONFIG_NR_CPUS=64
+CONFIG_SCHED_SMT=y
 CONFIG_SCHED_MC=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
@@ -254,7 +250,8 @@ CONFIG_X86_LOCAL_APIC=y
 CONFIG_X86_IO_APIC=y
 # CONFIG_X86_MCE is not set
 # CONFIG_I8K is not set
-# CONFIG_MICROCODE is not set
+CONFIG_MICROCODE=y
+CONFIG_MICROCODE_OLD_INTERFACE=y
 CONFIG_X86_MSR=y
 CONFIG_X86_CPUID=y
 CONFIG_NUMA=y
@@ -290,7 +287,7 @@ CONFIG_BOUNCE=y
 CONFIG_VIRT_TO_BUS=y
 CONFIG_MTRR=y
 # CONFIG_MTRR_SANITIZER is not set
-# CONFIG_X86_PAT is not set
+CONFIG_X86_PAT=y
 CONFIG_EFI=y
 CONFIG_SECCOMP=y
 # CONFIG_HZ_100 is not set
@@ -2089,7 +2086,7 @@ CONFIG_IO_DELAY_0X80=y
 CONFIG_DEFAULT_IO_DELAY_TYPE=0
 CONFIG_DEBUG_BOOT_PARAMS=y
 # CONFIG_CPA_DEBUG is not set
-# CONFIG_OPTIMIZE_INLINING is not set
+CONFIG_OPTIMIZE_INLINING=y
 
 #
 # Security options
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a0e1dbe67dc..127ec3f0721 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -85,8 +85,10 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
 	dump->regs.ax = regs->ax;
 	dump->regs.ds = current->thread.ds;
 	dump->regs.es = current->thread.es;
-	asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs;
-	asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs;
+	savesegment(fs, fs);
+	dump->regs.fs = fs;
+	savesegment(gs, gs);
+	dump->regs.gs = gs;
 	dump->regs.orig_ax = regs->orig_ax;
 	dump->regs.ip = regs->ip;
 	dump->regs.cs = regs->cs;
@@ -430,8 +432,9 @@ beyond_if:
 	current->mm->start_stack =
 		(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
 	/* start thread */
-	asm volatile("movl %0,%%fs" :: "r" (0)); \
-	asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
+	loadsegment(fs, 0);
+	loadsegment(ds, __USER32_DS);
+	loadsegment(es, __USER32_DS);
 	load_gs_index(0);
 	(regs)->ip = ex.a_entry;
 	(regs)->sp = current->mm->start_stack;
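For readers unfamiliar with the helpers this diff (and the ia32_signal.c one below) swaps in: savesegment() and loadsegment() already live in include/asm-x86/system.h, which this merge also touches. Roughly, and simplified from memory rather than quoted from the tree (loadsegment_sketch is my name, not the kernel's), they wrap the same segment-register moves the open-coded asm used to do:

    /* read a segment register into 'value' (sketch of the real savesegment) */
    #define savesegment(seg, value) \
            asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

    /* store direction; the real loadsegment() additionally carries a .fixup */
    /* entry so a bogus selector is replaced by 0 instead of faulting.       */
    #define loadsegment_sketch(seg, value) \
            asm volatile("movl %k0,%%" #seg : : "r" (value))

Centralizing the selector accesses in these macros keeps the inline asm in one place, which is the point of the cleanup above.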
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 20af4c79579..f1a2ac777fa 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -206,7 +206,7 @@ struct rt_sigframe
 	{ unsigned int cur;					\
 	  unsigned short pre;					\
 	  err |= __get_user(pre, &sc->seg);			\
-	  asm volatile("movl %%" #seg ",%0" : "=r" (cur));	\
+	  savesegment(seg, cur);				\
 	  pre |= mask;						\
 	  if (pre != cur) loadsegment(seg, pre); }
 
@@ -235,7 +235,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 	 */
 	err |= __get_user(gs, &sc->gs);
 	gs |= 3;
-	asm("movl %%gs,%0" : "=r" (oldgs));
+	savesegment(gs, oldgs);
 	if (gs != oldgs)
 		load_gs_index(gs);
 
@@ -355,14 +355,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 {
 	int tmp, err = 0;
 
-	tmp = 0;
-	__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
+	savesegment(gs, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-	__asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
+	savesegment(fs, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
-	__asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp));
+	savesegment(ds, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
-	__asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp));
+	savesegment(es, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->es);
 
 	err |= __put_user((u32)regs->di, &sc->di);
@@ -498,8 +497,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 	regs->dx = 0;
 	regs->cx = 0;
 
-	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
-	asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+	loadsegment(ds, __USER32_DS);
+	loadsegment(es, __USER32_DS);
 
 	regs->cs = __USER32_CS;
 	regs->ss = __USER32_DS;
@@ -591,8 +590,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->dx = (unsigned long) &frame->info;
 	regs->cx = (unsigned long) &frame->uc;
 
-	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
-	asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+	loadsegment(ds, __USER32_DS);
+	loadsegment(es, __USER32_DS);
 
 	regs->cs = __USER32_CS;
 	regs->ss = __USER32_DS;
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index d3c64088b98..beda4232ce6 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -556,15 +556,6 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
 	return ret;
 }
 
-/* These are here just in case some old ia32 binary calls it. */
-asmlinkage long sys32_pause(void)
-{
-	current->state = TASK_INTERRUPTIBLE;
-	schedule();
-	return -ERESTARTNOHAND;
-}
-
-
 #ifdef CONFIG_SYSCTL_SYSCALL
 struct sysctl_ia32 {
 	unsigned int name;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index c102af85df9..7d40ef7b36e 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled);
 #ifdef CONFIG_X86_64
 
 #include <asm/proto.h>
-#include <asm/genapic.h>
 
 #else /* X86 */
 
@@ -97,8 +96,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #warning ACPI uses CMPXCHG, i486 and later hardware
 #endif
 
-static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
-
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
@@ -160,6 +157,8 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 struct acpi_mcfg_allocation *pci_mmcfg_config;
 int pci_mmcfg_config_num;
 
+static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
+
 static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
 {
 	if (!strcmp(mcfg->header.oem_id, "SGI"))
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 65a0c1b4869..fb04e49776b 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -231,25 +231,25 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 			continue;
 		if (*ptr > text_end)
 			continue;
-		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
+		/* turn DS segment override prefix into lock prefix */
+		text_poke(*ptr, ((unsigned char []){0xf0}), 1);
 	};
 }
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
 	u8 **ptr;
-	char insn[1];
 
 	if (noreplace_smp)
 		return;
 
-	add_nops(insn, 1);
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr < text)
 			continue;
 		if (*ptr > text_end)
 			continue;
-		text_poke(*ptr, insn, 1);
+		/* turn lock prefix into DS segment override prefix */
+		text_poke(*ptr, ((unsigned char []){0x3E}), 1);
 	};
 }
 
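A short illustration of why a DS segment override works as the replacement byte here (this snippet is mine, not part of the patch): both the lock prefix (0xf0) and the DS override (0x3e) are single bytes, so patching one into the other never moves instruction boundaries, and a DS override in front of an ordinary memory operand has no effect on its behaviour:

    /* What an SMP-alternatives lock site boils down to, schematically. */
    static inline void example_locked_inc(int *v)
    {
            asm volatile("lock; incl %0"   /* 0xf0 0xff ... : locked RMW on SMP      */
                         : "+m" (*v));     /* 0x3e 0xff ... : same-length plain RMW  */
    }                                      /*  after alternatives_smp_unlock()       */

The code removed above patched the byte to a NOP instead; with the prefix approach the site stays a single instruction rather than a one-byte NOP followed by the un-prefixed instruction.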
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 44e21826db1..9a32b37ee2e 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -455,11 +455,11 @@ out:
 	    force_iommu ||
 	    valid_agp ||
 	    fallback_aper_force) {
-		printk(KERN_ERR
+		printk(KERN_INFO
 			"Your BIOS doesn't leave a aperture memory hole\n");
-		printk(KERN_ERR
+		printk(KERN_INFO
 			"Please enable the IOMMU option in the BIOS setup\n");
-		printk(KERN_ERR
+		printk(KERN_INFO
 			"This costs you %d MB of RAM\n",
 			32 << fallback_aper_order);
 
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 732d1f4e10e..5145a6e72bb 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,7 +228,6 @@
 #include <linux/suspend.h>
 #include <linux/kthread.h>
 #include <linux/jiffies.h>
-#include <linux/smp_lock.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index aa89387006f..505543a75a5 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -22,7 +22,7 @@
 
 #define __NO_STUBS 1
 #undef __SYSCALL
-#undef _ASM_X86_64_UNISTD_H_
+#undef ASM_X86__UNISTD_64_H
 #define __SYSCALL(nr, sym) [nr] = 1,
 static char syscalls[] = {
 #include <asm/unistd.h>
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index c639bd55391..fdd585f9c53 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -25,11 +25,11 @@ x86_bios_strerror(long status)
25{ 25{
26 const char *str; 26 const char *str;
27 switch (status) { 27 switch (status) {
28 case 0: str = "Call completed without error"; break; 28 case 0: str = "Call completed without error"; break;
29 case -1: str = "Not implemented"; break; 29 case -1: str = "Not implemented"; break;
30 case -2: str = "Invalid argument"; break; 30 case -2: str = "Invalid argument"; break;
31 case -3: str = "Call completed with error"; break; 31 case -3: str = "Call completed with error"; break;
32 default: str = "Unknown BIOS status code"; break; 32 default: str = "Unknown BIOS status code"; break;
33 } 33 }
34 return str; 34 return str;
35} 35}
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index a11f5d4477c..305b465889b 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -430,6 +430,49 @@ static __init int setup_noclflush(char *arg)
 }
 __setup("noclflush", setup_noclflush);
 
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	if (c->x86_model_id[0])
@@ -439,6 +482,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
439 printk(KERN_CONT " stepping %02x\n", c->x86_mask); 482 printk(KERN_CONT " stepping %02x\n", c->x86_mask);
440 else 483 else
441 printk(KERN_CONT "\n"); 484 printk(KERN_CONT "\n");
485
486#ifdef CONFIG_SMP
487 if (c->cpu_index < show_msr)
488 print_cpu_msr();
489#else
490 if (show_msr)
491 print_cpu_msr();
492#endif
442} 493}
443 494
444static __init int setup_disablecpuid(char *arg) 495static __init int setup_disablecpuid(char *arg)
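The new show_msr= handling above walks fixed MSR index ranges with rdmsrl_amd_safe() and prints every register that can be read without faulting. A rough user-space equivalent, assuming the msr character-device driver is available (the driver uses the file offset as the MSR index; illustration only):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Dump a range of MSRs for CPU 0 through /dev/cpu/0/msr; indices
 * that fault simply fail the pread() and are skipped, mirroring
 * the rdmsrl_amd_safe() check in print_cpu_msr() above. */
static void dump_msr_range(uint32_t min, uint32_t max)
{
        int fd = open("/dev/cpu/0/msr", O_RDONLY);
        uint64_t val;
        uint32_t index;

        if (fd < 0)
                return;
        for (index = min; index < max; index++) {
                if (pread(fd, &val, sizeof(val), index) != sizeof(val))
                        continue;
                printf(" MSR%08x: %016llx\n", index,
                       (unsigned long long)val);
        }
        close(fd);
}
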
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b75f2569b8f..f113ef4595f 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -222,10 +222,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
222 set_cpu_cap(c, X86_FEATURE_BTS); 222 set_cpu_cap(c, X86_FEATURE_BTS);
223 if (!(l1 & (1<<12))) 223 if (!(l1 & (1<<12)))
224 set_cpu_cap(c, X86_FEATURE_PEBS); 224 set_cpu_cap(c, X86_FEATURE_PEBS);
225 ds_init_intel(c);
225 } 226 }
226 227
227 if (cpu_has_bts) 228 if (cpu_has_bts)
228 ds_init_intel(c); 229 ptrace_bts_init_intel(c);
229 230
230 /* 231 /*
231 * See if we have a good local APIC by checking for buggy Pentia, 232 * See if we have a good local APIC by checking for buggy Pentia,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index cb7d3b6a80e..4e8d77f01ee 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -401,12 +401,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
401 tmp |= ~((1<<(hi - 1)) - 1); 401 tmp |= ~((1<<(hi - 1)) - 1);
402 402
403 if (tmp != mask_lo) { 403 if (tmp != mask_lo) {
404 static int once = 1; 404 WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
405
406 if (once) {
407 printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
408 once = 0;
409 }
410 mask_lo = tmp; 405 mask_lo = tmp;
411 } 406 }
412 } 407 }
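The generic.c hunk above collapses a hand-rolled "static int once" guard into WARN_ONCE(). A plain-C sketch of the underlying print-once pattern that the removed code expressed and that the kernel wraps in macros such as WARN_ONCE() (illustration only):

#include <stdio.h>

/* Emit a diagnostic only the first time the condition is hit. */
static void warn_bad_mtrr_mask_once(void)
{
        static int warned;

        if (!warned) {
                warned = 1;
                fprintf(stderr,
                        "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
        }
}
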
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 84c480bb371..4c4214690dd 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -405,9 +405,9 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
405 } 405 }
406 /* RED-PEN: base can be > 32bit */ 406 /* RED-PEN: base can be > 32bit */
407 len += seq_printf(seq, 407 len += seq_printf(seq,
408 "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n", 408 "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n",
409 i, base, base >> (20 - PAGE_SHIFT), size, factor, 409 i, base, base >> (20 - PAGE_SHIFT), size, factor,
410 mtrr_attrib_to_str(type), mtrr_usage_table[i]); 410 mtrr_usage_table[i], mtrr_attrib_to_str(type));
411 } 411 }
412 } 412 }
413 return 0; 413 return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 885c8265e6b..c78c04821ea 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -729,7 +729,7 @@ struct var_mtrr_range_state {
729 mtrr_type type; 729 mtrr_type type;
730}; 730};
731 731
732struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; 732static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
733static int __initdata debug_print; 733static int __initdata debug_print;
734 734
735static int __init 735static int __init
@@ -759,7 +759,8 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
759 /* take out UC ranges */ 759 /* take out UC ranges */
760 for (i = 0; i < num_var_ranges; i++) { 760 for (i = 0; i < num_var_ranges; i++) {
761 type = range_state[i].type; 761 type = range_state[i].type;
762 if (type != MTRR_TYPE_UNCACHABLE) 762 if (type != MTRR_TYPE_UNCACHABLE &&
763 type != MTRR_TYPE_WRPROT)
763 continue; 764 continue;
764 size = range_state[i].size_pfn; 765 size = range_state[i].size_pfn;
765 if (!size) 766 if (!size)
@@ -836,6 +837,13 @@ static int __init enable_mtrr_cleanup_setup(char *str)
836} 837}
837early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); 838early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
838 839
840static int __init mtrr_cleanup_debug_setup(char *str)
841{
842 debug_print = 1;
843 return 0;
844}
845early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
846
839struct var_mtrr_state { 847struct var_mtrr_state {
840 unsigned long range_startk; 848 unsigned long range_startk;
841 unsigned long range_sizek; 849 unsigned long range_sizek;
@@ -898,6 +906,27 @@ set_var_mtrr_all(unsigned int address_bits)
898 } 906 }
899} 907}
900 908
909static unsigned long to_size_factor(unsigned long sizek, char *factorp)
910{
911 char factor;
912 unsigned long base = sizek;
913
914 if (base & ((1<<10) - 1)) {
915 /* not MB alignment */
916 factor = 'K';
917 } else if (base & ((1<<20) - 1)){
918 factor = 'M';
919 base >>= 10;
920 } else {
921 factor = 'G';
922 base >>= 20;
923 }
924
925 *factorp = factor;
926
927 return base;
928}
929
901static unsigned int __init 930static unsigned int __init
902range_to_mtrr(unsigned int reg, unsigned long range_startk, 931range_to_mtrr(unsigned int reg, unsigned long range_startk,
903 unsigned long range_sizek, unsigned char type) 932 unsigned long range_sizek, unsigned char type)
@@ -919,13 +948,21 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
919 align = max_align; 948 align = max_align;
920 949
921 sizek = 1 << align; 950 sizek = 1 << align;
922 if (debug_print) 951 if (debug_print) {
952 char start_factor = 'K', size_factor = 'K';
953 unsigned long start_base, size_base;
954
955 start_base = to_size_factor(range_startk, &start_factor),
956 size_base = to_size_factor(sizek, &size_factor),
957
923 printk(KERN_DEBUG "Setting variable MTRR %d, " 958 printk(KERN_DEBUG "Setting variable MTRR %d, "
924 "base: %ldMB, range: %ldMB, type %s\n", 959 "base: %ld%cB, range: %ld%cB, type %s\n",
925 reg, range_startk >> 10, sizek >> 10, 960 reg, start_base, start_factor,
961 size_base, size_factor,
926 (type == MTRR_TYPE_UNCACHABLE)?"UC": 962 (type == MTRR_TYPE_UNCACHABLE)?"UC":
927 ((type == MTRR_TYPE_WRBACK)?"WB":"Other") 963 ((type == MTRR_TYPE_WRBACK)?"WB":"Other")
928 ); 964 );
965 }
929 save_var_mtrr(reg++, range_startk, sizek, type); 966 save_var_mtrr(reg++, range_startk, sizek, type);
930 range_startk += sizek; 967 range_startk += sizek;
931 range_sizek -= sizek; 968 range_sizek -= sizek;
@@ -970,6 +1007,8 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
970 /* try to append some small hole */ 1007 /* try to append some small hole */
971 range0_basek = state->range_startk; 1008 range0_basek = state->range_startk;
972 range0_sizek = ALIGN(state->range_sizek, chunk_sizek); 1009 range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
1010
1011 /* no increase */
973 if (range0_sizek == state->range_sizek) { 1012 if (range0_sizek == state->range_sizek) {
974 if (debug_print) 1013 if (debug_print)
975 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", 1014 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
@@ -980,13 +1019,40 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
980 return 0; 1019 return 0;
981 } 1020 }
982 1021
983 range0_sizek -= chunk_sizek; 1022 /* only cut back, when it is not the last */
984 if (range0_sizek && sizek) { 1023 if (sizek) {
985 while (range0_basek + range0_sizek > (basek + sizek)) { 1024 while (range0_basek + range0_sizek > (basek + sizek)) {
986 range0_sizek -= chunk_sizek; 1025 if (range0_sizek >= chunk_sizek)
987 if (!range0_sizek) 1026 range0_sizek -= chunk_sizek;
988 break; 1027 else
989 } 1028 range0_sizek = 0;
1029
1030 if (!range0_sizek)
1031 break;
1032 }
1033 }
1034
1035second_try:
1036 range_basek = range0_basek + range0_sizek;
1037
1038 /* one hole in the middle */
1039 if (range_basek > basek && range_basek <= (basek + sizek))
1040 second_sizek = range_basek - basek;
1041
1042 if (range0_sizek > state->range_sizek) {
1043
1044 /* one hole in middle or at end */
1045 hole_sizek = range0_sizek - state->range_sizek - second_sizek;
1046
1047 /* hole size should be less than half of range0 size */
1048 if (hole_sizek >= (range0_sizek >> 1) &&
1049 range0_sizek >= chunk_sizek) {
1050 range0_sizek -= chunk_sizek;
1051 second_sizek = 0;
1052 hole_sizek = 0;
1053
1054 goto second_try;
1055 }
990 } 1056 }
991 1057
992 if (range0_sizek) { 1058 if (range0_sizek) {
@@ -996,50 +1062,28 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
996 (range0_basek + range0_sizek)<<10); 1062 (range0_basek + range0_sizek)<<10);
997 state->reg = range_to_mtrr(state->reg, range0_basek, 1063 state->reg = range_to_mtrr(state->reg, range0_basek,
998 range0_sizek, MTRR_TYPE_WRBACK); 1064 range0_sizek, MTRR_TYPE_WRBACK);
999
1000 }
1001
1002 range_basek = range0_basek + range0_sizek;
1003 range_sizek = chunk_sizek;
1004
1005 if (range_basek + range_sizek > basek &&
1006 range_basek + range_sizek <= (basek + sizek)) {
1007 /* one hole */
1008 second_basek = basek;
1009 second_sizek = range_basek + range_sizek - basek;
1010 } 1065 }
1011 1066
1012 /* if last piece, only could one hole near end */ 1067 if (range0_sizek < state->range_sizek) {
1013 if ((second_basek || !basek) && 1068 /* need to handle left over */
1014 range_sizek - (state->range_sizek - range0_sizek) - second_sizek <
1015 (chunk_sizek >> 1)) {
1016 /*
1017 * one hole in middle (second_sizek is 0) or at end
1018 * (second_sizek is 0 )
1019 */
1020 hole_sizek = range_sizek - (state->range_sizek - range0_sizek)
1021 - second_sizek;
1022 hole_basek = range_basek + range_sizek - hole_sizek
1023 - second_sizek;
1024 } else {
1025 /* fallback for big hole, or several holes */
1026 range_sizek = state->range_sizek - range0_sizek; 1069 range_sizek = state->range_sizek - range0_sizek;
1027 second_basek = 0; 1070
1028 second_sizek = 0; 1071 if (debug_print)
1072 printk(KERN_DEBUG "range: %016lx - %016lx\n",
1073 range_basek<<10,
1074 (range_basek + range_sizek)<<10);
1075 state->reg = range_to_mtrr(state->reg, range_basek,
1076 range_sizek, MTRR_TYPE_WRBACK);
1029 } 1077 }
1030 1078
1031 if (debug_print)
1032 printk(KERN_DEBUG "range: %016lx - %016lx\n", range_basek<<10,
1033 (range_basek + range_sizek)<<10);
1034 state->reg = range_to_mtrr(state->reg, range_basek, range_sizek,
1035 MTRR_TYPE_WRBACK);
1036 if (hole_sizek) { 1079 if (hole_sizek) {
1080 hole_basek = range_basek - hole_sizek - second_sizek;
1037 if (debug_print) 1081 if (debug_print)
1038 printk(KERN_DEBUG "hole: %016lx - %016lx\n", 1082 printk(KERN_DEBUG "hole: %016lx - %016lx\n",
1039 hole_basek<<10, (hole_basek + hole_sizek)<<10); 1083 hole_basek<<10,
1040 state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek, 1084 (hole_basek + hole_sizek)<<10);
1041 MTRR_TYPE_UNCACHABLE); 1085 state->reg = range_to_mtrr(state->reg, hole_basek,
1042 1086 hole_sizek, MTRR_TYPE_UNCACHABLE);
1043 } 1087 }
1044 1088
1045 return second_sizek; 1089 return second_sizek;
@@ -1154,11 +1198,11 @@ struct mtrr_cleanup_result {
1154}; 1198};
1155 1199
1156/* 1200/*
1157 * gran_size: 1M, 2M, ..., 2G 1201 * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G
1158 * chunk size: gran_size, ..., 4G 1202 * chunk size: gran_size, ..., 2G
1159 * so we need (2+13)*6 1203 * so we need (1+16)*8
1160 */ 1204 */
1161#define NUM_RESULT 90 1205#define NUM_RESULT 136
1162#define PSHIFT (PAGE_SHIFT - 10) 1206#define PSHIFT (PAGE_SHIFT - 10)
1163 1207
1164static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; 1208static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
@@ -1168,13 +1212,14 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM];
1168static int __init mtrr_cleanup(unsigned address_bits) 1212static int __init mtrr_cleanup(unsigned address_bits)
1169{ 1213{
1170 unsigned long extra_remove_base, extra_remove_size; 1214 unsigned long extra_remove_base, extra_remove_size;
1171 unsigned long i, base, size, def, dummy; 1215 unsigned long base, size, def, dummy;
1172 mtrr_type type; 1216 mtrr_type type;
1173 int nr_range, nr_range_new; 1217 int nr_range, nr_range_new;
1174 u64 chunk_size, gran_size; 1218 u64 chunk_size, gran_size;
1175 unsigned long range_sums, range_sums_new; 1219 unsigned long range_sums, range_sums_new;
1176 int index_good; 1220 int index_good;
1177 int num_reg_good; 1221 int num_reg_good;
1222 int i;
1178 1223
1179 /* extra one for all 0 */ 1224 /* extra one for all 0 */
1180 int num[MTRR_NUM_TYPES + 1]; 1225 int num[MTRR_NUM_TYPES + 1];
@@ -1204,6 +1249,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
1204 continue; 1249 continue;
1205 if (!size) 1250 if (!size)
1206 type = MTRR_NUM_TYPES; 1251 type = MTRR_NUM_TYPES;
1252 if (type == MTRR_TYPE_WRPROT)
1253 type = MTRR_TYPE_UNCACHABLE;
1207 num[type]++; 1254 num[type]++;
1208 } 1255 }
1209 1256
@@ -1216,23 +1263,57 @@ static int __init mtrr_cleanup(unsigned address_bits)
1216 num_var_ranges - num[MTRR_NUM_TYPES]) 1263 num_var_ranges - num[MTRR_NUM_TYPES])
1217 return 0; 1264 return 0;
1218 1265
1266 /* print original var MTRRs at first, for debugging: */
1267 printk(KERN_DEBUG "original variable MTRRs\n");
1268 for (i = 0; i < num_var_ranges; i++) {
1269 char start_factor = 'K', size_factor = 'K';
1270 unsigned long start_base, size_base;
1271
1272 size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
1273 if (!size_base)
1274 continue;
1275
1276 size_base = to_size_factor(size_base, &size_factor),
1277 start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
1278 start_base = to_size_factor(start_base, &start_factor),
1279 type = range_state[i].type;
1280
1281 printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
1282 i, start_base, start_factor,
1283 size_base, size_factor,
1284 (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
1285 ((type == MTRR_TYPE_WRPROT) ? "WP" :
1286 ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
1287 );
1288 }
1289
1219 memset(range, 0, sizeof(range)); 1290 memset(range, 0, sizeof(range));
1220 extra_remove_size = 0; 1291 extra_remove_size = 0;
1221 if (mtrr_tom2) { 1292 extra_remove_base = 1 << (32 - PAGE_SHIFT);
1222 extra_remove_base = 1 << (32 - PAGE_SHIFT); 1293 if (mtrr_tom2)
1223 extra_remove_size = 1294 extra_remove_size =
1224 (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; 1295 (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
1225 }
1226 nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, 1296 nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
1227 extra_remove_size); 1297 extra_remove_size);
1298 /*
1299 * [0, 1M) should always be coverred by var mtrr with WB
1300 * and fixed mtrrs should take effective before var mtrr for it
1301 */
1302 nr_range = add_range_with_merge(range, nr_range, 0,
1303 (1ULL<<(20 - PAGE_SHIFT)) - 1);
1304 /* sort the ranges */
1305 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
1306
1228 range_sums = sum_ranges(range, nr_range); 1307 range_sums = sum_ranges(range, nr_range);
1229 printk(KERN_INFO "total RAM coverred: %ldM\n", 1308 printk(KERN_INFO "total RAM coverred: %ldM\n",
1230 range_sums >> (20 - PAGE_SHIFT)); 1309 range_sums >> (20 - PAGE_SHIFT));
1231 1310
1232 if (mtrr_chunk_size && mtrr_gran_size) { 1311 if (mtrr_chunk_size && mtrr_gran_size) {
1233 int num_reg; 1312 int num_reg;
1313 char gran_factor, chunk_factor, lose_factor;
1314 unsigned long gran_base, chunk_base, lose_base;
1234 1315
1235 debug_print = 1; 1316 debug_print++;
1236 /* convert ranges to var ranges state */ 1317 /* convert ranges to var ranges state */
1237 num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size, 1318 num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
1238 mtrr_gran_size); 1319 mtrr_gran_size);
@@ -1256,34 +1337,48 @@ static int __init mtrr_cleanup(unsigned address_bits)
1256 result[i].lose_cover_sizek = 1337 result[i].lose_cover_sizek =
1257 (range_sums - range_sums_new) << PSHIFT; 1338 (range_sums - range_sums_new) << PSHIFT;
1258 1339
1259 printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", 1340 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1260 result[i].bad?"*BAD*":" ", result[i].gran_sizek >> 10, 1341 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1261 result[i].chunk_sizek >> 10); 1342 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1262 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM \n", 1343 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
1344 result[i].bad?"*BAD*":" ",
1345 gran_base, gran_factor, chunk_base, chunk_factor);
1346 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
1263 result[i].num_reg, result[i].bad?"-":"", 1347 result[i].num_reg, result[i].bad?"-":"",
1264 result[i].lose_cover_sizek >> 10); 1348 lose_base, lose_factor);
1265 if (!result[i].bad) { 1349 if (!result[i].bad) {
1266 set_var_mtrr_all(address_bits); 1350 set_var_mtrr_all(address_bits);
1267 return 1; 1351 return 1;
1268 } 1352 }
1269 printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " 1353 printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
1270 "will find optimal one\n"); 1354 "will find optimal one\n");
1271 debug_print = 0; 1355 debug_print--;
1272 memset(result, 0, sizeof(result[0])); 1356 memset(result, 0, sizeof(result[0]));
1273 } 1357 }
1274 1358
1275 i = 0; 1359 i = 0;
1276 memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); 1360 memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
1277 memset(result, 0, sizeof(result)); 1361 memset(result, 0, sizeof(result));
1278 for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) { 1362 for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
1279 for (chunk_size = gran_size; chunk_size < (1ULL<<33); 1363 char gran_factor;
1364 unsigned long gran_base;
1365
1366 if (debug_print)
1367 gran_base = to_size_factor(gran_size >> 10, &gran_factor);
1368
1369 for (chunk_size = gran_size; chunk_size < (1ULL<<32);
1280 chunk_size <<= 1) { 1370 chunk_size <<= 1) {
1281 int num_reg; 1371 int num_reg;
1282 1372
1283 if (debug_print) 1373 if (debug_print) {
1284 printk(KERN_INFO 1374 char chunk_factor;
1285 "\ngran_size: %lldM chunk_size_size: %lldM\n", 1375 unsigned long chunk_base;
1286 gran_size >> 20, chunk_size >> 20); 1376
1377 chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
1378 printk(KERN_INFO "\n");
1379 printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n",
1380 gran_base, gran_factor, chunk_base, chunk_factor);
1381 }
1287 if (i >= NUM_RESULT) 1382 if (i >= NUM_RESULT)
1288 continue; 1383 continue;
1289 1384
@@ -1326,12 +1421,18 @@ static int __init mtrr_cleanup(unsigned address_bits)
1326 1421
1327 /* print out all */ 1422 /* print out all */
1328 for (i = 0; i < NUM_RESULT; i++) { 1423 for (i = 0; i < NUM_RESULT; i++) {
1329 printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", 1424 char gran_factor, chunk_factor, lose_factor;
1330 result[i].bad?"*BAD* ":" ", result[i].gran_sizek >> 10, 1425 unsigned long gran_base, chunk_base, lose_base;
1331 result[i].chunk_sizek >> 10); 1426
1332 printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n", 1427 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1333 result[i].num_reg, result[i].bad?"-":"", 1428 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1334 result[i].lose_cover_sizek >> 10); 1429 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1430 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
1431 result[i].bad?"*BAD*":" ",
1432 gran_base, gran_factor, chunk_base, chunk_factor);
1433 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
1434 result[i].num_reg, result[i].bad?"-":"",
1435 lose_base, lose_factor);
1335 } 1436 }
1336 1437
1337 /* try to find the optimal index */ 1438 /* try to find the optimal index */
@@ -1339,10 +1440,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
1339 nr_mtrr_spare_reg = num_var_ranges - 1; 1440 nr_mtrr_spare_reg = num_var_ranges - 1;
1340 num_reg_good = -1; 1441 num_reg_good = -1;
1341 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { 1442 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
1342 if (!min_loss_pfn[i]) { 1443 if (!min_loss_pfn[i])
1343 num_reg_good = i; 1444 num_reg_good = i;
1344 break;
1345 }
1346 } 1445 }
1347 1446
1348 index_good = -1; 1447 index_good = -1;
@@ -1358,21 +1457,26 @@ static int __init mtrr_cleanup(unsigned address_bits)
1358 } 1457 }
1359 1458
1360 if (index_good != -1) { 1459 if (index_good != -1) {
1460 char gran_factor, chunk_factor, lose_factor;
1461 unsigned long gran_base, chunk_base, lose_base;
1462
1361 printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); 1463 printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
1362 i = index_good; 1464 i = index_good;
1363 printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t", 1465 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1364 result[i].gran_sizek >> 10, 1466 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1365 result[i].chunk_sizek >> 10); 1467 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1366 printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n", 1468 printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
1367 result[i].num_reg, 1469 gran_base, gran_factor, chunk_base, chunk_factor);
1368 result[i].lose_cover_sizek >> 10); 1470 printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n",
1471 result[i].num_reg, lose_base, lose_factor);
1369 /* convert ranges to var ranges state */ 1472 /* convert ranges to var ranges state */
1370 chunk_size = result[i].chunk_sizek; 1473 chunk_size = result[i].chunk_sizek;
1371 chunk_size <<= 10; 1474 chunk_size <<= 10;
1372 gran_size = result[i].gran_sizek; 1475 gran_size = result[i].gran_sizek;
1373 gran_size <<= 10; 1476 gran_size <<= 10;
1374 debug_print = 1; 1477 debug_print++;
1375 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); 1478 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
1479 debug_print--;
1376 set_var_mtrr_all(address_bits); 1480 set_var_mtrr_all(address_bits);
1377 return 1; 1481 return 1;
1378 } 1482 }
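Much of the mtrr_cleanup churn above routes size printouts through the new to_size_factor() helper, which picks a K/M/G unit based on exact alignment of the value rather than its magnitude. A standalone sketch of the same helper plus a usage line, assuming the input is in KB as in the kernel code:

#include <stdio.h>

/* Reduce a size given in KB to the largest unit that divides it
 * exactly: K if not MB-aligned, M if not GB-aligned, else G.
 * Mirrors the to_size_factor() helper added above. */
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
        unsigned long base = sizek;

        if (base & ((1UL << 10) - 1)) {
                *factorp = 'K';                 /* not MB aligned */
        } else if (base & ((1UL << 20) - 1)) {
                *factorp = 'M';
                base >>= 10;
        } else {
                *factorp = 'G';
                base >>= 20;
        }
        return base;
}

int main(void)
{
        char factor;
        unsigned long base = to_size_factor(64UL << 10, &factor); /* 64M, in KB */

        printf("gran_size: %lu%cB\n", base, factor);              /* "64MB" */
        return 0;
}
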
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 05cc22dbd4f..6bff382094f 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -295,13 +295,19 @@ static int setup_k7_watchdog(unsigned nmi_hz)
295 /* setup the timer */ 295 /* setup the timer */
296 wrmsr(evntsel_msr, evntsel, 0); 296 wrmsr(evntsel_msr, evntsel, 0);
297 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); 297 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
298 apic_write(APIC_LVTPC, APIC_DM_NMI);
299 evntsel |= K7_EVNTSEL_ENABLE;
300 wrmsr(evntsel_msr, evntsel, 0);
301 298
299 /* initialize the wd struct before enabling */
302 wd->perfctr_msr = perfctr_msr; 300 wd->perfctr_msr = perfctr_msr;
303 wd->evntsel_msr = evntsel_msr; 301 wd->evntsel_msr = evntsel_msr;
304 wd->cccr_msr = 0; /* unused */ 302 wd->cccr_msr = 0; /* unused */
303
304 /* ok, everything is initialized, announce that we're set */
305 cpu_nmi_set_wd_enabled();
306
307 apic_write(APIC_LVTPC, APIC_DM_NMI);
308 evntsel |= K7_EVNTSEL_ENABLE;
309 wrmsr(evntsel_msr, evntsel, 0);
310
305 return 1; 311 return 1;
306} 312}
307 313
@@ -379,13 +385,19 @@ static int setup_p6_watchdog(unsigned nmi_hz)
379 wrmsr(evntsel_msr, evntsel, 0); 385 wrmsr(evntsel_msr, evntsel, 0);
380 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 386 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
381 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); 387 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
382 apic_write(APIC_LVTPC, APIC_DM_NMI);
383 evntsel |= P6_EVNTSEL0_ENABLE;
384 wrmsr(evntsel_msr, evntsel, 0);
385 388
389 /* initialize the wd struct before enabling */
386 wd->perfctr_msr = perfctr_msr; 390 wd->perfctr_msr = perfctr_msr;
387 wd->evntsel_msr = evntsel_msr; 391 wd->evntsel_msr = evntsel_msr;
388 wd->cccr_msr = 0; /* unused */ 392 wd->cccr_msr = 0; /* unused */
393
394 /* ok, everything is initialized, announce that we're set */
395 cpu_nmi_set_wd_enabled();
396
397 apic_write(APIC_LVTPC, APIC_DM_NMI);
398 evntsel |= P6_EVNTSEL0_ENABLE;
399 wrmsr(evntsel_msr, evntsel, 0);
400
389 return 1; 401 return 1;
390} 402}
391 403
@@ -432,6 +444,27 @@ static const struct wd_ops p6_wd_ops = {
432#define P4_CCCR_ENABLE (1 << 12) 444#define P4_CCCR_ENABLE (1 << 12)
433#define P4_CCCR_OVF (1 << 31) 445#define P4_CCCR_OVF (1 << 31)
434 446
447#define P4_CONTROLS 18
448static unsigned int p4_controls[18] = {
449 MSR_P4_BPU_CCCR0,
450 MSR_P4_BPU_CCCR1,
451 MSR_P4_BPU_CCCR2,
452 MSR_P4_BPU_CCCR3,
453 MSR_P4_MS_CCCR0,
454 MSR_P4_MS_CCCR1,
455 MSR_P4_MS_CCCR2,
456 MSR_P4_MS_CCCR3,
457 MSR_P4_FLAME_CCCR0,
458 MSR_P4_FLAME_CCCR1,
459 MSR_P4_FLAME_CCCR2,
460 MSR_P4_FLAME_CCCR3,
461 MSR_P4_IQ_CCCR0,
462 MSR_P4_IQ_CCCR1,
463 MSR_P4_IQ_CCCR2,
464 MSR_P4_IQ_CCCR3,
465 MSR_P4_IQ_CCCR4,
466 MSR_P4_IQ_CCCR5,
467};
435/* 468/*
436 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter 469 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
437 * CRU_ESCR0 (with any non-null event selector) through a complemented 470 * CRU_ESCR0 (with any non-null event selector) through a complemented
@@ -473,6 +506,26 @@ static int setup_p4_watchdog(unsigned nmi_hz)
473 evntsel_msr = MSR_P4_CRU_ESCR0; 506 evntsel_msr = MSR_P4_CRU_ESCR0;
474 cccr_msr = MSR_P4_IQ_CCCR0; 507 cccr_msr = MSR_P4_IQ_CCCR0;
475 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); 508 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
509
510 /*
511 * If we're on the kdump kernel or other situation, we may
512 * still have other performance counter registers set to
513 * interrupt and they'll keep interrupting forever because
514 * of the P4_CCCR_OVF quirk. So we need to ACK all the
515 * pending interrupts and disable all the registers here,
516 * before reenabling the NMI delivery. Refer to p4_rearm()
517 * about the P4_CCCR_OVF quirk.
518 */
519 if (reset_devices) {
520 unsigned int low, high;
521 int i;
522
523 for (i = 0; i < P4_CONTROLS; i++) {
524 rdmsr(p4_controls[i], low, high);
525 low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
526 wrmsr(p4_controls[i], low, high);
527 }
528 }
476 } else { 529 } else {
477 /* logical cpu 1 */ 530 /* logical cpu 1 */
478 perfctr_msr = MSR_P4_IQ_PERFCTR1; 531 perfctr_msr = MSR_P4_IQ_PERFCTR1;
@@ -499,12 +552,17 @@ static int setup_p4_watchdog(unsigned nmi_hz)
499 wrmsr(evntsel_msr, evntsel, 0); 552 wrmsr(evntsel_msr, evntsel, 0);
500 wrmsr(cccr_msr, cccr_val, 0); 553 wrmsr(cccr_msr, cccr_val, 0);
501 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); 554 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
502 apic_write(APIC_LVTPC, APIC_DM_NMI); 555
503 cccr_val |= P4_CCCR_ENABLE;
504 wrmsr(cccr_msr, cccr_val, 0);
505 wd->perfctr_msr = perfctr_msr; 556 wd->perfctr_msr = perfctr_msr;
506 wd->evntsel_msr = evntsel_msr; 557 wd->evntsel_msr = evntsel_msr;
507 wd->cccr_msr = cccr_msr; 558 wd->cccr_msr = cccr_msr;
559
560 /* ok, everything is initialized, announce that we're set */
561 cpu_nmi_set_wd_enabled();
562
563 apic_write(APIC_LVTPC, APIC_DM_NMI);
564 cccr_val |= P4_CCCR_ENABLE;
565 wrmsr(cccr_msr, cccr_val, 0);
508 return 1; 566 return 1;
509} 567}
510 568
@@ -620,13 +678,17 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
620 wrmsr(evntsel_msr, evntsel, 0); 678 wrmsr(evntsel_msr, evntsel, 0);
621 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 679 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
622 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); 680 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
623 apic_write(APIC_LVTPC, APIC_DM_NMI);
624 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
625 wrmsr(evntsel_msr, evntsel, 0);
626 681
627 wd->perfctr_msr = perfctr_msr; 682 wd->perfctr_msr = perfctr_msr;
628 wd->evntsel_msr = evntsel_msr; 683 wd->evntsel_msr = evntsel_msr;
629 wd->cccr_msr = 0; /* unused */ 684 wd->cccr_msr = 0; /* unused */
685
686 /* ok, everything is initialized, announce that we're set */
687 cpu_nmi_set_wd_enabled();
688
689 apic_write(APIC_LVTPC, APIC_DM_NMI);
690 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
691 wrmsr(evntsel_msr, evntsel, 0);
630 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); 692 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
631 return 1; 693 return 1;
632} 694}
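The watchdog hunks above all make the same ordering change: fill in the wd_ops state and call cpu_nmi_set_wd_enabled() before unmasking the PMU NMI and setting the counter enable bit, so the first NMI cannot land before the data its handler needs is in place. A generic sketch of that "publish state, then arm the interrupt source" ordering; the hw_* helpers are made-up stand-ins for the wrmsr()/apic_write() calls:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for programming and arming the counter. */
static void hw_program_counter(void)    { /* write evntsel/perfctr MSRs */ }
static void hw_enable_counter_nmi(void) { /* unmask LVTPC, set enable bit */ }

struct wd_state {
        unsigned long perfctr_msr;
        unsigned long evntsel_msr;
};

static struct wd_state wd;
static atomic_bool wd_enabled;

/* Publish the state the NMI handler reads, announce that the
 * watchdog is set, and only then arm the counter, mirroring the
 * reordering in the setup_*_watchdog() functions above. */
void watchdog_setup(unsigned long perfctr_msr, unsigned long evntsel_msr)
{
        wd.perfctr_msr = perfctr_msr;
        wd.evntsel_msr = evntsel_msr;

        atomic_store(&wd_enabled, true);   /* cpu_nmi_set_wd_enabled() analogue */

        hw_program_counter();
        hw_enable_counter_nmi();
}
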
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8e9cd6a8ec1..6a44d646599 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -36,7 +36,6 @@
36#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
37#include <linux/major.h> 37#include <linux/major.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/smp_lock.h>
40#include <linux/device.h> 39#include <linux/device.h>
41#include <linux/cpu.h> 40#include <linux/cpu.h>
42#include <linux/notifier.h> 41#include <linux/notifier.h>
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 15e6c6bc4a4..e90a60ef10c 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -7,9 +7,8 @@
7 7
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/crash_dump.h> 9#include <linux/crash_dump.h>
10 10#include <linux/uaccess.h>
11#include <asm/uaccess.h> 11#include <linux/io.h>
12#include <asm/io.h>
13 12
14/** 13/**
15 * copy_oldmem_page - copy one page from "oldmem" 14 * copy_oldmem_page - copy one page from "oldmem"
@@ -25,7 +24,7 @@
25 * in the current kernel. We stitch up a pte, similar to kmap_atomic. 24 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
26 */ 25 */
27ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 26ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
28 size_t csize, unsigned long offset, int userbuf) 27 size_t csize, unsigned long offset, int userbuf)
29{ 28{
30 void *vaddr; 29 void *vaddr;
31 30
@@ -33,14 +32,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
33 return 0; 32 return 0;
34 33
35 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); 34 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
35 if (!vaddr)
36 return -ENOMEM;
36 37
37 if (userbuf) { 38 if (userbuf) {
38 if (copy_to_user(buf, (vaddr + offset), csize)) { 39 if (copy_to_user(buf, vaddr + offset, csize)) {
39 iounmap(vaddr); 40 iounmap(vaddr);
40 return -EFAULT; 41 return -EFAULT;
41 } 42 }
42 } else 43 } else
43 memcpy(buf, (vaddr + offset), csize); 44 memcpy(buf, vaddr + offset, csize);
44 45
45 iounmap(vaddr); 46 iounmap(vaddr);
46 return csize; 47 return csize;
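Besides the include cleanup, the crash_dump_64.c hunk above adds the missing NULL check on the ioremap() return before the mapping is touched. A small user-space sketch of the same map/check/copy/unmap discipline using mmap() (illustrative only, not the kernel path; assumes 'off' is page-aligned and 'len' is at most one page):

#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* Copy up to one page at byte offset 'off' of an open file into
 * 'buf': map it, bail out if the mapping failed, copy, unmap. */
static ssize_t copy_page_at(int fd, off_t off, char *buf, size_t len)
{
        long psize = sysconf(_SC_PAGESIZE);
        void *vaddr = mmap(NULL, psize, PROT_READ, MAP_PRIVATE, fd, off);

        if (vaddr == MAP_FAILED)        /* the check the old code lacked */
                return -1;
        memcpy(buf, vaddr, len);
        munmap(vaddr, psize);
        return (ssize_t)len;
}
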
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 11c11b8ec48..2b69994fd3a 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -2,26 +2,49 @@
2 * Debug Store support 2 * Debug Store support
3 * 3 *
4 * This provides a low-level interface to the hardware's Debug Store 4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for last branch recording (LBR) and 5 * feature that is used for branch trace store (BTS) and
6 * precise-event based sampling (PEBS). 6 * precise-event based sampling (PEBS).
7 * 7 *
8 * Different architectures use a different DS layout/pointer size. 8 * It manages:
9 * The below functions therefore work on a void*. 9 * - per-thread and per-cpu allocation of BTS and PEBS
10 * - buffer memory allocation (optional)
11 * - buffer overflow handling
12 * - buffer access
10 * 13 *
14 * It assumes:
15 * - get_task_struct on all parameter tasks
16 * - current is allowed to trace parameter tasks
11 * 17 *
12 * Since there is no user for PEBS, yet, only LBR (or branch
13 * trace store, BTS) is supported.
14 * 18 *
15 * 19 * Copyright (C) 2007-2008 Intel Corporation.
16 * Copyright (C) 2007 Intel Corporation. 20 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
17 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
18 */ 21 */
19 22
23
24#ifdef CONFIG_X86_DS
25
20#include <asm/ds.h> 26#include <asm/ds.h>
21 27
22#include <linux/errno.h> 28#include <linux/errno.h>
23#include <linux/string.h> 29#include <linux/string.h>
24#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/sched.h>
32#include <linux/mm.h>
33
34
35/*
36 * The configuration for a particular DS hardware implementation.
37 */
38struct ds_configuration {
39 /* the size of the DS structure in bytes */
40 unsigned char sizeof_ds;
41 /* the size of one pointer-typed field in the DS structure in bytes;
42 this covers the first 8 fields related to buffer management. */
43 unsigned char sizeof_field;
44 /* the size of a BTS/PEBS record in bytes */
45 unsigned char sizeof_rec[2];
46};
47static struct ds_configuration ds_cfg;
25 48
26 49
27/* 50/*
@@ -44,378 +67,747 @@
44 * (interrupt occurs when write pointer passes interrupt pointer) 67 * (interrupt occurs when write pointer passes interrupt pointer)
45 * - value to which counter is reset following counter overflow 68 * - value to which counter is reset following counter overflow
46 * 69 *
47 * On later architectures, the last branch recording hardware uses 70 * Later architectures use 64bit pointers throughout, whereas earlier
48 * 64bit pointers even in 32bit mode. 71 * architectures use 32bit pointers in 32bit mode.
49 *
50 *
51 * Branch Trace Store (BTS) records store information about control
52 * flow changes. They at least provide the following information:
53 * - source linear address
54 * - destination linear address
55 * 72 *
56 * Netburst supported a predicated bit that had been dropped in later
57 * architectures. We do not suppor it.
58 * 73 *
74 * We compute the base address for the first 8 fields based on:
75 * - the field size stored in the DS configuration
76 * - the relative field position
77 * - an offset giving the start of the respective region
59 * 78 *
60 * In order to abstract from the actual DS and BTS layout, we describe 79 * This offset is further used to index various arrays holding
61 * the access to the relevant fields. 80 * information for BTS and PEBS at the respective index.
62 * Thanks to Andi Kleen for proposing this design.
63 * 81 *
64 * The implementation, however, is not as general as it might seem. In 82 * On later 32bit processors, we only access the lower 32bit of the
65 * order to stay somewhat simple and efficient, we assume an 83 * 64bit pointer fields. The upper halves will be zeroed out.
66 * underlying unsigned type (mostly a pointer type) and we expect the
67 * field to be at least as big as that type.
68 */ 84 */
69 85
70/* 86enum ds_field {
71 * A special from_ip address to indicate that the BTS record is an 87 ds_buffer_base = 0,
72 * info record that needs to be interpreted or skipped. 88 ds_index,
73 */ 89 ds_absolute_maximum,
74#define BTS_ESCAPE_ADDRESS (-1) 90 ds_interrupt_threshold,
91};
75 92
76/* 93enum ds_qualifier {
77 * A field access descriptor 94 ds_bts = 0,
78 */ 95 ds_pebs
79struct access_desc {
80 unsigned char offset;
81 unsigned char size;
82}; 96};
83 97
98static inline unsigned long ds_get(const unsigned char *base,
99 enum ds_qualifier qual, enum ds_field field)
100{
101 base += (ds_cfg.sizeof_field * (field + (4 * qual)));
102 return *(unsigned long *)base;
103}
104
105static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
106 enum ds_field field, unsigned long value)
107{
108 base += (ds_cfg.sizeof_field * (field + (4 * qual)));
109 (*(unsigned long *)base) = value;
110}
111
112
84/* 113/*
85 * The configuration for a particular DS/BTS hardware implementation. 114 * Locking is done only for allocating BTS or PEBS resources and for
115 * guarding context and buffer memory allocation.
116 *
117 * Most functions require the current task to own the ds context part
118 * they are going to access. All the locking is done when validating
119 * access to the context.
86 */ 120 */
87struct ds_configuration { 121static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
88 /* the DS configuration */
89 unsigned char sizeof_ds;
90 struct access_desc bts_buffer_base;
91 struct access_desc bts_index;
92 struct access_desc bts_absolute_maximum;
93 struct access_desc bts_interrupt_threshold;
94 /* the BTS configuration */
95 unsigned char sizeof_bts;
96 struct access_desc from_ip;
97 struct access_desc to_ip;
98 /* BTS variants used to store additional information like
99 timestamps */
100 struct access_desc info_type;
101 struct access_desc info_data;
102 unsigned long debugctl_mask;
103};
104 122
105/* 123/*
106 * The global configuration used by the below accessor functions 124 * Validate that the current task is allowed to access the BTS/PEBS
125 * buffer of the parameter task.
126 *
127 * Returns 0, if access is granted; -Eerrno, otherwise.
107 */ 128 */
108static struct ds_configuration ds_cfg; 129static inline int ds_validate_access(struct ds_context *context,
130 enum ds_qualifier qual)
131{
132 if (!context)
133 return -EPERM;
134
135 if (context->owner[qual] == current)
136 return 0;
137
138 return -EPERM;
139}
140
109 141
110/* 142/*
111 * Accessor functions for some DS and BTS fields using the above 143 * We either support (system-wide) per-cpu or per-thread allocation.
112 * global ptrace_bts_cfg. 144 * We distinguish the two based on the task_struct pointer, where a
145 * NULL pointer indicates per-cpu allocation for the current cpu.
146 *
147 * Allocations are use-counted. As soon as resources are allocated,
148 * further allocations must be of the same type (per-cpu or
149 * per-thread). We model this by counting allocations (i.e. the number
150 * of tracers of a certain type) for one type negatively:
151 * =0 no tracers
152 * >0 number of per-thread tracers
153 * <0 number of per-cpu tracers
154 *
155 * The below functions to get and put tracers and to check the
156 * allocation type require the ds_lock to be held by the caller.
157 *
158 * Tracers essentially gives the number of ds contexts for a certain
159 * type of allocation.
113 */ 160 */
114static inline unsigned long get_bts_buffer_base(char *base) 161static long tracers;
162
163static inline void get_tracer(struct task_struct *task)
115{ 164{
116 return *(unsigned long *)(base + ds_cfg.bts_buffer_base.offset); 165 tracers += (task ? 1 : -1);
117} 166}
118static inline void set_bts_buffer_base(char *base, unsigned long value) 167
168static inline void put_tracer(struct task_struct *task)
119{ 169{
120 (*(unsigned long *)(base + ds_cfg.bts_buffer_base.offset)) = value; 170 tracers -= (task ? 1 : -1);
121} 171}
122static inline unsigned long get_bts_index(char *base) 172
173static inline int check_tracer(struct task_struct *task)
123{ 174{
124 return *(unsigned long *)(base + ds_cfg.bts_index.offset); 175 return (task ? (tracers >= 0) : (tracers <= 0));
125} 176}
126static inline void set_bts_index(char *base, unsigned long value) 177
178
179/*
180 * The DS context is either attached to a thread or to a cpu:
181 * - in the former case, the thread_struct contains a pointer to the
182 * attached context.
183 * - in the latter case, we use a static array of per-cpu context
184 * pointers.
185 *
186 * Contexts are use-counted. They are allocated on first access and
187 * deallocated when the last user puts the context.
188 *
189 * We distinguish between an allocating and a non-allocating get of a
190 * context:
191 * - the allocating get is used for requesting BTS/PEBS resources. It
192 * requires the caller to hold the global ds_lock.
193 * - the non-allocating get is used for all other cases. A
194 * non-existing context indicates an error. It acquires and releases
195 * the ds_lock itself for obtaining the context.
196 *
197 * A context and its DS configuration are allocated and deallocated
198 * together. A context always has a DS configuration of the
199 * appropriate size.
200 */
201static DEFINE_PER_CPU(struct ds_context *, system_context);
202
203#define this_system_context per_cpu(system_context, smp_processor_id())
204
205/*
206 * Returns the pointer to the parameter task's context or to the
207 * system-wide context, if task is NULL.
208 *
209 * Increases the use count of the returned context, if not NULL.
210 */
211static inline struct ds_context *ds_get_context(struct task_struct *task)
127{ 212{
128 (*(unsigned long *)(base + ds_cfg.bts_index.offset)) = value; 213 struct ds_context *context;
214
215 spin_lock(&ds_lock);
216
217 context = (task ? task->thread.ds_ctx : this_system_context);
218 if (context)
219 context->count++;
220
221 spin_unlock(&ds_lock);
222
223 return context;
129} 224}
130static inline unsigned long get_bts_absolute_maximum(char *base) 225
226/*
227 * Same as ds_get_context, but allocates the context and it's DS
228 * structure, if necessary; returns NULL; if out of memory.
229 *
230 * pre: requires ds_lock to be held
231 */
232static inline struct ds_context *ds_alloc_context(struct task_struct *task)
131{ 233{
132 return *(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset); 234 struct ds_context **p_context =
235 (task ? &task->thread.ds_ctx : &this_system_context);
236 struct ds_context *context = *p_context;
237
238 if (!context) {
239 context = kzalloc(sizeof(*context), GFP_KERNEL);
240
241 if (!context)
242 return NULL;
243
244 context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
245 if (!context->ds) {
246 kfree(context);
247 return NULL;
248 }
249
250 *p_context = context;
251
252 context->this = p_context;
253 context->task = task;
254
255 if (task)
256 set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
257
258 if (!task || (task == current))
259 wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
260
261 get_tracer(task);
262 }
263
264 context->count++;
265
266 return context;
133} 267}
134static inline void set_bts_absolute_maximum(char *base, unsigned long value) 268
269/*
270 * Decreases the use count of the parameter context, if not NULL.
271 * Deallocates the context, if the use count reaches zero.
272 */
273static inline void ds_put_context(struct ds_context *context)
135{ 274{
136 (*(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset)) = value; 275 if (!context)
276 return;
277
278 spin_lock(&ds_lock);
279
280 if (--context->count)
281 goto out;
282
283 *(context->this) = NULL;
284
285 if (context->task)
286 clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
287
288 if (!context->task || (context->task == current))
289 wrmsrl(MSR_IA32_DS_AREA, 0);
290
291 put_tracer(context->task);
292
293 /* free any leftover buffers from tracers that did not
294 * deallocate them properly. */
295 kfree(context->buffer[ds_bts]);
296 kfree(context->buffer[ds_pebs]);
297 kfree(context->ds);
298 kfree(context);
299 out:
300 spin_unlock(&ds_lock);
137} 301}
138static inline unsigned long get_bts_interrupt_threshold(char *base) 302
303
304/*
305 * Handle a buffer overflow
306 *
307 * task: the task whose buffers are overflowing;
308 * NULL for a buffer overflow on the current cpu
309 * context: the ds context
310 * qual: the buffer type
311 */
312static void ds_overflow(struct task_struct *task, struct ds_context *context,
313 enum ds_qualifier qual)
139{ 314{
140 return *(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset); 315 if (!context)
316 return;
317
318 if (context->callback[qual])
319 (*context->callback[qual])(task);
320
321 /* todo: do some more overflow handling */
141} 322}
142static inline void set_bts_interrupt_threshold(char *base, unsigned long value) 323
324
325/*
326 * Allocate a non-pageable buffer of the parameter size.
327 * Checks the memory and the locked memory rlimit.
328 *
329 * Returns the buffer, if successful;
330 * NULL, if out of memory or rlimit exceeded.
331 *
332 * size: the requested buffer size in bytes
333 * pages (out): if not NULL, contains the number of pages reserved
334 */
335static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
143{ 336{
144 (*(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset)) = value; 337 unsigned long rlim, vm, pgsz;
338 void *buffer;
339
340 pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
341
342 rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
343 vm = current->mm->total_vm + pgsz;
344 if (rlim < vm)
345 return NULL;
346
347 rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
348 vm = current->mm->locked_vm + pgsz;
349 if (rlim < vm)
350 return NULL;
351
352 buffer = kzalloc(size, GFP_KERNEL);
353 if (!buffer)
354 return NULL;
355
356 current->mm->total_vm += pgsz;
357 current->mm->locked_vm += pgsz;
358
359 if (pages)
360 *pages = pgsz;
361
362 return buffer;
145} 363}
146static inline unsigned long get_from_ip(char *base) 364
365static int ds_request(struct task_struct *task, void *base, size_t size,
366 ds_ovfl_callback_t ovfl, enum ds_qualifier qual)
147{ 367{
148 return *(unsigned long *)(base + ds_cfg.from_ip.offset); 368 struct ds_context *context;
369 unsigned long buffer, adj;
370 const unsigned long alignment = (1 << 3);
371 int error = 0;
372
373 if (!ds_cfg.sizeof_ds)
374 return -EOPNOTSUPP;
375
376 /* we require some space to do alignment adjustments below */
377 if (size < (alignment + ds_cfg.sizeof_rec[qual]))
378 return -EINVAL;
379
380 /* buffer overflow notification is not yet implemented */
381 if (ovfl)
382 return -EOPNOTSUPP;
383
384
385 spin_lock(&ds_lock);
386
387 if (!check_tracer(task))
388 return -EPERM;
389
390 error = -ENOMEM;
391 context = ds_alloc_context(task);
392 if (!context)
393 goto out_unlock;
394
395 error = -EALREADY;
396 if (context->owner[qual] == current)
397 goto out_unlock;
398 error = -EPERM;
399 if (context->owner[qual] != NULL)
400 goto out_unlock;
401 context->owner[qual] = current;
402
403 spin_unlock(&ds_lock);
404
405
406 error = -ENOMEM;
407 if (!base) {
408 base = ds_allocate_buffer(size, &context->pages[qual]);
409 if (!base)
410 goto out_release;
411
412 context->buffer[qual] = base;
413 }
414 error = 0;
415
416 context->callback[qual] = ovfl;
417
418 /* adjust the buffer address and size to meet alignment
419 * constraints:
420 * - buffer is double-word aligned
421 * - size is multiple of record size
422 *
423 * We checked the size at the very beginning; we have enough
424 * space to do the adjustment.
425 */
426 buffer = (unsigned long)base;
427
428 adj = ALIGN(buffer, alignment) - buffer;
429 buffer += adj;
430 size -= adj;
431
432 size /= ds_cfg.sizeof_rec[qual];
433 size *= ds_cfg.sizeof_rec[qual];
434
435 ds_set(context->ds, qual, ds_buffer_base, buffer);
436 ds_set(context->ds, qual, ds_index, buffer);
437 ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);
438
439 if (ovfl) {
440 /* todo: select a suitable interrupt threshold */
441 } else
442 ds_set(context->ds, qual,
443 ds_interrupt_threshold, buffer + size + 1);
444
445 /* we keep the context until ds_release */
446 return error;
447
448 out_release:
449 context->owner[qual] = NULL;
450 ds_put_context(context);
451 return error;
452
453 out_unlock:
454 spin_unlock(&ds_lock);
455 ds_put_context(context);
456 return error;
149} 457}
150static inline void set_from_ip(char *base, unsigned long value) 458
459int ds_request_bts(struct task_struct *task, void *base, size_t size,
460 ds_ovfl_callback_t ovfl)
151{ 461{
152 (*(unsigned long *)(base + ds_cfg.from_ip.offset)) = value; 462 return ds_request(task, base, size, ovfl, ds_bts);
153} 463}
154static inline unsigned long get_to_ip(char *base) 464
465int ds_request_pebs(struct task_struct *task, void *base, size_t size,
466 ds_ovfl_callback_t ovfl)
155{ 467{
156 return *(unsigned long *)(base + ds_cfg.to_ip.offset); 468 return ds_request(task, base, size, ovfl, ds_pebs);
157} 469}
158static inline void set_to_ip(char *base, unsigned long value) 470
471static int ds_release(struct task_struct *task, enum ds_qualifier qual)
159{ 472{
160 (*(unsigned long *)(base + ds_cfg.to_ip.offset)) = value; 473 struct ds_context *context;
474 int error;
475
476 context = ds_get_context(task);
477 error = ds_validate_access(context, qual);
478 if (error < 0)
479 goto out;
480
481 kfree(context->buffer[qual]);
482 context->buffer[qual] = NULL;
483
484 current->mm->total_vm -= context->pages[qual];
485 current->mm->locked_vm -= context->pages[qual];
486 context->pages[qual] = 0;
487 context->owner[qual] = NULL;
488
489 /*
490 * we put the context twice:
491 * once for the ds_get_context
492 * once for the corresponding ds_request
493 */
494 ds_put_context(context);
495 out:
496 ds_put_context(context);
497 return error;
161} 498}
162static inline unsigned char get_info_type(char *base) 499
500int ds_release_bts(struct task_struct *task)
163{ 501{
164 return *(unsigned char *)(base + ds_cfg.info_type.offset); 502 return ds_release(task, ds_bts);
165} 503}
166static inline void set_info_type(char *base, unsigned char value) 504
505int ds_release_pebs(struct task_struct *task)
167{ 506{
168 (*(unsigned char *)(base + ds_cfg.info_type.offset)) = value; 507 return ds_release(task, ds_pebs);
169} 508}
170static inline unsigned long get_info_data(char *base) 509
510static int ds_get_index(struct task_struct *task, size_t *pos,
511 enum ds_qualifier qual)
171{ 512{
172 return *(unsigned long *)(base + ds_cfg.info_data.offset); 513 struct ds_context *context;
514 unsigned long base, index;
515 int error;
516
517 context = ds_get_context(task);
518 error = ds_validate_access(context, qual);
519 if (error < 0)
520 goto out;
521
522 base = ds_get(context->ds, qual, ds_buffer_base);
523 index = ds_get(context->ds, qual, ds_index);
524
525 error = ((index - base) / ds_cfg.sizeof_rec[qual]);
526 if (pos)
527 *pos = error;
528 out:
529 ds_put_context(context);
530 return error;
173} 531}
174static inline void set_info_data(char *base, unsigned long value) 532
533int ds_get_bts_index(struct task_struct *task, size_t *pos)
175{ 534{
176 (*(unsigned long *)(base + ds_cfg.info_data.offset)) = value; 535 return ds_get_index(task, pos, ds_bts);
177} 536}
178 537
538int ds_get_pebs_index(struct task_struct *task, size_t *pos)
539{
540 return ds_get_index(task, pos, ds_pebs);
541}
179 542
180int ds_allocate(void **dsp, size_t bts_size_in_bytes) 543static int ds_get_end(struct task_struct *task, size_t *pos,
544 enum ds_qualifier qual)
181{ 545{
182 size_t bts_size_in_records; 546 struct ds_context *context;
183 unsigned long bts; 547 unsigned long base, end;
184 void *ds; 548 int error;
549
550 context = ds_get_context(task);
551 error = ds_validate_access(context, qual);
552 if (error < 0)
553 goto out;
554
555 base = ds_get(context->ds, qual, ds_buffer_base);
556 end = ds_get(context->ds, qual, ds_absolute_maximum);
557
558 error = ((end - base) / ds_cfg.sizeof_rec[qual]);
559 if (pos)
560 *pos = error;
561 out:
562 ds_put_context(context);
563 return error;
564}
185 565
186 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 566int ds_get_bts_end(struct task_struct *task, size_t *pos)
187 return -EOPNOTSUPP; 567{
568 return ds_get_end(task, pos, ds_bts);
569}
188 570
189 if (bts_size_in_bytes < 0) 571int ds_get_pebs_end(struct task_struct *task, size_t *pos)
190 return -EINVAL; 572{
573 return ds_get_end(task, pos, ds_pebs);
574}
191 575
192 bts_size_in_records = 576static int ds_access(struct task_struct *task, size_t index,
193 bts_size_in_bytes / ds_cfg.sizeof_bts; 577 const void **record, enum ds_qualifier qual)
194 bts_size_in_bytes = 578{
195 bts_size_in_records * ds_cfg.sizeof_bts; 579 struct ds_context *context;
580 unsigned long base, idx;
581 int error;
196 582
197 if (bts_size_in_bytes <= 0) 583 if (!record)
198 return -EINVAL; 584 return -EINVAL;
199 585
200 bts = (unsigned long)kzalloc(bts_size_in_bytes, GFP_KERNEL); 586 context = ds_get_context(task);
201 587 error = ds_validate_access(context, qual);
202 if (!bts) 588 if (error < 0)
203 return -ENOMEM; 589 goto out;
204 590
205 ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); 591 base = ds_get(context->ds, qual, ds_buffer_base);
592 idx = base + (index * ds_cfg.sizeof_rec[qual]);
206 593
207 if (!ds) { 594 error = -EINVAL;
208 kfree((void *)bts); 595 if (idx > ds_get(context->ds, qual, ds_absolute_maximum))
209 return -ENOMEM; 596 goto out;
210 }
211
212 set_bts_buffer_base(ds, bts);
213 set_bts_index(ds, bts);
214 set_bts_absolute_maximum(ds, bts + bts_size_in_bytes);
215 set_bts_interrupt_threshold(ds, bts + bts_size_in_bytes + 1);
216 597
217 *dsp = ds; 598 *record = (const void *)idx;
218 return 0; 599 error = ds_cfg.sizeof_rec[qual];
600 out:
601 ds_put_context(context);
602 return error;
219} 603}
220 604
221int ds_free(void **dsp) 605int ds_access_bts(struct task_struct *task, size_t index, const void **record)
222{ 606{
223 if (*dsp) { 607 return ds_access(task, index, record, ds_bts);
224 kfree((void *)get_bts_buffer_base(*dsp));
225 kfree(*dsp);
226 *dsp = NULL;
227 }
228 return 0;
229} 608}
230 609
231int ds_get_bts_size(void *ds) 610int ds_access_pebs(struct task_struct *task, size_t index, const void **record)
232{ 611{
233 int size_in_bytes; 612 return ds_access(task, index, record, ds_pebs);
234
235 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
236 return -EOPNOTSUPP;
237
238 if (!ds)
239 return 0;
240
241 size_in_bytes =
242 get_bts_absolute_maximum(ds) -
243 get_bts_buffer_base(ds);
244 return size_in_bytes;
245} 613}
246 614
247int ds_get_bts_end(void *ds) 615static int ds_write(struct task_struct *task, const void *record, size_t size,
616 enum ds_qualifier qual, int force)
248{ 617{
249 int size_in_bytes = ds_get_bts_size(ds); 618 struct ds_context *context;
250 619 int error;
251 if (size_in_bytes <= 0)
252 return size_in_bytes;
253 620
254 return size_in_bytes / ds_cfg.sizeof_bts; 621 if (!record)
255} 622 return -EINVAL;
256 623
257int ds_get_bts_index(void *ds) 624 error = -EPERM;
258{ 625 context = ds_get_context(task);
259 int index_offset_in_bytes; 626 if (!context)
627 goto out;
260 628
261 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 629 if (!force) {
262 return -EOPNOTSUPP; 630 error = ds_validate_access(context, qual);
631 if (error < 0)
632 goto out;
633 }
263 634
264 index_offset_in_bytes = 635 error = 0;
265 get_bts_index(ds) - 636 while (size) {
266 get_bts_buffer_base(ds); 637 unsigned long base, index, end, write_end, int_th;
638 unsigned long write_size, adj_write_size;
639
640 /*
641 * write as much as possible without producing an
642 * overflow interrupt.
643 *
644 * interrupt_threshold must either be
645 * - bigger than absolute_maximum or
646 * - point to a record between buffer_base and absolute_maximum
647 *
648 * index points to a valid record.
649 */
650 base = ds_get(context->ds, qual, ds_buffer_base);
651 index = ds_get(context->ds, qual, ds_index);
652 end = ds_get(context->ds, qual, ds_absolute_maximum);
653 int_th = ds_get(context->ds, qual, ds_interrupt_threshold);
654
655 write_end = min(end, int_th);
656
657 /* if we are already beyond the interrupt threshold,
658 * we fill the entire buffer */
659 if (write_end <= index)
660 write_end = end;
661
662 if (write_end <= index)
663 goto out;
664
665 write_size = min((unsigned long) size, write_end - index);
666 memcpy((void *)index, record, write_size);
667
668 record = (const char *)record + write_size;
669 size -= write_size;
670 error += write_size;
671
672 adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
673 adj_write_size *= ds_cfg.sizeof_rec[qual];
674
675 /* zero out trailing bytes */
676 memset((char *)index + write_size, 0,
677 adj_write_size - write_size);
678 index += adj_write_size;
679
680 if (index >= end)
681 index = base;
682 ds_set(context->ds, qual, ds_index, index);
683
684 if (index >= int_th)
685 ds_overflow(task, context, qual);
686 }
267 687
268 return index_offset_in_bytes / ds_cfg.sizeof_bts; 688 out:
689 ds_put_context(context);
690 return error;
269} 691}
270 692
271int ds_set_overflow(void *ds, int method) 693int ds_write_bts(struct task_struct *task, const void *record, size_t size)
272{ 694{
273 switch (method) { 695 return ds_write(task, record, size, ds_bts, /* force = */ 0);
274 case DS_O_SIGNAL:
275 return -EOPNOTSUPP;
276 case DS_O_WRAP:
277 return 0;
278 default:
279 return -EINVAL;
280 }
281} 696}
282 697
283int ds_get_overflow(void *ds) 698int ds_write_pebs(struct task_struct *task, const void *record, size_t size)
284{ 699{
285 return DS_O_WRAP; 700 return ds_write(task, record, size, ds_pebs, /* force = */ 0);
286} 701}
287 702
288int ds_clear(void *ds) 703int ds_unchecked_write_bts(struct task_struct *task,
704 const void *record, size_t size)
289{ 705{
290 int bts_size = ds_get_bts_size(ds); 706 return ds_write(task, record, size, ds_bts, /* force = */ 1);
291 unsigned long bts_base;
292
293 if (bts_size <= 0)
294 return bts_size;
295
296 bts_base = get_bts_buffer_base(ds);
297 memset((void *)bts_base, 0, bts_size);
298
299 set_bts_index(ds, bts_base);
300 return 0;
301} 707}
302 708
303int ds_read_bts(void *ds, int index, struct bts_struct *out) 709int ds_unchecked_write_pebs(struct task_struct *task,
710 const void *record, size_t size)
304{ 711{
305 void *bts; 712 return ds_write(task, record, size, ds_pebs, /* force = */ 1);
713}
306 714
307 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 715static int ds_reset_or_clear(struct task_struct *task,
308 return -EOPNOTSUPP; 716 enum ds_qualifier qual, int clear)
717{
718 struct ds_context *context;
719 unsigned long base, end;
720 int error;
309 721
310 if (index < 0) 722 context = ds_get_context(task);
311 return -EINVAL; 723 error = ds_validate_access(context, qual);
724 if (error < 0)
725 goto out;
312 726
313 if (index >= ds_get_bts_size(ds)) 727 base = ds_get(context->ds, qual, ds_buffer_base);
314 return -EINVAL; 728 end = ds_get(context->ds, qual, ds_absolute_maximum);
315 729
316 bts = (void *)(get_bts_buffer_base(ds) + (index * ds_cfg.sizeof_bts)); 730 if (clear)
731 memset((void *)base, 0, end - base);
317 732
318 memset(out, 0, sizeof(*out)); 733 ds_set(context->ds, qual, ds_index, base);
319 if (get_from_ip(bts) == BTS_ESCAPE_ADDRESS) {
320 out->qualifier = get_info_type(bts);
321 out->variant.jiffies = get_info_data(bts);
322 } else {
323 out->qualifier = BTS_BRANCH;
324 out->variant.lbr.from_ip = get_from_ip(bts);
325 out->variant.lbr.to_ip = get_to_ip(bts);
326 }
327 734
328 return sizeof(*out); 735 error = 0;
736 out:
737 ds_put_context(context);
738 return error;
329} 739}
330 740
331int ds_write_bts(void *ds, const struct bts_struct *in) 741int ds_reset_bts(struct task_struct *task)
332{ 742{
333 unsigned long bts; 743 return ds_reset_or_clear(task, ds_bts, /* clear = */ 0);
334 744}
335 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
336 return -EOPNOTSUPP;
337
338 if (ds_get_bts_size(ds) <= 0)
339 return -ENXIO;
340 745
341 bts = get_bts_index(ds); 746int ds_reset_pebs(struct task_struct *task)
747{
748 return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0);
749}
342 750
343 memset((void *)bts, 0, ds_cfg.sizeof_bts); 751int ds_clear_bts(struct task_struct *task)
344 switch (in->qualifier) { 752{
345 case BTS_INVALID: 753 return ds_reset_or_clear(task, ds_bts, /* clear = */ 1);
346 break; 754}
347 755
348 case BTS_BRANCH: 756int ds_clear_pebs(struct task_struct *task)
349 set_from_ip((void *)bts, in->variant.lbr.from_ip); 757{
350 set_to_ip((void *)bts, in->variant.lbr.to_ip); 758 return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1);
351 break; 759}
352 760
353 case BTS_TASK_ARRIVES: 761int ds_get_pebs_reset(struct task_struct *task, u64 *value)
354 case BTS_TASK_DEPARTS: 762{
355 set_from_ip((void *)bts, BTS_ESCAPE_ADDRESS); 763 struct ds_context *context;
356 set_info_type((void *)bts, in->qualifier); 764 int error;
357 set_info_data((void *)bts, in->variant.jiffies);
358 break;
359 765
360 default: 766 if (!value)
361 return -EINVAL; 767 return -EINVAL;
362 }
363 768
364 bts = bts + ds_cfg.sizeof_bts; 769 context = ds_get_context(task);
365 if (bts >= get_bts_absolute_maximum(ds)) 770 error = ds_validate_access(context, ds_pebs);
366 bts = get_bts_buffer_base(ds); 771 if (error < 0)
367 set_bts_index(ds, bts); 772 goto out;
368 773
369 return ds_cfg.sizeof_bts; 774 *value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8));
775
776 error = 0;
777 out:
778 ds_put_context(context);
779 return error;
370} 780}
371 781
372unsigned long ds_debugctl_mask(void) 782int ds_set_pebs_reset(struct task_struct *task, u64 value)
373{ 783{
374 return ds_cfg.debugctl_mask; 784 struct ds_context *context;
375} 785 int error;
376 786
377#ifdef __i386__ 787 context = ds_get_context(task);
378static const struct ds_configuration ds_cfg_netburst = { 788 error = ds_validate_access(context, ds_pebs);
379 .sizeof_ds = 9 * 4, 789 if (error < 0)
380 .bts_buffer_base = { 0, 4 }, 790 goto out;
381 .bts_index = { 4, 4 },
382 .bts_absolute_maximum = { 8, 4 },
383 .bts_interrupt_threshold = { 12, 4 },
384 .sizeof_bts = 3 * 4,
385 .from_ip = { 0, 4 },
386 .to_ip = { 4, 4 },
387 .info_type = { 4, 1 },
388 .info_data = { 8, 4 },
389 .debugctl_mask = (1<<2)|(1<<3)
390};
391 791
392static const struct ds_configuration ds_cfg_pentium_m = { 792 *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value;
393 .sizeof_ds = 9 * 4, 793
394 .bts_buffer_base = { 0, 4 }, 794 error = 0;
395 .bts_index = { 4, 4 }, 795 out:
396 .bts_absolute_maximum = { 8, 4 }, 796 ds_put_context(context);
397 .bts_interrupt_threshold = { 12, 4 }, 797 return error;
398 .sizeof_bts = 3 * 4, 798}
399 .from_ip = { 0, 4 }, 799
400 .to_ip = { 4, 4 }, 800static const struct ds_configuration ds_cfg_var = {
401 .info_type = { 4, 1 }, 801 .sizeof_ds = sizeof(long) * 12,
402 .info_data = { 8, 4 }, 802 .sizeof_field = sizeof(long),
403 .debugctl_mask = (1<<6)|(1<<7) 803 .sizeof_rec[ds_bts] = sizeof(long) * 3,
804 .sizeof_rec[ds_pebs] = sizeof(long) * 10
404}; 805};
405#endif /* _i386_ */ 806static const struct ds_configuration ds_cfg_64 = {
406 807 .sizeof_ds = 8 * 12,
407static const struct ds_configuration ds_cfg_core2 = { 808 .sizeof_field = 8,
408 .sizeof_ds = 9 * 8, 809 .sizeof_rec[ds_bts] = 8 * 3,
409 .bts_buffer_base = { 0, 8 }, 810 .sizeof_rec[ds_pebs] = 8 * 10
410 .bts_index = { 8, 8 },
411 .bts_absolute_maximum = { 16, 8 },
412 .bts_interrupt_threshold = { 24, 8 },
413 .sizeof_bts = 3 * 8,
414 .from_ip = { 0, 8 },
415 .to_ip = { 8, 8 },
416 .info_type = { 8, 1 },
417 .info_data = { 16, 8 },
418 .debugctl_mask = (1<<6)|(1<<7)|(1<<9)
419}; 811};
420 812
421static inline void 813static inline void
@@ -429,14 +821,13 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
429 switch (c->x86) { 821 switch (c->x86) {
430 case 0x6: 822 case 0x6:
431 switch (c->x86_model) { 823 switch (c->x86_model) {
432#ifdef __i386__
433 case 0xD: 824 case 0xD:
434 case 0xE: /* Pentium M */ 825 case 0xE: /* Pentium M */
435 ds_configure(&ds_cfg_pentium_m); 826 ds_configure(&ds_cfg_var);
436 break; 827 break;
437#endif /* _i386_ */
438 case 0xF: /* Core2 */ 828 case 0xF: /* Core2 */
439 ds_configure(&ds_cfg_core2); 829 case 0x1C: /* Atom */
830 ds_configure(&ds_cfg_64);
440 break; 831 break;
441 default: 832 default:
442 /* sorry, don't know about them */ 833 /* sorry, don't know about them */
@@ -445,13 +836,11 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
445 break; 836 break;
446 case 0xF: 837 case 0xF:
447 switch (c->x86_model) { 838 switch (c->x86_model) {
448#ifdef __i386__
449 case 0x0: 839 case 0x0:
450 case 0x1: 840 case 0x1:
451 case 0x2: /* Netburst */ 841 case 0x2: /* Netburst */
452 ds_configure(&ds_cfg_netburst); 842 ds_configure(&ds_cfg_var);
453 break; 843 break;
454#endif /* _i386_ */
455 default: 844 default:
456 /* sorry, don't know about them */ 845 /* sorry, don't know about them */
457 break; 846 break;
@@ -462,3 +851,14 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
462 break; 851 break;
463 } 852 }
464} 853}
854
855void ds_free(struct ds_context *context)
856{
857 /* This is called when the task owning the parameter context
858 * is dying. There should not be any user of that context left
859 * to disturb us anymore. */ 862 ds_put_context(context);
860 unsigned long leftovers = context->count;
861 while (leftovers--)
862 ds_put_context(context);
863}
864#endif /* CONFIG_X86_DS */
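
The ds_write() path added above treats the BTS/PEBS area as a bounded circular buffer: each pass writes no further than min(absolute_maximum, interrupt_threshold), wraps the write index back to buffer_base at the end of the buffer, and invokes the overflow callback once the threshold is crossed. Below is a minimal user-space sketch of just that wrap-around bookkeeping; struct ds_area and ds_area_write() are stand-ins rather than kernel API, and the record-size rounding and the overflow callback are left out.

#include <stdio.h>
#include <string.h>

/* stand-in for the DS area fields that ds_write() reads and updates */
struct ds_area {
        unsigned char *buffer_base;
        unsigned char *index;
        unsigned char *absolute_maximum;
        unsigned char *interrupt_threshold;
};

/*
 * Store up to 'size' bytes, never writing past
 * min(absolute_maximum, interrupt_threshold) in a single pass and wrapping
 * the write index back to buffer_base at the end of the buffer.
 */
static size_t ds_area_write(struct ds_area *ds, const void *record, size_t size)
{
        size_t written = 0;

        while (size) {
                unsigned char *write_end =
                        ds->interrupt_threshold < ds->absolute_maximum ?
                        ds->interrupt_threshold : ds->absolute_maximum;
                size_t room;

                /* already past the threshold: allow filling up to the hard end */
                if (write_end <= ds->index)
                        write_end = ds->absolute_maximum;
                if (write_end <= ds->index)
                        break;

                room = (size_t)(write_end - ds->index);
                if (room > size)
                        room = size;

                memcpy(ds->index, record, room);
                record = (const unsigned char *)record + room;
                ds->index += room;
                written += room;
                size -= room;

                /* wrap around, as ds_write() does when index reaches the end */
                if (ds->index >= ds->absolute_maximum)
                        ds->index = ds->buffer_base;
        }
        return written;
}

int main(void)
{
        unsigned char buf[16];
        struct ds_area ds = {
                .buffer_base = buf,
                .index = buf,
                .absolute_maximum = buf + sizeof(buf),
                .interrupt_threshold = buf + sizeof(buf), /* no early interrupt */
        };
        const char rec[] = "abcdefgh";

        printf("stored %zu bytes\n", ds_area_write(&ds, rec, sizeof(rec) - 1));
        printf("stored %zu bytes\n", ds_area_write(&ds, rec, sizeof(rec) - 1));
        printf("write offset after wrap: %td\n", ds.index - ds.buffer_base);
        return 0;
}
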
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 06cc8d4254b..945a31cdd81 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -414,9 +414,11 @@ void __init efi_init(void)
414 if (memmap.map == NULL) 414 if (memmap.map == NULL)
415 printk(KERN_ERR "Could not map the EFI memory map!\n"); 415 printk(KERN_ERR "Could not map the EFI memory map!\n");
416 memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size); 416 memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
417
417 if (memmap.desc_size != sizeof(efi_memory_desc_t)) 418 if (memmap.desc_size != sizeof(efi_memory_desc_t))
418 printk(KERN_WARNING "Kernel-defined memdesc" 419 printk(KERN_WARNING
419 "doesn't match the one from EFI!\n"); 420 "Kernel-defined memdesc doesn't match the one from EFI!\n");
421
420 if (add_efi_memmap) 422 if (add_efi_memmap)
421 do_add_efi_memmap(); 423 do_add_efi_memmap();
422 424
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 89434d43960..cf3a0b2d005 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -275,9 +275,9 @@ ENTRY(native_usergs_sysret64)
275ENTRY(ret_from_fork) 275ENTRY(ret_from_fork)
276 CFI_DEFAULT_STACK 276 CFI_DEFAULT_STACK
277 push kernel_eflags(%rip) 277 push kernel_eflags(%rip)
278 CFI_ADJUST_CFA_OFFSET 4 278 CFI_ADJUST_CFA_OFFSET 8
279 popf # reset kernel eflags 279 popf # reset kernel eflags
280 CFI_ADJUST_CFA_OFFSET -4 280 CFI_ADJUST_CFA_OFFSET -8
281 call schedule_tail 281 call schedule_tail
282 GET_THREAD_INFO(%rcx) 282 GET_THREAD_INFO(%rcx)
283 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) 283 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 9bfc4d72fb2..d16084f9064 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -108,12 +108,11 @@ void __init x86_64_start_kernel(char * real_mode_data)
108 } 108 }
109 load_idt((const struct desc_ptr *)&idt_descr); 109 load_idt((const struct desc_ptr *)&idt_descr);
110 110
111 early_printk("Kernel alive\n"); 111 if (console_loglevel == 10)
112 early_printk("Kernel alive\n");
112 113
113 x86_64_init_pda(); 114 x86_64_init_pda();
114 115
115 early_printk("Kernel really alive\n");
116
117 x86_64_start_reservations(real_mode_data); 116 x86_64_start_reservations(real_mode_data);
118} 117}
119 118
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 50e5e4a31c8..19191430274 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/thread_info.h> 15#include <linux/thread_info.h>
16#include <linux/syscalls.h> 16#include <linux/syscalls.h>
17#include <asm/syscalls.h>
17 18
18/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ 19/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
19static void set_bitmap(unsigned long *bitmap, unsigned int base, 20static void set_bitmap(unsigned long *bitmap, unsigned int base,
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 3f7537b669d..f1c688e46f3 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -20,6 +20,8 @@
20 20
21#ifdef CONFIG_X86_32 21#ifdef CONFIG_X86_32
22#include <mach_apic.h> 22#include <mach_apic.h>
23#include <mach_ipi.h>
24
23/* 25/*
24 * the following functions deal with sending IPIs between CPUs. 26 * the following functions deal with sending IPIs between CPUs.
25 * 27 *
@@ -147,7 +149,6 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
147} 149}
148 150
149/* must come after the send_IPI functions above for inlining */ 151/* must come after the send_IPI functions above for inlining */
150#include <mach_ipi.h>
151static int convert_apicid_to_cpu(int apic_id) 152static int convert_apicid_to_cpu(int apic_id)
152{ 153{
153 int i; 154 int i;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 1cf8c1fcc08..b71e02d42f4 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -325,7 +325,7 @@ skip:
325 for_each_online_cpu(j) 325 for_each_online_cpu(j)
326 seq_printf(p, "%10u ", 326 seq_printf(p, "%10u ",
327 per_cpu(irq_stat,j).irq_call_count); 327 per_cpu(irq_stat,j).irq_call_count);
328 seq_printf(p, " function call interrupts\n"); 328 seq_printf(p, " Function call interrupts\n");
329 seq_printf(p, "TLB: "); 329 seq_printf(p, "TLB: ");
330 for_each_online_cpu(j) 330 for_each_online_cpu(j)
331 seq_printf(p, "%10u ", 331 seq_printf(p, "%10u ",
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1f78b238d8d..f065fe9071b 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -129,7 +129,7 @@ skip:
129 seq_printf(p, "CAL: "); 129 seq_printf(p, "CAL: ");
130 for_each_online_cpu(j) 130 for_each_online_cpu(j)
131 seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); 131 seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
132 seq_printf(p, " function call interrupts\n"); 132 seq_printf(p, " Function call interrupts\n");
133 seq_printf(p, "TLB: "); 133 seq_printf(p, "TLB: ");
134 for_each_online_cpu(j) 134 for_each_online_cpu(j)
135 seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); 135 seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8b7a3cf37d2..478bca986ec 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -178,7 +178,7 @@ static void kvm_flush_tlb(void)
178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb); 178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
179} 179}
180 180
181static void kvm_release_pt(u32 pfn) 181static void kvm_release_pt(unsigned long pfn)
182{ 182{
183 struct kvm_mmu_op_release_pt rpt = { 183 struct kvm_mmu_op_release_pt rpt = {
184 .header.op = KVM_MMU_OP_RELEASE_PT, 184 .header.op = KVM_MMU_OP_RELEASE_PT,
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index b68e21f06f4..0ed5f939b90 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -18,6 +18,7 @@
18#include <asm/ldt.h> 18#include <asm/ldt.h>
19#include <asm/desc.h> 19#include <asm/desc.h>
20#include <asm/mmu_context.h> 20#include <asm/mmu_context.h>
21#include <asm/syscalls.h>
21 22
22#ifdef CONFIG_SMP 23#ifdef CONFIG_SMP
23static void flush_ldt(void *current_mm) 24static void flush_ldt(void *current_mm)
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index abb78a2cc4a..2c97f07f1c2 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -299,6 +299,15 @@ void acpi_nmi_disable(void)
299 on_each_cpu(__acpi_nmi_disable, NULL, 1); 299 on_each_cpu(__acpi_nmi_disable, NULL, 1);
300} 300}
301 301
302/*
303 * This function is called as soon as the LAPIC NMI watchdog driver has everything
304 * in place and it's ready to check if the NMIs belong to the NMI watchdog
305 */
306void cpu_nmi_set_wd_enabled(void)
307{
308 __get_cpu_var(wd_enabled) = 1;
309}
310
302void setup_apic_nmi_watchdog(void *unused) 311void setup_apic_nmi_watchdog(void *unused)
303{ 312{
304 if (__get_cpu_var(wd_enabled)) 313 if (__get_cpu_var(wd_enabled))
@@ -311,8 +320,6 @@ void setup_apic_nmi_watchdog(void *unused)
311 320
312 switch (nmi_watchdog) { 321 switch (nmi_watchdog) {
313 case NMI_LOCAL_APIC: 322 case NMI_LOCAL_APIC:
314 /* enable it before to avoid race with handler */
315 __get_cpu_var(wd_enabled) = 1;
316 if (lapic_watchdog_init(nmi_hz) < 0) { 323 if (lapic_watchdog_init(nmi_hz) < 0) {
317 __get_cpu_var(wd_enabled) = 0; 324 __get_cpu_var(wd_enabled) = 0;
318 return; 325 return;
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 3e667227480..7a13fac63a1 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -190,12 +190,12 @@ EXPORT_SYMBOL_GPL(olpc_ec_cmd);
190static void __init platform_detect(void) 190static void __init platform_detect(void)
191{ 191{
192 size_t propsize; 192 size_t propsize;
193 u32 rev; 193 __be32 rev;
194 194
195 if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4, 195 if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4,
196 &propsize) || propsize != 4) { 196 &propsize) || propsize != 4) {
197 printk(KERN_ERR "ofw: getprop call failed!\n"); 197 printk(KERN_ERR "ofw: getprop call failed!\n");
198 rev = 0; 198 rev = cpu_to_be32(0);
199 } 199 }
200 olpc_platform_info.boardrev = be32_to_cpu(rev); 200 olpc_platform_info.boardrev = be32_to_cpu(rev);
201} 201}
@@ -203,7 +203,7 @@ static void __init platform_detect(void)
203static void __init platform_detect(void) 203static void __init platform_detect(void)
204{ 204{
205 /* stopgap until OFW support is added to the kernel */ 205 /* stopgap until OFW support is added to the kernel */
206 olpc_platform_info.boardrev = be32_to_cpu(0xc2); 206 olpc_platform_info.boardrev = 0xc2;
207} 207}
208#endif 208#endif
209 209
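
The olpc.c hunk is an endianness fix: the firmware hands back the board revision in big-endian form, so the variable is now declared __be32 and the fallback value is stored via cpu_to_be32() before the single be32_to_cpu() conversion. A small user-space illustration of why converting an already-native value was wrong on little-endian x86; the byte-order helpers here are local stand-ins, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* local stand-ins with the assumed semantics of the kernel helpers:
 * convert between CPU byte order and big-endian order */
static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00U) |
               ((x << 8) & 0x00ff0000U) | (x << 24);
}

static int cpu_is_little_endian(void)
{
        uint32_t probe = 1;

        return *(unsigned char *)&probe == 1;
}

static uint32_t be32_to_cpu(uint32_t x)
{
        return cpu_is_little_endian() ? swab32(x) : x;
}

static uint32_t cpu_to_be32(uint32_t x)
{
        return be32_to_cpu(x); /* the same byte swap in both directions */
}

int main(void)
{
        /* firmware would hand back a big-endian board revision, e.g. 0xc2 */
        uint32_t wire = cpu_to_be32(0xc2);

        /* correct: convert the big-endian value exactly once */
        printf("boardrev = 0x%x\n", be32_to_cpu(wire));

        /* the old bug: treating an already-native value as big-endian
         * turns 0xc2 into 0xc2000000 on little-endian x86 */
        printf("bogus    = 0x%x\n", be32_to_cpu(0xc2));
        return 0;
}
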
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 300da17e61c..e2f43768723 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = {
330#endif 330#endif
331 .wbinvd = native_wbinvd, 331 .wbinvd = native_wbinvd,
332 .read_msr = native_read_msr_safe, 332 .read_msr = native_read_msr_safe,
333 .read_msr_amd = native_read_msr_amd_safe,
333 .write_msr = native_write_msr_safe, 334 .write_msr = native_write_msr_safe,
334 .read_tsc = native_read_tsc, 335 .read_tsc = native_read_tsc,
335 .read_pmc = native_read_pmc, 336 .read_pmc = native_read_pmc,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 58262218781..9fe644f4861 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
23 start = start_##ops##_##x; \ 23 start = start_##ops##_##x; \
24 end = end_##ops##_##x; \ 24 end = end_##ops##_##x; \
25 goto patch_site 25 goto patch_site
26 switch(type) { 26 switch (type) {
27 PATCH_SITE(pv_irq_ops, irq_disable); 27 PATCH_SITE(pv_irq_ops, irq_disable);
28 PATCH_SITE(pv_irq_ops, irq_enable); 28 PATCH_SITE(pv_irq_ops, irq_enable);
29 PATCH_SITE(pv_irq_ops, restore_fl); 29 PATCH_SITE(pv_irq_ops, restore_fl);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 87d4d6964ec..f704cb51ff8 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -82,7 +82,7 @@ void __init dma32_reserve_bootmem(void)
82 * using 512M as goal 82 * using 512M as goal
83 */ 83 */
84 align = 64ULL<<20; 84 align = 64ULL<<20;
85 size = round_up(dma32_bootmem_size, align); 85 size = roundup(dma32_bootmem_size, align);
86 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, 86 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
87 512ULL<<20); 87 512ULL<<20);
88 if (dma32_bootmem_ptr) 88 if (dma32_bootmem_ptr)
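
The pci-dma.c hunk swaps round_up() for roundup(). For a power-of-two alignment such as 64 MB the two compute the same value (one divides and multiplies, the other masks), so the change reads as a naming cleanup rather than a behavioural one. A tiny sketch of the two rounding styles, using locally defined macros with the assumed semantics:

#include <stdio.h>

/* local equivalents (assumed) of the two kernel helpers: ROUNDUP works for
 * any multiple, ROUND_UP_POW2 relies on the alignment being a power of two */
#define ROUNDUP(x, y)           ((((x) + ((y) - 1)) / (y)) * (y))
#define ROUND_UP_POW2(x, y)     (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
        unsigned long long align = 64ULL << 20;  /* 64 MB, as in dma32_reserve_bootmem() */
        unsigned long long size = 100ULL << 20;  /* e.g. a 100 MB request */

        printf("roundup:  %llu MB\n", ROUNDUP(size, align) >> 20);
        printf("round_up: %llu MB\n", ROUND_UP_POW2(size, align) >> 20);
        return 0;
}
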
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index be33a5442d8..1a895a58253 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -82,7 +82,8 @@ AGPEXTERN __u32 *agp_gatt_table;
82static unsigned long next_bit; /* protected by iommu_bitmap_lock */ 82static unsigned long next_bit; /* protected by iommu_bitmap_lock */
83static int need_flush; /* global flush state. set for each gart wrap */ 83static int need_flush; /* global flush state. set for each gart wrap */
84 84
85static unsigned long alloc_iommu(struct device *dev, int size) 85static unsigned long alloc_iommu(struct device *dev, int size,
86 unsigned long align_mask)
86{ 87{
87 unsigned long offset, flags; 88 unsigned long offset, flags;
88 unsigned long boundary_size; 89 unsigned long boundary_size;
@@ -90,16 +91,17 @@ static unsigned long alloc_iommu(struct device *dev, int size)
90 91
91 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), 92 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
92 PAGE_SIZE) >> PAGE_SHIFT; 93 PAGE_SIZE) >> PAGE_SHIFT;
93 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 94 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
94 PAGE_SIZE) >> PAGE_SHIFT; 95 PAGE_SIZE) >> PAGE_SHIFT;
95 96
96 spin_lock_irqsave(&iommu_bitmap_lock, flags); 97 spin_lock_irqsave(&iommu_bitmap_lock, flags);
97 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit, 98 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
98 size, base_index, boundary_size, 0); 99 size, base_index, boundary_size, align_mask);
99 if (offset == -1) { 100 if (offset == -1) {
100 need_flush = 1; 101 need_flush = 1;
101 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0, 102 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
102 size, base_index, boundary_size, 0); 103 size, base_index, boundary_size,
104 align_mask);
103 } 105 }
104 if (offset != -1) { 106 if (offset != -1) {
105 next_bit = offset+size; 107 next_bit = offset+size;
@@ -236,10 +238,10 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
236 * Caller needs to check if the iommu is needed and flush. 238 * Caller needs to check if the iommu is needed and flush.
237 */ 239 */
238static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, 240static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
239 size_t size, int dir) 241 size_t size, int dir, unsigned long align_mask)
240{ 242{
241 unsigned long npages = iommu_num_pages(phys_mem, size); 243 unsigned long npages = iommu_num_pages(phys_mem, size);
242 unsigned long iommu_page = alloc_iommu(dev, npages); 244 unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
243 int i; 245 int i;
244 246
245 if (iommu_page == -1) { 247 if (iommu_page == -1) {
@@ -262,7 +264,11 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
262static dma_addr_t 264static dma_addr_t
263gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir) 265gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
264{ 266{
265 dma_addr_t map = dma_map_area(dev, paddr, size, dir); 267 dma_addr_t map;
268 unsigned long align_mask;
269
270 align_mask = (1UL << get_order(size)) - 1;
271 map = dma_map_area(dev, paddr, size, dir, align_mask);
266 272
267 flush_gart(); 273 flush_gart();
268 274
@@ -281,7 +287,8 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
281 if (!need_iommu(dev, paddr, size)) 287 if (!need_iommu(dev, paddr, size))
282 return paddr; 288 return paddr;
283 289
284 bus = gart_map_simple(dev, paddr, size, dir); 290 bus = dma_map_area(dev, paddr, size, dir, 0);
291 flush_gart();
285 292
286 return bus; 293 return bus;
287} 294}
@@ -340,7 +347,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
340 unsigned long addr = sg_phys(s); 347 unsigned long addr = sg_phys(s);
341 348
342 if (nonforced_iommu(dev, addr, s->length)) { 349 if (nonforced_iommu(dev, addr, s->length)) {
343 addr = dma_map_area(dev, addr, s->length, dir); 350 addr = dma_map_area(dev, addr, s->length, dir, 0);
344 if (addr == bad_dma_address) { 351 if (addr == bad_dma_address) {
345 if (i > 0) 352 if (i > 0)
346 gart_unmap_sg(dev, sg, i, dir); 353 gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +369,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
362 int nelems, struct scatterlist *sout, 369 int nelems, struct scatterlist *sout,
363 unsigned long pages) 370 unsigned long pages)
364{ 371{
365 unsigned long iommu_start = alloc_iommu(dev, pages); 372 unsigned long iommu_start = alloc_iommu(dev, pages, 0);
366 unsigned long iommu_page = iommu_start; 373 unsigned long iommu_page = iommu_start;
367 struct scatterlist *s; 374 struct scatterlist *s;
368 int i; 375 int i;
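
With the new align_mask parameter, gart_map_simple() asks alloc_iommu() for a naturally aligned range: align_mask = (1UL << get_order(size)) - 1, so a mapping covering 2^order pages starts on a 2^order-page boundary in the GART aperture. A short sketch of that arithmetic, with an assumed PAGE_SIZE and a local approximation of get_order():

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/* assumed equivalent of the kernel's get_order(): the smallest 'order'
 * such that (PAGE_SIZE << order) >= size */
static int get_order(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long size = 3 * PAGE_SIZE + 123;       /* a mapping spanning 4 pages */
        unsigned long align_mask = (1UL << get_order(size)) - 1;

        /* the allocator may only return start indices with these bits clear,
         * i.e. start % (1 << order) == 0 */
        printf("order=%d, align_mask=0x%lx\n", get_order(size), align_mask);
        return 0;
}
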
diff --git a/arch/x86/kernel/pcspeaker.c b/arch/x86/kernel/pcspeaker.c
index bc1f2d3ea27..a311ffcaad1 100644
--- a/arch/x86/kernel/pcspeaker.c
+++ b/arch/x86/kernel/pcspeaker.c
@@ -1,20 +1,13 @@
1#include <linux/platform_device.h> 1#include <linux/platform_device.h>
2#include <linux/errno.h> 2#include <linux/err.h>
3#include <linux/init.h> 3#include <linux/init.h>
4 4
5static __init int add_pcspkr(void) 5static __init int add_pcspkr(void)
6{ 6{
7 struct platform_device *pd; 7 struct platform_device *pd;
8 int ret;
9 8
10 pd = platform_device_alloc("pcspkr", -1); 9 pd = platform_device_register_simple("pcspkr", -1, NULL, 0);
11 if (!pd)
12 return -ENOMEM;
13 10
14 ret = platform_device_add(pd); 11 return IS_ERR(pd) ? PTR_ERR(pd) : 0;
15 if (ret)
16 platform_device_put(pd);
17
18 return ret;
19} 12}
20device_initcall(add_pcspkr); 13device_initcall(add_pcspkr);
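
The pcspeaker.c rewrite relies on the ERR_PTR convention: platform_device_register_simple() returns either a valid device pointer or an errno encoded as a pointer, so allocation, registration and cleanup collapse into a single IS_ERR()/PTR_ERR() check. A minimal user-space mock of that pattern; the helpers and the register_simple() stub below are local stand-ins, not the kernel API.

#include <stdio.h>
#include <errno.h>

/* local mock of the ERR_PTR convention (assumed): errno values live in the
 * topmost 4095 byte values of the address space */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)        { return (void *)error; }
static inline long PTR_ERR(const void *ptr)    { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical stand-in for platform_device_register_simple() */
static void *register_simple(int fail)
{
        static int dummy_device;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_device;
}

static int add_pcspkr(int fail)
{
        void *pd = register_simple(fail);

        /* one line replaces the old alloc/add/put error handling */
        return IS_ERR(pd) ? (int)PTR_ERR(pd) : 0;
}

int main(void)
{
        printf("success path: %d\n", add_pcspkr(0));
        printf("failure path: %d\n", add_pcspkr(1));
        return 0;
}
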
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 876e9189077..ec7a2ba9bce 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -185,7 +185,8 @@ static void mwait_idle(void)
185static void poll_idle(void) 185static void poll_idle(void)
186{ 186{
187 local_irq_enable(); 187 local_irq_enable();
188 cpu_relax(); 188 while (!need_resched())
189 cpu_relax();
189} 190}
190 191
191/* 192/*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 31f40b24bf5..205188db962 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -37,6 +37,7 @@
37#include <linux/tick.h> 37#include <linux/tick.h>
38#include <linux/percpu.h> 38#include <linux/percpu.h>
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/dmi.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
@@ -56,6 +57,8 @@
56#include <asm/cpu.h> 57#include <asm/cpu.h>
57#include <asm/kdebug.h> 58#include <asm/kdebug.h>
58#include <asm/idle.h> 59#include <asm/idle.h>
60#include <asm/syscalls.h>
61#include <asm/smp.h>
59 62
60asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 63asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
61 64
@@ -161,6 +164,7 @@ void __show_registers(struct pt_regs *regs, int all)
161 unsigned long d0, d1, d2, d3, d6, d7; 164 unsigned long d0, d1, d2, d3, d6, d7;
162 unsigned long sp; 165 unsigned long sp;
163 unsigned short ss, gs; 166 unsigned short ss, gs;
167 const char *board;
164 168
165 if (user_mode_vm(regs)) { 169 if (user_mode_vm(regs)) {
166 sp = regs->sp; 170 sp = regs->sp;
@@ -173,11 +177,15 @@ void __show_registers(struct pt_regs *regs, int all)
173 } 177 }
174 178
175 printk("\n"); 179 printk("\n");
176 printk("Pid: %d, comm: %s %s (%s %.*s)\n", 180
181 board = dmi_get_system_info(DMI_PRODUCT_NAME);
182 if (!board)
183 board = "";
184 printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
177 task_pid_nr(current), current->comm, 185 task_pid_nr(current), current->comm,
178 print_tainted(), init_utsname()->release, 186 print_tainted(), init_utsname()->release,
179 (int)strcspn(init_utsname()->version, " "), 187 (int)strcspn(init_utsname()->version, " "),
180 init_utsname()->version); 188 init_utsname()->version, board);
181 189
182 printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", 190 printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
183 (u16)regs->cs, regs->ip, regs->flags, 191 (u16)regs->cs, regs->ip, regs->flags,
@@ -277,6 +285,14 @@ void exit_thread(void)
277 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; 285 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
278 put_cpu(); 286 put_cpu();
279 } 287 }
288#ifdef CONFIG_X86_DS
289 /* Free any DS contexts that have not been properly released. */
290 if (unlikely(current->thread.ds_ctx)) {
291 /* we clear debugctl to make sure DS is not used. */
292 update_debugctlmsr(0);
293 ds_free(current->thread.ds_ctx);
294 }
295#endif /* CONFIG_X86_DS */
280} 296}
281 297
282void flush_thread(void) 298void flush_thread(void)
@@ -438,6 +454,35 @@ int set_tsc_mode(unsigned int val)
438 return 0; 454 return 0;
439} 455}
440 456
457#ifdef CONFIG_X86_DS
458static int update_debugctl(struct thread_struct *prev,
459 struct thread_struct *next, unsigned long debugctl)
460{
461 unsigned long ds_prev = 0;
462 unsigned long ds_next = 0;
463
464 if (prev->ds_ctx)
465 ds_prev = (unsigned long)prev->ds_ctx->ds;
466 if (next->ds_ctx)
467 ds_next = (unsigned long)next->ds_ctx->ds;
468
469 if (ds_next != ds_prev) {
470 /* we clear debugctl to make sure DS
471 * is not in use when we change it */
472 debugctl = 0;
473 update_debugctlmsr(0);
474 wrmsr(MSR_IA32_DS_AREA, ds_next, 0);
475 }
476 return debugctl;
477}
478#else
479static int update_debugctl(struct thread_struct *prev,
480 struct thread_struct *next, unsigned long debugctl)
481{
482 return debugctl;
483}
484#endif /* CONFIG_X86_DS */
485
441static noinline void 486static noinline void
442__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 487__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
443 struct tss_struct *tss) 488 struct tss_struct *tss)
@@ -448,14 +493,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
448 prev = &prev_p->thread; 493 prev = &prev_p->thread;
449 next = &next_p->thread; 494 next = &next_p->thread;
450 495
451 debugctl = prev->debugctlmsr; 496 debugctl = update_debugctl(prev, next, prev->debugctlmsr);
452 if (next->ds_area_msr != prev->ds_area_msr) {
453 /* we clear debugctl to make sure DS
454 * is not in use when we change it */
455 debugctl = 0;
456 update_debugctlmsr(0);
457 wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
458 }
459 497
460 if (next->debugctlmsr != debugctl) 498 if (next->debugctlmsr != debugctl)
461 update_debugctlmsr(next->debugctlmsr); 499 update_debugctlmsr(next->debugctlmsr);
@@ -479,13 +517,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
479 hard_enable_TSC(); 517 hard_enable_TSC();
480 } 518 }
481 519
482#ifdef X86_BTS 520#ifdef CONFIG_X86_PTRACE_BTS
483 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) 521 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
484 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); 522 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
485 523
486 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) 524 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
487 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); 525 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
488#endif 526#endif /* CONFIG_X86_PTRACE_BTS */
489 527
490 528
491 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { 529 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e12e0e4dd25..2a8ccb9238b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,11 +37,11 @@
37#include <linux/kdebug.h> 37#include <linux/kdebug.h>
38#include <linux/tick.h> 38#include <linux/tick.h>
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/uaccess.h>
41#include <linux/io.h>
40 42
41#include <asm/uaccess.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
43#include <asm/system.h> 44#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/processor.h> 45#include <asm/processor.h>
46#include <asm/i387.h> 46#include <asm/i387.h>
47#include <asm/mmu_context.h> 47#include <asm/mmu_context.h>
@@ -51,6 +51,7 @@
51#include <asm/proto.h> 51#include <asm/proto.h>
52#include <asm/ia32.h> 52#include <asm/ia32.h>
53#include <asm/idle.h> 53#include <asm/idle.h>
54#include <asm/syscalls.h>
54 55
55asmlinkage extern void ret_from_fork(void); 56asmlinkage extern void ret_from_fork(void);
56 57
@@ -88,7 +89,7 @@ void exit_idle(void)
88#ifdef CONFIG_HOTPLUG_CPU 89#ifdef CONFIG_HOTPLUG_CPU
89DECLARE_PER_CPU(int, cpu_state); 90DECLARE_PER_CPU(int, cpu_state);
90 91
91#include <asm/nmi.h> 92#include <linux/nmi.h>
92/* We halt the CPU with physical CPU hotplug */ 93/* We halt the CPU with physical CPU hotplug */
93static inline void play_dead(void) 94static inline void play_dead(void)
94{ 95{
@@ -153,7 +154,7 @@ void cpu_idle(void)
153} 154}
154 155
155/* Prints also some state that isn't saved in the pt_regs */ 156/* Prints also some state that isn't saved in the pt_regs */
156void __show_regs(struct pt_regs * regs) 157void __show_regs(struct pt_regs *regs)
157{ 158{
158 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; 159 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
159 unsigned long d0, d1, d2, d3, d6, d7; 160 unsigned long d0, d1, d2, d3, d6, d7;
@@ -162,59 +163,61 @@ void __show_regs(struct pt_regs * regs)
162 163
163 printk("\n"); 164 printk("\n");
164 print_modules(); 165 print_modules();
165 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 166 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
166 current->pid, current->comm, print_tainted(), 167 current->pid, current->comm, print_tainted(),
167 init_utsname()->release, 168 init_utsname()->release,
168 (int)strcspn(init_utsname()->version, " "), 169 (int)strcspn(init_utsname()->version, " "),
169 init_utsname()->version); 170 init_utsname()->version);
170 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); 171 printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
171 printk_address(regs->ip, 1); 172 printk_address(regs->ip, 1);
172 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, 173 printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
173 regs->flags); 174 regs->sp, regs->flags);
174 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", 175 printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
175 regs->ax, regs->bx, regs->cx); 176 regs->ax, regs->bx, regs->cx);
176 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", 177 printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
177 regs->dx, regs->si, regs->di); 178 regs->dx, regs->si, regs->di);
178 printk("RBP: %016lx R08: %016lx R09: %016lx\n", 179 printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
179 regs->bp, regs->r8, regs->r9); 180 regs->bp, regs->r8, regs->r9);
180 printk("R10: %016lx R11: %016lx R12: %016lx\n", 181 printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
181 regs->r10, regs->r11, regs->r12); 182 regs->r10, regs->r11, regs->r12);
182 printk("R13: %016lx R14: %016lx R15: %016lx\n", 183 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
183 regs->r13, regs->r14, regs->r15); 184 regs->r13, regs->r14, regs->r15);
184 185
185 asm("movl %%ds,%0" : "=r" (ds)); 186 asm("movl %%ds,%0" : "=r" (ds));
186 asm("movl %%cs,%0" : "=r" (cs)); 187 asm("movl %%cs,%0" : "=r" (cs));
187 asm("movl %%es,%0" : "=r" (es)); 188 asm("movl %%es,%0" : "=r" (es));
188 asm("movl %%fs,%0" : "=r" (fsindex)); 189 asm("movl %%fs,%0" : "=r" (fsindex));
189 asm("movl %%gs,%0" : "=r" (gsindex)); 190 asm("movl %%gs,%0" : "=r" (gsindex));
190 191
191 rdmsrl(MSR_FS_BASE, fs); 192 rdmsrl(MSR_FS_BASE, fs);
192 rdmsrl(MSR_GS_BASE, gs); 193 rdmsrl(MSR_GS_BASE, gs);
193 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 194 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
194 195
195 cr0 = read_cr0(); 196 cr0 = read_cr0();
196 cr2 = read_cr2(); 197 cr2 = read_cr2();
197 cr3 = read_cr3(); 198 cr3 = read_cr3();
198 cr4 = read_cr4(); 199 cr4 = read_cr4();
199 200
200 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 201 printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
201 fs,fsindex,gs,gsindex,shadowgs); 202 fs, fsindex, gs, gsindex, shadowgs);
202 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 203 printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
203 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); 204 es, cr0);
205 printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
206 cr4);
204 207
205 get_debugreg(d0, 0); 208 get_debugreg(d0, 0);
206 get_debugreg(d1, 1); 209 get_debugreg(d1, 1);
207 get_debugreg(d2, 2); 210 get_debugreg(d2, 2);
208 printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); 211 printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
209 get_debugreg(d3, 3); 212 get_debugreg(d3, 3);
210 get_debugreg(d6, 6); 213 get_debugreg(d6, 6);
211 get_debugreg(d7, 7); 214 get_debugreg(d7, 7);
212 printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); 215 printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
213} 216}
214 217
215void show_regs(struct pt_regs *regs) 218void show_regs(struct pt_regs *regs)
216{ 219{
217 printk("CPU %d:", smp_processor_id()); 220 printk(KERN_INFO "CPU %d:", smp_processor_id());
218 __show_regs(regs); 221 __show_regs(regs);
219 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 222 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
220} 223}
@@ -240,6 +243,14 @@ void exit_thread(void)
240 t->io_bitmap_max = 0; 243 t->io_bitmap_max = 0;
241 put_cpu(); 244 put_cpu();
242 } 245 }
246#ifdef CONFIG_X86_DS
247 /* Free any DS contexts that have not been properly released. */
248 if (unlikely(t->ds_ctx)) {
249 /* we clear debugctl to make sure DS is not used. */
250 update_debugctlmsr(0);
251 ds_free(t->ds_ctx);
252 }
253#endif /* CONFIG_X86_DS */
243} 254}
244 255
245void flush_thread(void) 256void flush_thread(void)
@@ -315,10 +326,10 @@ void prepare_to_copy(struct task_struct *tsk)
315 326
316int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, 327int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
317 unsigned long unused, 328 unsigned long unused,
318 struct task_struct * p, struct pt_regs * regs) 329 struct task_struct *p, struct pt_regs *regs)
319{ 330{
320 int err; 331 int err;
321 struct pt_regs * childregs; 332 struct pt_regs *childregs;
322 struct task_struct *me = current; 333 struct task_struct *me = current;
323 334
324 childregs = ((struct pt_regs *) 335 childregs = ((struct pt_regs *)
@@ -363,10 +374,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
363 if (test_thread_flag(TIF_IA32)) 374 if (test_thread_flag(TIF_IA32))
364 err = do_set_thread_area(p, -1, 375 err = do_set_thread_area(p, -1,
365 (struct user_desc __user *)childregs->si, 0); 376 (struct user_desc __user *)childregs->si, 0);
366 else 377 else
367#endif 378#endif
368 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 379 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
369 if (err) 380 if (err)
370 goto out; 381 goto out;
371 } 382 }
372 err = 0; 383 err = 0;
@@ -473,13 +484,27 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
473 next = &next_p->thread; 484 next = &next_p->thread;
474 485
475 debugctl = prev->debugctlmsr; 486 debugctl = prev->debugctlmsr;
476 if (next->ds_area_msr != prev->ds_area_msr) { 487
477 /* we clear debugctl to make sure DS 488#ifdef CONFIG_X86_DS
478 * is not in use when we change it */ 489 {
479 debugctl = 0; 490 unsigned long ds_prev = 0, ds_next = 0;
480 update_debugctlmsr(0); 491
481 wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr); 492 if (prev->ds_ctx)
493 ds_prev = (unsigned long)prev->ds_ctx->ds;
494 if (next->ds_ctx)
495 ds_next = (unsigned long)next->ds_ctx->ds;
496
497 if (ds_next != ds_prev) {
498 /*
499 * We clear debugctl to make sure DS
500 * is not in use when we change it:
501 */
502 debugctl = 0;
503 update_debugctlmsr(0);
504 wrmsrl(MSR_IA32_DS_AREA, ds_next);
505 }
482 } 506 }
507#endif /* CONFIG_X86_DS */
483 508
484 if (next->debugctlmsr != debugctl) 509 if (next->debugctlmsr != debugctl)
485 update_debugctlmsr(next->debugctlmsr); 510 update_debugctlmsr(next->debugctlmsr);
@@ -517,13 +542,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
517 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); 542 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
518 } 543 }
519 544
520#ifdef X86_BTS 545#ifdef CONFIG_X86_PTRACE_BTS
521 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) 546 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
522 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); 547 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
523 548
524 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) 549 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
525 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); 550 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
526#endif 551#endif /* CONFIG_X86_PTRACE_BTS */
527} 552}
528 553
529/* 554/*
@@ -545,7 +570,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
545 unsigned fsindex, gsindex; 570 unsigned fsindex, gsindex;
546 571
547 /* we're going to use this soon, after a few expensive things */ 572 /* we're going to use this soon, after a few expensive things */
548 if (next_p->fpu_counter>5) 573 if (next_p->fpu_counter > 5)
549 prefetch(next->xstate); 574 prefetch(next->xstate);
550 575
551 /* 576 /*
@@ -553,13 +578,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
553 */ 578 */
554 load_sp0(tss, next); 579 load_sp0(tss, next);
555 580
556 /* 581 /*
557 * Switch DS and ES. 582 * Switch DS and ES.
558 * This won't pick up thread selector changes, but I guess that is ok. 583 * This won't pick up thread selector changes, but I guess that is ok.
559 */ 584 */
560 savesegment(es, prev->es); 585 savesegment(es, prev->es);
561 if (unlikely(next->es | prev->es)) 586 if (unlikely(next->es | prev->es))
562 loadsegment(es, next->es); 587 loadsegment(es, next->es);
563 588
564 savesegment(ds, prev->ds); 589 savesegment(ds, prev->ds);
565 if (unlikely(next->ds | prev->ds)) 590 if (unlikely(next->ds | prev->ds))
@@ -585,7 +610,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
585 */ 610 */
586 arch_leave_lazy_cpu_mode(); 611 arch_leave_lazy_cpu_mode();
587 612
588 /* 613 /*
589 * Switch FS and GS. 614 * Switch FS and GS.
590 * 615 *
591 * Segment register != 0 always requires a reload. Also 616 * Segment register != 0 always requires a reload. Also
@@ -594,13 +619,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
594 */ 619 */
595 if (unlikely(fsindex | next->fsindex | prev->fs)) { 620 if (unlikely(fsindex | next->fsindex | prev->fs)) {
596 loadsegment(fs, next->fsindex); 621 loadsegment(fs, next->fsindex);
597 /* 622 /*
598 * Check if the user used a selector != 0; if yes 623 * Check if the user used a selector != 0; if yes
599 * clear 64bit base, since overloaded base is always 624 * clear 64bit base, since overloaded base is always
600 * mapped to the Null selector 625 * mapped to the Null selector
601 */ 626 */
602 if (fsindex) 627 if (fsindex)
603 prev->fs = 0; 628 prev->fs = 0;
604 } 629 }
605 /* when next process has a 64bit base use it */ 630 /* when next process has a 64bit base use it */
606 if (next->fs) 631 if (next->fs)
@@ -610,7 +635,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
610 if (unlikely(gsindex | next->gsindex | prev->gs)) { 635 if (unlikely(gsindex | next->gsindex | prev->gs)) {
611 load_gs_index(next->gsindex); 636 load_gs_index(next->gsindex);
612 if (gsindex) 637 if (gsindex)
613 prev->gs = 0; 638 prev->gs = 0;
614 } 639 }
615 if (next->gs) 640 if (next->gs)
616 wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 641 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
@@ -619,12 +644,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
619 /* Must be after DS reload */ 644 /* Must be after DS reload */
620 unlazy_fpu(prev_p); 645 unlazy_fpu(prev_p);
621 646
622 /* 647 /*
623 * Switch the PDA and FPU contexts. 648 * Switch the PDA and FPU contexts.
624 */ 649 */
625 prev->usersp = read_pda(oldrsp); 650 prev->usersp = read_pda(oldrsp);
626 write_pda(oldrsp, next->usersp); 651 write_pda(oldrsp, next->usersp);
627 write_pda(pcurrent, next_p); 652 write_pda(pcurrent, next_p);
628 653
629 write_pda(kernelstack, 654 write_pda(kernelstack,
630 (unsigned long)task_stack_page(next_p) + 655 (unsigned long)task_stack_page(next_p) +
@@ -665,7 +690,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
665 char __user * __user *envp, struct pt_regs *regs) 690 char __user * __user *envp, struct pt_regs *regs)
666{ 691{
667 long error; 692 long error;
668 char * filename; 693 char *filename;
669 694
670 filename = getname(name); 695 filename = getname(name);
671 error = PTR_ERR(filename); 696 error = PTR_ERR(filename);
@@ -723,55 +748,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
723unsigned long get_wchan(struct task_struct *p) 748unsigned long get_wchan(struct task_struct *p)
724{ 749{
725 unsigned long stack; 750 unsigned long stack;
726 u64 fp,ip; 751 u64 fp, ip;
727 int count = 0; 752 int count = 0;
728 753
729 if (!p || p == current || p->state==TASK_RUNNING) 754 if (!p || p == current || p->state == TASK_RUNNING)
730 return 0; 755 return 0;
731 stack = (unsigned long)task_stack_page(p); 756 stack = (unsigned long)task_stack_page(p);
732 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) 757 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
733 return 0; 758 return 0;
734 fp = *(u64 *)(p->thread.sp); 759 fp = *(u64 *)(p->thread.sp);
735 do { 760 do {
736 if (fp < (unsigned long)stack || 761 if (fp < (unsigned long)stack ||
737 fp > (unsigned long)stack+THREAD_SIZE) 762 fp > (unsigned long)stack+THREAD_SIZE)
738 return 0; 763 return 0;
739 ip = *(u64 *)(fp+8); 764 ip = *(u64 *)(fp+8);
740 if (!in_sched_functions(ip)) 765 if (!in_sched_functions(ip))
741 return ip; 766 return ip;
742 fp = *(u64 *)fp; 767 fp = *(u64 *)fp;
743 } while (count++ < 16); 768 } while (count++ < 16);
744 return 0; 769 return 0;
745} 770}
746 771
747long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) 772long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
748{ 773{
749 int ret = 0; 774 int ret = 0;
750 int doit = task == current; 775 int doit = task == current;
751 int cpu; 776 int cpu;
752 777
753 switch (code) { 778 switch (code) {
754 case ARCH_SET_GS: 779 case ARCH_SET_GS:
755 if (addr >= TASK_SIZE_OF(task)) 780 if (addr >= TASK_SIZE_OF(task))
756 return -EPERM; 781 return -EPERM;
757 cpu = get_cpu(); 782 cpu = get_cpu();
758 /* handle small bases via the GDT because that's faster to 783 /* handle small bases via the GDT because that's faster to
759 switch. */ 784 switch. */
760 if (addr <= 0xffffffff) { 785 if (addr <= 0xffffffff) {
761 set_32bit_tls(task, GS_TLS, addr); 786 set_32bit_tls(task, GS_TLS, addr);
762 if (doit) { 787 if (doit) {
763 load_TLS(&task->thread, cpu); 788 load_TLS(&task->thread, cpu);
764 load_gs_index(GS_TLS_SEL); 789 load_gs_index(GS_TLS_SEL);
765 } 790 }
766 task->thread.gsindex = GS_TLS_SEL; 791 task->thread.gsindex = GS_TLS_SEL;
767 task->thread.gs = 0; 792 task->thread.gs = 0;
768 } else { 793 } else {
769 task->thread.gsindex = 0; 794 task->thread.gsindex = 0;
770 task->thread.gs = addr; 795 task->thread.gs = addr;
771 if (doit) { 796 if (doit) {
772 load_gs_index(0); 797 load_gs_index(0);
773 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); 798 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
774 } 799 }
775 } 800 }
776 put_cpu(); 801 put_cpu();
777 break; 802 break;
@@ -825,8 +850,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
825 rdmsrl(MSR_KERNEL_GS_BASE, base); 850 rdmsrl(MSR_KERNEL_GS_BASE, base);
826 else 851 else
827 base = task->thread.gs; 852 base = task->thread.gs;
828 } 853 } else
829 else
830 base = task->thread.gs; 854 base = task->thread.gs;
831 ret = put_user(base, (unsigned long __user *)addr); 855 ret = put_user(base, (unsigned long __user *)addr);
832 break; 856 break;
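
The get_wchan() hunk above walks saved frame pointers: each x86-64 frame starts with the previous frame pointer, with the return address at fp+8, and the walk stops at the first return address outside the scheduler. A toy model of that loop with a fabricated stack and a made-up in_sched_functions() address range:

#include <stdio.h>
#include <stdint.h>

/* toy model of the frame layout read by get_wchan(): saved frame pointer
 * first, return address next */
struct frame {
        struct frame    *saved_fp;
        uint64_t        return_ip;
};

/* assumed predicate: "is this address inside the scheduler?" */
static int in_sched_functions(uint64_t ip)
{
        return ip >= 0x1000 && ip < 0x2000;
}

/* walk at most 16 frames, like get_wchan(), and report the first return
 * address that is not a scheduler function */
static uint64_t wchan(struct frame *fp)
{
        int count = 0;

        do {
                if (!fp)
                        return 0;
                if (!in_sched_functions(fp->return_ip))
                        return fp->return_ip;
                fp = fp->saved_fp;
        } while (count++ < 16);
        return 0;
}

int main(void)
{
        struct frame caller = { NULL, 0x4242 };         /* blocked in a driver */
        struct frame sched2 = { &caller, 0x1800 };      /* scheduler frame */
        struct frame sched1 = { &sched2, 0x1200 };      /* scheduler frame */

        printf("wchan = 0x%llx\n", (unsigned long long)wchan(&sched1));
        return 0;
}
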
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e37dccce85d..e375b658efc 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -14,6 +14,7 @@
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/ptrace.h> 15#include <linux/ptrace.h>
16#include <linux/regset.h> 16#include <linux/regset.h>
17#include <linux/tracehook.h>
17#include <linux/user.h> 18#include <linux/user.h>
18#include <linux/elf.h> 19#include <linux/elf.h>
19#include <linux/security.h> 20#include <linux/security.h>
@@ -69,7 +70,7 @@ static inline bool invalid_selector(u16 value)
69 70
70#define FLAG_MASK FLAG_MASK_32 71#define FLAG_MASK FLAG_MASK_32
71 72
72static long *pt_regs_access(struct pt_regs *regs, unsigned long regno) 73static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
73{ 74{
74 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); 75 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
75 regno >>= 2; 76 regno >>= 2;
@@ -554,45 +555,115 @@ static int ptrace_set_debugreg(struct task_struct *child,
554 return 0; 555 return 0;
555} 556}
556 557
557#ifdef X86_BTS 558#ifdef CONFIG_X86_PTRACE_BTS
559/*
560 * The configuration for a particular BTS hardware implementation.
561 */
562struct bts_configuration {
563 /* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */
564 unsigned char sizeof_bts;
565 /* the size of a field in the BTS record in bytes */
566 unsigned char sizeof_field;
567 /* a bitmask to enable/disable BTS in DEBUGCTL MSR */
568 unsigned long debugctl_mask;
569};
570static struct bts_configuration bts_cfg;
571
572#define BTS_MAX_RECORD_SIZE (8 * 3)
573
574
575/*
576 * Branch Trace Store (BTS) uses the following format. Different
577 * architectures vary in the size of those fields.
578 * - source linear address
579 * - destination linear address
580 * - flags
581 *
582 * Later architectures use 64bit pointers throughout, whereas earlier
583 * architectures use 32bit pointers in 32bit mode.
584 *
585 * We compute the base address for the first 8 fields based on:
586 * - the field size stored in the DS configuration
587 * - the relative field position
588 *
589 * In order to store additional information in the BTS buffer, we use
590 * a special source address to indicate that the record requires
591 * special interpretation.
592 *
593 * Netburst indicated via a bit in the flags field whether the branch
594 * was predicted; this is ignored.
595 */
596
597enum bts_field {
598 bts_from = 0,
599 bts_to,
600 bts_flags,
601
602 bts_escape = (unsigned long)-1,
603 bts_qual = bts_to,
604 bts_jiffies = bts_flags
605};
606
607static inline unsigned long bts_get(const char *base, enum bts_field field)
608{
609 base += (bts_cfg.sizeof_field * field);
610 return *(unsigned long *)base;
611}
558 612
559static int ptrace_bts_get_size(struct task_struct *child) 613static inline void bts_set(char *base, enum bts_field field, unsigned long val)
560{ 614{
561 if (!child->thread.ds_area_msr) 615 base += (bts_cfg.sizeof_field * field);
562 return -ENXIO; 616 (*(unsigned long *)base) = val;
617}
563 618
564 return ds_get_bts_index((void *)child->thread.ds_area_msr); 619/*
620 * Translate a BTS record from the raw format into the bts_struct format
621 *
622 * out (out): bts_struct interpretation
623 * raw: raw BTS record
624 */
625static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw)
626{
627 memset(out, 0, sizeof(*out));
628 if (bts_get(raw, bts_from) == bts_escape) {
629 out->qualifier = bts_get(raw, bts_qual);
630 out->variant.jiffies = bts_get(raw, bts_jiffies);
631 } else {
632 out->qualifier = BTS_BRANCH;
633 out->variant.lbr.from_ip = bts_get(raw, bts_from);
634 out->variant.lbr.to_ip = bts_get(raw, bts_to);
635 }
565} 636}
566 637
567static int ptrace_bts_read_record(struct task_struct *child, 638static int ptrace_bts_read_record(struct task_struct *child, size_t index,
568 long index,
569 struct bts_struct __user *out) 639 struct bts_struct __user *out)
570{ 640{
571 struct bts_struct ret; 641 struct bts_struct ret;
572 int retval; 642 const void *bts_record;
573 int bts_end; 643 size_t bts_index, bts_end;
574 int bts_index; 644 int error;
575
576 if (!child->thread.ds_area_msr)
577 return -ENXIO;
578 645
579 if (index < 0) 646 error = ds_get_bts_end(child, &bts_end);
580 return -EINVAL; 647 if (error < 0)
648 return error;
581 649
582 bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
583 if (bts_end <= index) 650 if (bts_end <= index)
584 return -EINVAL; 651 return -EINVAL;
585 652
653 error = ds_get_bts_index(child, &bts_index);
654 if (error < 0)
655 return error;
656
586 /* translate the ptrace bts index into the ds bts index */ 657 /* translate the ptrace bts index into the ds bts index */
587 bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr); 658 bts_index += bts_end - (index + 1);
588 bts_index -= (index + 1); 659 if (bts_end <= bts_index)
589 if (bts_index < 0) 660 bts_index -= bts_end;
590 bts_index += bts_end;
591 661
592 retval = ds_read_bts((void *)child->thread.ds_area_msr, 662 error = ds_access_bts(child, bts_index, &bts_record);
593 bts_index, &ret); 663 if (error < 0)
594 if (retval < 0) 664 return error;
595 return retval; 665
666 ptrace_bts_translate_record(&ret, bts_record);
596 667
597 if (copy_to_user(out, &ret, sizeof(ret))) 668 if (copy_to_user(out, &ret, sizeof(ret)))
598 return -EFAULT; 669 return -EFAULT;
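
In the reworked ptrace_bts_read_record() above, ptrace index 0 names the most recently written record, so the ptrace index is mapped onto the circular DS buffer relative to the current write position and wrapped once if it runs past the end. A worked example of that index arithmetic; the function and parameter names here are illustrative only.

#include <stdio.h>

/* Translate a ptrace BTS index (0 = most recent record) into a slot in the
 * circular DS buffer, mirroring the arithmetic in ptrace_bts_read_record().
 * 'write_index' is where the next record would be stored, 'bts_end' is the
 * number of records the buffer can hold. */
static unsigned int ptrace_to_ds_index(unsigned int write_index,
                                       unsigned int bts_end,
                                       unsigned int index)
{
        unsigned int ds_index = write_index + bts_end - (index + 1);

        if (bts_end <= ds_index)
                ds_index -= bts_end;    /* wrap once, as in the kernel code */
        return ds_index;
}

int main(void)
{
        unsigned int i;

        /* an 8-record buffer whose write index currently sits at slot 3 */
        for (i = 0; i < 4; i++)
                printf("ptrace index %u -> ds slot %u\n",
                       i, ptrace_to_ds_index(3, 8, i));
        return 0;
}
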
@@ -600,101 +671,106 @@ static int ptrace_bts_read_record(struct task_struct *child,
600 return sizeof(ret); 671 return sizeof(ret);
601} 672}
602 673
603static int ptrace_bts_clear(struct task_struct *child)
604{
605 if (!child->thread.ds_area_msr)
606 return -ENXIO;
607
608 return ds_clear((void *)child->thread.ds_area_msr);
609}
610
611static int ptrace_bts_drain(struct task_struct *child, 674static int ptrace_bts_drain(struct task_struct *child,
612 long size, 675 long size,
613 struct bts_struct __user *out) 676 struct bts_struct __user *out)
614{ 677{
615 int end, i; 678 struct bts_struct ret;
616 void *ds = (void *)child->thread.ds_area_msr; 679 const unsigned char *raw;
617 680 size_t end, i;
618 if (!ds) 681 int error;
619 return -ENXIO;
620 682
621 end = ds_get_bts_index(ds); 683 error = ds_get_bts_index(child, &end);
622 if (end <= 0) 684 if (error < 0)
623 return end; 685 return error;
624 686
625 if (size < (end * sizeof(struct bts_struct))) 687 if (size < (end * sizeof(struct bts_struct)))
626 return -EIO; 688 return -EIO;
627 689
628 for (i = 0; i < end; i++, out++) { 690 error = ds_access_bts(child, 0, (const void **)&raw);
629 struct bts_struct ret; 691 if (error < 0)
630 int retval; 692 return error;
631 693
632 retval = ds_read_bts(ds, i, &ret); 694 for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) {
633 if (retval < 0) 695 ptrace_bts_translate_record(&ret, raw);
634 return retval;
635 696
636 if (copy_to_user(out, &ret, sizeof(ret))) 697 if (copy_to_user(out, &ret, sizeof(ret)))
637 return -EFAULT; 698 return -EFAULT;
638 } 699 }
639 700
640 ds_clear(ds); 701 error = ds_clear_bts(child);
702 if (error < 0)
703 return error;
641 704
642 return end; 705 return end;
643} 706}
644 707
708static void ptrace_bts_ovfl(struct task_struct *child)
709{
710 send_sig(child->thread.bts_ovfl_signal, child, 0);
711}
712
645static int ptrace_bts_config(struct task_struct *child, 713static int ptrace_bts_config(struct task_struct *child,
646 long cfg_size, 714 long cfg_size,
647 const struct ptrace_bts_config __user *ucfg) 715 const struct ptrace_bts_config __user *ucfg)
648{ 716{
649 struct ptrace_bts_config cfg; 717 struct ptrace_bts_config cfg;
650 int bts_size, ret = 0; 718 int error = 0;
651 void *ds; 719
720 error = -EOPNOTSUPP;
721 if (!bts_cfg.sizeof_bts)
722 goto errout;
652 723
724 error = -EIO;
653 if (cfg_size < sizeof(cfg)) 725 if (cfg_size < sizeof(cfg))
654 return -EIO; 726 goto errout;
655 727
728 error = -EFAULT;
656 if (copy_from_user(&cfg, ucfg, sizeof(cfg))) 729 if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
657 return -EFAULT; 730 goto errout;
658 731
659 if ((int)cfg.size < 0) 732 error = -EINVAL;
660 return -EINVAL; 733 if ((cfg.flags & PTRACE_BTS_O_SIGNAL) &&
734 !(cfg.flags & PTRACE_BTS_O_ALLOC))
735 goto errout;
661 736
662 bts_size = 0; 737 if (cfg.flags & PTRACE_BTS_O_ALLOC) {
663 ds = (void *)child->thread.ds_area_msr; 738 ds_ovfl_callback_t ovfl = NULL;
664 if (ds) { 739 unsigned int sig = 0;
665 bts_size = ds_get_bts_size(ds); 740
666 if (bts_size < 0) 741 /* we ignore the error in case we were not tracing child */
667 return bts_size; 742 (void)ds_release_bts(child);
668 }
669 cfg.size = PAGE_ALIGN(cfg.size);
670 743
671 if (bts_size != cfg.size) { 744 if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
672 ret = ptrace_bts_realloc(child, cfg.size, 745 if (!cfg.signal)
673 cfg.flags & PTRACE_BTS_O_CUT_SIZE); 746 goto errout;
674 if (ret < 0) 747
748 sig = cfg.signal;
749 ovfl = ptrace_bts_ovfl;
750 }
751
752 error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl);
753 if (error < 0)
675 goto errout; 754 goto errout;
676 755
677 ds = (void *)child->thread.ds_area_msr; 756 child->thread.bts_ovfl_signal = sig;
678 } 757 }
679 758
680 if (cfg.flags & PTRACE_BTS_O_SIGNAL) 759 error = -EINVAL;
681 ret = ds_set_overflow(ds, DS_O_SIGNAL); 760 if (!child->thread.ds_ctx && cfg.flags)
682 else
683 ret = ds_set_overflow(ds, DS_O_WRAP);
684 if (ret < 0)
685 goto errout; 761 goto errout;
686 762
687 if (cfg.flags & PTRACE_BTS_O_TRACE) 763 if (cfg.flags & PTRACE_BTS_O_TRACE)
688 child->thread.debugctlmsr |= ds_debugctl_mask(); 764 child->thread.debugctlmsr |= bts_cfg.debugctl_mask;
689 else 765 else
690 child->thread.debugctlmsr &= ~ds_debugctl_mask(); 766 child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
691 767
692 if (cfg.flags & PTRACE_BTS_O_SCHED) 768 if (cfg.flags & PTRACE_BTS_O_SCHED)
693 set_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 769 set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
694 else 770 else
695 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 771 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
696 772
697 ret = sizeof(cfg); 773 error = sizeof(cfg);
698 774
699out: 775out:
700 if (child->thread.debugctlmsr) 776 if (child->thread.debugctlmsr)
@@ -702,10 +778,10 @@ out:
702 else 778 else
703 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); 779 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
704 780
705 return ret; 781 return error;
706 782
707errout: 783errout:
708 child->thread.debugctlmsr &= ~ds_debugctl_mask(); 784 child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
709 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 785 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
710 goto out; 786 goto out;
711} 787}
@@ -714,29 +790,40 @@ static int ptrace_bts_status(struct task_struct *child,
714 long cfg_size, 790 long cfg_size,
715 struct ptrace_bts_config __user *ucfg) 791 struct ptrace_bts_config __user *ucfg)
716{ 792{
717 void *ds = (void *)child->thread.ds_area_msr;
718 struct ptrace_bts_config cfg; 793 struct ptrace_bts_config cfg;
794 size_t end;
795 const void *base, *max;
796 int error;
719 797
720 if (cfg_size < sizeof(cfg)) 798 if (cfg_size < sizeof(cfg))
721 return -EIO; 799 return -EIO;
722 800
723 memset(&cfg, 0, sizeof(cfg)); 801 error = ds_get_bts_end(child, &end);
802 if (error < 0)
803 return error;
724 804
725 if (ds) { 805 error = ds_access_bts(child, /* index = */ 0, &base);
726 cfg.size = ds_get_bts_size(ds); 806 if (error < 0)
807 return error;
727 808
728 if (ds_get_overflow(ds) == DS_O_SIGNAL) 809 error = ds_access_bts(child, /* index = */ end, &max);
729 cfg.flags |= PTRACE_BTS_O_SIGNAL; 810 if (error < 0)
811 return error;
730 812
731 if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) && 813 memset(&cfg, 0, sizeof(cfg));
732 child->thread.debugctlmsr & ds_debugctl_mask()) 814 cfg.size = (max - base);
733 cfg.flags |= PTRACE_BTS_O_TRACE; 815 cfg.signal = child->thread.bts_ovfl_signal;
816 cfg.bts_size = sizeof(struct bts_struct);
734 817
735 if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS)) 818 if (cfg.signal)
736 cfg.flags |= PTRACE_BTS_O_SCHED; 819 cfg.flags |= PTRACE_BTS_O_SIGNAL;
737 }
738 820
739 cfg.bts_size = sizeof(struct bts_struct); 821 if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
822 child->thread.debugctlmsr & bts_cfg.debugctl_mask)
823 cfg.flags |= PTRACE_BTS_O_TRACE;
824
825 if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
826 cfg.flags |= PTRACE_BTS_O_SCHED;
740 827
741 if (copy_to_user(ucfg, &cfg, sizeof(cfg))) 828 if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
742 return -EFAULT; 829 return -EFAULT;
@@ -744,89 +831,38 @@ static int ptrace_bts_status(struct task_struct *child,
744 return sizeof(cfg); 831 return sizeof(cfg);
745} 832}
746 833
747
748static int ptrace_bts_write_record(struct task_struct *child, 834static int ptrace_bts_write_record(struct task_struct *child,
749 const struct bts_struct *in) 835 const struct bts_struct *in)
750{ 836{
751 int retval; 837 unsigned char bts_record[BTS_MAX_RECORD_SIZE];
752 838
753 if (!child->thread.ds_area_msr) 839 BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts);
754 return -ENXIO;
755 840
756 retval = ds_write_bts((void *)child->thread.ds_area_msr, in); 841 memset(bts_record, 0, bts_cfg.sizeof_bts);
757 if (retval) 842 switch (in->qualifier) {
758 return retval; 843 case BTS_INVALID:
844 break;
759 845
760 return sizeof(*in); 846 case BTS_BRANCH:
761} 847 bts_set(bts_record, bts_from, in->variant.lbr.from_ip);
848 bts_set(bts_record, bts_to, in->variant.lbr.to_ip);
849 break;
762 850
763static int ptrace_bts_realloc(struct task_struct *child, 851 case BTS_TASK_ARRIVES:
764 int size, int reduce_size) 852 case BTS_TASK_DEPARTS:
765{ 853 bts_set(bts_record, bts_from, bts_escape);
766 unsigned long rlim, vm; 854 bts_set(bts_record, bts_qual, in->qualifier);
767 int ret, old_size; 855 bts_set(bts_record, bts_jiffies, in->variant.jiffies);
856 break;
768 857
769 if (size < 0) 858 default:
770 return -EINVAL; 859 return -EINVAL;
771
772 old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
773 if (old_size < 0)
774 return old_size;
775
776 ret = ds_free((void **)&child->thread.ds_area_msr);
777 if (ret < 0)
778 goto out;
779
780 size >>= PAGE_SHIFT;
781 old_size >>= PAGE_SHIFT;
782
783 current->mm->total_vm -= old_size;
784 current->mm->locked_vm -= old_size;
785
786 if (size == 0)
787 goto out;
788
789 rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
790 vm = current->mm->total_vm + size;
791 if (rlim < vm) {
792 ret = -ENOMEM;
793
794 if (!reduce_size)
795 goto out;
796
797 size = rlim - current->mm->total_vm;
798 if (size <= 0)
799 goto out;
800 }
801
802 rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
803 vm = current->mm->locked_vm + size;
804 if (rlim < vm) {
805 ret = -ENOMEM;
806
807 if (!reduce_size)
808 goto out;
809
810 size = rlim - current->mm->locked_vm;
811 if (size <= 0)
812 goto out;
813 } 860 }
814 861
815 ret = ds_allocate((void **)&child->thread.ds_area_msr, 862 /* The writing task will be the switched-to task on a context
816 size << PAGE_SHIFT); 863 * switch. It needs to write into the switched-from task's BTS
817 if (ret < 0) 864 * buffer. */
818 goto out; 865 return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts);
819
820 current->mm->total_vm += size;
821 current->mm->locked_vm += size;
822
823out:
824 if (child->thread.ds_area_msr)
825 set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
826 else
827 clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
828
829 return ret;
830} 866}
831 867
832void ptrace_bts_take_timestamp(struct task_struct *tsk, 868void ptrace_bts_take_timestamp(struct task_struct *tsk,
@@ -839,7 +875,66 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk,
839 875
840 ptrace_bts_write_record(tsk, &rec); 876 ptrace_bts_write_record(tsk, &rec);
841} 877}
842#endif /* X86_BTS */ 878
879static const struct bts_configuration bts_cfg_netburst = {
880 .sizeof_bts = sizeof(long) * 3,
881 .sizeof_field = sizeof(long),
882 .debugctl_mask = (1<<2)|(1<<3)|(1<<5)
883};
884
885static const struct bts_configuration bts_cfg_pentium_m = {
886 .sizeof_bts = sizeof(long) * 3,
887 .sizeof_field = sizeof(long),
888 .debugctl_mask = (1<<6)|(1<<7)
889};
890
891static const struct bts_configuration bts_cfg_core2 = {
892 .sizeof_bts = 8 * 3,
893 .sizeof_field = 8,
894 .debugctl_mask = (1<<6)|(1<<7)|(1<<9)
895};
896
897static inline void bts_configure(const struct bts_configuration *cfg)
898{
899 bts_cfg = *cfg;
900}
901
902void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
903{
904 switch (c->x86) {
905 case 0x6:
906 switch (c->x86_model) {
907 case 0xD:
908 case 0xE: /* Pentium M */
909 bts_configure(&bts_cfg_pentium_m);
910 break;
911 case 0xF: /* Core2 */
912 case 0x1C: /* Atom */
913 bts_configure(&bts_cfg_core2);
914 break;
915 default:
916 /* sorry, don't know about them */
917 break;
918 }
919 break;
920 case 0xF:
921 switch (c->x86_model) {
922 case 0x0:
923 case 0x1:
924 case 0x2: /* Netburst */
925 bts_configure(&bts_cfg_netburst);
926 break;
927 default:
928 /* sorry, don't know about them */
929 break;
930 }
931 break;
932 default:
933 /* sorry, don't know about them */
934 break;
935 }
936}
937#endif /* CONFIG_X86_PTRACE_BTS */
843 938
844/* 939/*
845 * Called by kernel/ptrace.c when detaching.. 940 * Called by kernel/ptrace.c when detaching..
@@ -852,15 +947,15 @@ void ptrace_disable(struct task_struct *child)
852#ifdef TIF_SYSCALL_EMU 947#ifdef TIF_SYSCALL_EMU
853 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); 948 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
854#endif 949#endif
855 if (child->thread.ds_area_msr) { 950#ifdef CONFIG_X86_PTRACE_BTS
856#ifdef X86_BTS 951 (void)ds_release_bts(child);
857 ptrace_bts_realloc(child, 0, 0); 952
858#endif 953 child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
859 child->thread.debugctlmsr &= ~ds_debugctl_mask(); 954 if (!child->thread.debugctlmsr)
860 if (!child->thread.debugctlmsr) 955 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
861 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); 956
862 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 957 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
863 } 958#endif /* CONFIG_X86_PTRACE_BTS */
864} 959}
865 960
866#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 961#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -980,7 +1075,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
980 /* 1075 /*
981 * These bits need more cooking - not enabled yet: 1076 * These bits need more cooking - not enabled yet:
982 */ 1077 */
983#ifdef X86_BTS 1078#ifdef CONFIG_X86_PTRACE_BTS
984 case PTRACE_BTS_CONFIG: 1079 case PTRACE_BTS_CONFIG:
985 ret = ptrace_bts_config 1080 ret = ptrace_bts_config
986 (child, data, (struct ptrace_bts_config __user *)addr); 1081 (child, data, (struct ptrace_bts_config __user *)addr);
@@ -992,7 +1087,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
992 break; 1087 break;
993 1088
994 case PTRACE_BTS_SIZE: 1089 case PTRACE_BTS_SIZE:
995 ret = ptrace_bts_get_size(child); 1090 ret = ds_get_bts_index(child, /* pos = */ NULL);
996 break; 1091 break;
997 1092
998 case PTRACE_BTS_GET: 1093 case PTRACE_BTS_GET:
@@ -1001,14 +1096,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
1001 break; 1096 break;
1002 1097
1003 case PTRACE_BTS_CLEAR: 1098 case PTRACE_BTS_CLEAR:
1004 ret = ptrace_bts_clear(child); 1099 ret = ds_clear_bts(child);
1005 break; 1100 break;
1006 1101
1007 case PTRACE_BTS_DRAIN: 1102 case PTRACE_BTS_DRAIN:
1008 ret = ptrace_bts_drain 1103 ret = ptrace_bts_drain
1009 (child, data, (struct bts_struct __user *) addr); 1104 (child, data, (struct bts_struct __user *) addr);
1010 break; 1105 break;
1011#endif 1106#endif /* CONFIG_X86_PTRACE_BTS */
1012 1107
1013 default: 1108 default:
1014 ret = ptrace_request(child, request, addr, data); 1109 ret = ptrace_request(child, request, addr, data);
@@ -1375,30 +1470,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
1375 force_sig_info(SIGTRAP, &info, tsk); 1470 force_sig_info(SIGTRAP, &info, tsk);
1376} 1471}
1377 1472
1378static void syscall_trace(struct pt_regs *regs)
1379{
1380 if (!(current->ptrace & PT_PTRACED))
1381 return;
1382
1383#if 0
1384 printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
1385 current->comm,
1386 regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
1387 current_thread_info()->flags, current->ptrace);
1388#endif
1389
1390 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
1391 ? 0x80 : 0));
1392 /*
1393 * this isn't the same as continuing with a signal, but it will do
1394 * for normal use. strace only continues with a signal if the
1395 * stopping signal is not SIGTRAP. -brl
1396 */
1397 if (current->exit_code) {
1398 send_sig(current->exit_code, current, 1);
1399 current->exit_code = 0;
1400 }
1401}
1402 1473
1403#ifdef CONFIG_X86_32 1474#ifdef CONFIG_X86_32
1404# define IS_IA32 1 1475# define IS_IA32 1
@@ -1432,8 +1503,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
1432 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) 1503 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1433 ret = -1L; 1504 ret = -1L;
1434 1505
1435 if (ret || test_thread_flag(TIF_SYSCALL_TRACE)) 1506 if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
1436 syscall_trace(regs); 1507 tracehook_report_syscall_entry(regs))
1508 ret = -1L;
1437 1509
1438 if (unlikely(current->audit_context)) { 1510 if (unlikely(current->audit_context)) {
1439 if (IS_IA32) 1511 if (IS_IA32)
@@ -1459,7 +1531,7 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
1459 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); 1531 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1460 1532
1461 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1533 if (test_thread_flag(TIF_SYSCALL_TRACE))
1462 syscall_trace(regs); 1534 tracehook_report_syscall_exit(regs, 0);
1463 1535
1464 /* 1536 /*
1465 * If TIF_SYSCALL_EMU is set, we only get here because of 1537 * If TIF_SYSCALL_EMU is set, we only get here because of
@@ -1475,6 +1547,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
1475 * system call instruction. 1547 * system call instruction.
1476 */ 1548 */
1477 if (test_thread_flag(TIF_SINGLESTEP) && 1549 if (test_thread_flag(TIF_SINGLESTEP) &&
1478 (current->ptrace & PT_PTRACED)) 1550 tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
1479 send_sigtrap(current, regs, 0); 1551 send_sigtrap(current, regs, 0);
1480} 1552}
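
For illustration, a minimal user-space sketch of how a tracer might drive the BTS interface wired up above. It assumes the exported kernel headers provide the PTRACE_BTS_* requests and struct ptrace_bts_config (asm/ptrace-abi.h), that PTRACE_BTS_SIZE returns the number of buffered records, and it follows the arch_ptrace() dispatch shown in the hunks: addr carries the user pointer, data the size in bytes. Error handling is trimmed.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace-abi.h>	/* PTRACE_BTS_*, struct ptrace_bts_config */

    static void sample_branch_trace(pid_t child)
    {
    	struct ptrace_bts_config cfg = {
    		.size  = 4096,	/* bytes of BTS buffer to allocate */
    		.flags = PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
    	};
    	struct ptrace_bts_config st;
    	long nrec;

    	/* addr = config pointer, data = its size (see arch_ptrace above) */
    	if (ptrace(PTRACE_BTS_CONFIG, child, &cfg, (void *)sizeof(cfg)) < 0)
    		return;

    	/* ...resume the child and let it run for a while... */

    	/* number of buffered records, then the per-record size via STATUS */
    	nrec = ptrace(PTRACE_BTS_SIZE, child, NULL, NULL);
    	if (nrec <= 0 ||
    	    ptrace(PTRACE_BTS_STATUS, child, &st, (void *)sizeof(st)) < 0)
    		return;

    	{
    		void *buf = malloc(nrec * st.bts_size);

    		/* DRAIN copies the records out and clears the trace buffer */
    		if (buf && ptrace(PTRACE_BTS_DRAIN, child, buf,
    				  (void *)(nrec * st.bts_size)) >= 0)
    			printf("drained %ld branch records\n", nrec);
    		free(buf);
    	}
    }

ptrace_bts_drain() above copies one translated bts_struct per record and then clears the buffer, so a second PTRACE_BTS_SIZE issued right away should report zero records.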
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb..f4c93f1cfc1 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(pm_power_off);
29 29
30static const struct desc_ptr no_idt = {}; 30static const struct desc_ptr no_idt = {};
31static int reboot_mode; 31static int reboot_mode;
32enum reboot_type reboot_type = BOOT_KBD; 32/*
33 * Keyboard reset and triple fault may result in INIT, not RESET, which
34 * doesn't work when we're in vmx root mode. Try ACPI first.
35 */
36enum reboot_type reboot_type = BOOT_ACPI;
33int reboot_force; 37int reboot_force;
34 38
35#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) 39#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
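
The comment above documents why the default moves from BOOT_KBD to BOOT_ACPI: a keyboard-controller reset or a triple fault may be delivered as INIT rather than RESET while the CPU is in VMX root mode. Systems that want the old behaviour can still select a method explicitly with the reboot= parameter parsed in this same file; the spellings below follow the x86 boot-options documentation, and the boot-loader entry and device names are made-up examples:

    # hypothetical GRUB entry forcing the keyboard-controller method
    kernel /vmlinuz ro root=/dev/sda1 reboot=kbd
    # or the triple-fault method
    kernel /vmlinuz ro root=/dev/sda1 reboot=triple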
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9838f2539df..141efab5240 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -223,6 +223,9 @@ unsigned long saved_video_mode;
223#define RAMDISK_LOAD_FLAG 0x4000 223#define RAMDISK_LOAD_FLAG 0x4000
224 224
225static char __initdata command_line[COMMAND_LINE_SIZE]; 225static char __initdata command_line[COMMAND_LINE_SIZE];
226#ifdef CONFIG_CMDLINE_BOOL
227static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
228#endif
226 229
227#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) 230#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
228struct edd edd; 231struct edd edd;
@@ -665,6 +668,19 @@ void __init setup_arch(char **cmdline_p)
665 bss_resource.start = virt_to_phys(&__bss_start); 668 bss_resource.start = virt_to_phys(&__bss_start);
666 bss_resource.end = virt_to_phys(&__bss_stop)-1; 669 bss_resource.end = virt_to_phys(&__bss_stop)-1;
667 670
671#ifdef CONFIG_CMDLINE_BOOL
672#ifdef CONFIG_CMDLINE_OVERRIDE
673 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
674#else
675 if (builtin_cmdline[0]) {
676 /* append boot loader cmdline to builtin */
677 strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
678 strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
679 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
680 }
681#endif
682#endif
683
668 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 684 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
669 *cmdline_p = command_line; 685 *cmdline_p = command_line;
670 686
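
The CONFIG_CMDLINE_BOOL block above appends the boot loader's command line to the one built into the image (or, with CONFIG_CMDLINE_OVERRIDE, ignores the boot loader entirely). A small stand-alone sketch of the same ordering, using snprintf in place of the kernel's strlcpy/strlcat; the option strings are invented:

    #include <stdio.h>

    #define COMMAND_LINE_SIZE 2048

    int main(void)
    {
    	/* hypothetical CONFIG_CMDLINE baked into the kernel image */
    	char builtin[COMMAND_LINE_SIZE] = "console=ttyS0,115200 quiet";
    	/* what the boot loader actually passed */
    	char bootloader[COMMAND_LINE_SIZE] = "root=/dev/sda1 ro";
    	char cmdline[COMMAND_LINE_SIZE];

    #ifdef CMDLINE_OVERRIDE_DEMO
    	/* CONFIG_CMDLINE_OVERRIDE: the boot loader's string is dropped */
    	snprintf(cmdline, sizeof(cmdline), "%s", builtin);
    #else
    	/* default: built-in options first, boot loader's appended after a space */
    	snprintf(cmdline, sizeof(cmdline), "%s %s", builtin, bootloader);
    #endif

    	/* prints: console=ttyS0,115200 quiet root=/dev/sda1 ro */
    	printf("%s\n", cmdline);
    	return 0;
    }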
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 76e305e064f..0e67f72d931 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -162,9 +162,16 @@ void __init setup_per_cpu_areas(void)
162 printk(KERN_INFO 162 printk(KERN_INFO
163 "cpu %d has no node %d or node-local memory\n", 163 "cpu %d has no node %d or node-local memory\n",
164 cpu, node); 164 cpu, node);
165 if (ptr)
166 printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
167 cpu, __pa(ptr));
165 } 168 }
166 else 169 else {
167 ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); 170 ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
171 if (ptr)
172 printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
173 cpu, node, __pa(ptr));
174 }
168#endif 175#endif
169 per_cpu_offset(cpu) = ptr - __per_cpu_start; 176 per_cpu_offset(cpu) = ptr - __per_cpu_start;
170 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 177 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h
index 72bbb519d2d..8b4956e800a 100644
--- a/arch/x86/kernel/sigframe.h
+++ b/arch/x86/kernel/sigframe.h
@@ -24,4 +24,9 @@ struct rt_sigframe {
24 struct ucontext uc; 24 struct ucontext uc;
25 struct siginfo info; 25 struct siginfo info;
26}; 26};
27
28int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
29 sigset_t *set, struct pt_regs *regs);
30int ia32_setup_frame(int sig, struct k_sigaction *ka,
31 sigset_t *set, struct pt_regs *regs);
27#endif 32#endif
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 6fb5bcdd893..2a2435d3037 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/wait.h> 19#include <linux/wait.h>
20#include <linux/tracehook.h>
20#include <linux/elf.h> 21#include <linux/elf.h>
21#include <linux/smp.h> 22#include <linux/smp.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
@@ -26,6 +27,7 @@
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27#include <asm/i387.h> 28#include <asm/i387.h>
28#include <asm/vdso.h> 29#include <asm/vdso.h>
30#include <asm/syscalls.h>
29 31
30#include "sigframe.h" 32#include "sigframe.h"
31 33
@@ -558,8 +560,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
558 * handler too. 560 * handler too.
559 */ 561 */
560 regs->flags &= ~X86_EFLAGS_TF; 562 regs->flags &= ~X86_EFLAGS_TF;
561 if (test_thread_flag(TIF_SINGLESTEP))
562 ptrace_notify(SIGTRAP);
563 563
564 spin_lock_irq(&current->sighand->siglock); 564 spin_lock_irq(&current->sighand->siglock);
565 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 565 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
@@ -568,6 +568,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
568 recalc_sigpending(); 568 recalc_sigpending();
569 spin_unlock_irq(&current->sighand->siglock); 569 spin_unlock_irq(&current->sighand->siglock);
570 570
571 tracehook_signal_handler(sig, info, ka, regs,
572 test_thread_flag(TIF_SINGLESTEP));
573
571 return 0; 574 return 0;
572} 575}
573 576
@@ -661,5 +664,10 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
661 if (thread_info_flags & _TIF_SIGPENDING) 664 if (thread_info_flags & _TIF_SIGPENDING)
662 do_signal(regs); 665 do_signal(regs);
663 666
667 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
668 clear_thread_flag(TIF_NOTIFY_RESUME);
669 tracehook_notify_resume(regs);
670 }
671
664 clear_thread_flag(TIF_IRET); 672 clear_thread_flag(TIF_IRET);
665} 673}
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index ca316b5b742..694aa888bb1 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -15,17 +15,21 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/wait.h> 16#include <linux/wait.h>
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18#include <linux/tracehook.h>
18#include <linux/unistd.h> 19#include <linux/unistd.h>
19#include <linux/stddef.h> 20#include <linux/stddef.h>
20#include <linux/personality.h> 21#include <linux/personality.h>
21#include <linux/compiler.h> 22#include <linux/compiler.h>
23#include <linux/uaccess.h>
24
22#include <asm/processor.h> 25#include <asm/processor.h>
23#include <asm/ucontext.h> 26#include <asm/ucontext.h>
24#include <asm/uaccess.h>
25#include <asm/i387.h> 27#include <asm/i387.h>
26#include <asm/proto.h> 28#include <asm/proto.h>
27#include <asm/ia32_unistd.h> 29#include <asm/ia32_unistd.h>
28#include <asm/mce.h> 30#include <asm/mce.h>
31#include <asm/syscall.h>
32#include <asm/syscalls.h>
29#include "sigframe.h" 33#include "sigframe.h"
30 34
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -41,11 +45,6 @@
41# define FIX_EFLAGS __FIX_EFLAGS 45# define FIX_EFLAGS __FIX_EFLAGS
42#endif 46#endif
43 47
44int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
45 sigset_t *set, struct pt_regs * regs);
46int ia32_setup_frame(int sig, struct k_sigaction *ka,
47 sigset_t *set, struct pt_regs * regs);
48
49asmlinkage long 48asmlinkage long
50sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 49sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
51 struct pt_regs *regs) 50 struct pt_regs *regs)
@@ -128,7 +127,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
128 /* Always make any pending restarted system calls return -EINTR */ 127 /* Always make any pending restarted system calls return -EINTR */
129 current_thread_info()->restart_block.fn = do_no_restart_syscall; 128 current_thread_info()->restart_block.fn = do_no_restart_syscall;
130 129
131#define COPY(x) err |= __get_user(regs->x, &sc->x) 130#define COPY(x) (err |= __get_user(regs->x, &sc->x))
132 131
133 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 132 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
134 COPY(dx); COPY(cx); COPY(ip); 133 COPY(dx); COPY(cx); COPY(ip);
@@ -158,7 +157,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
158 } 157 }
159 158
160 { 159 {
161 struct _fpstate __user * buf; 160 struct _fpstate __user *buf;
162 err |= __get_user(buf, &sc->fpstate); 161 err |= __get_user(buf, &sc->fpstate);
163 162
164 if (buf) { 163 if (buf) {
@@ -198,7 +197,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
198 current->blocked = set; 197 current->blocked = set;
199 recalc_sigpending(); 198 recalc_sigpending();
200 spin_unlock_irq(&current->sighand->siglock); 199 spin_unlock_irq(&current->sighand->siglock);
201 200
202 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 201 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
203 goto badframe; 202 goto badframe;
204 203
@@ -208,16 +207,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
208 return ax; 207 return ax;
209 208
210badframe: 209badframe:
211 signal_fault(regs,frame,"sigreturn"); 210 signal_fault(regs, frame, "sigreturn");
212 return 0; 211 return 0;
213} 212}
214 213
215/* 214/*
216 * Set up a signal frame. 215 * Set up a signal frame.
217 */ 216 */
218 217
219static inline int 218static inline int
220setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me) 219setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
220 unsigned long mask, struct task_struct *me)
221{ 221{
222 int err = 0; 222 int err = 0;
223 223
@@ -273,35 +273,35 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
273} 273}
274 274
275static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 275static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
276 sigset_t *set, struct pt_regs * regs) 276 sigset_t *set, struct pt_regs *regs)
277{ 277{
278 struct rt_sigframe __user *frame; 278 struct rt_sigframe __user *frame;
279 struct _fpstate __user *fp = NULL; 279 struct _fpstate __user *fp = NULL;
280 int err = 0; 280 int err = 0;
281 struct task_struct *me = current; 281 struct task_struct *me = current;
282 282
283 if (used_math()) { 283 if (used_math()) {
284 fp = get_stack(ka, regs, sizeof(struct _fpstate)); 284 fp = get_stack(ka, regs, sizeof(struct _fpstate));
285 frame = (void __user *)round_down( 285 frame = (void __user *)round_down(
286 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; 286 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
287 287
288 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) 288 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
289 goto give_sigsegv; 289 goto give_sigsegv;
290 290
291 if (save_i387(fp) < 0) 291 if (save_i387(fp) < 0)
292 err |= -1; 292 err |= -1;
293 } else 293 } else
294 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; 294 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
295 295
296 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 296 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
297 goto give_sigsegv; 297 goto give_sigsegv;
298 298
299 if (ka->sa.sa_flags & SA_SIGINFO) { 299 if (ka->sa.sa_flags & SA_SIGINFO) {
300 err |= copy_siginfo_to_user(&frame->info, info); 300 err |= copy_siginfo_to_user(&frame->info, info);
301 if (err) 301 if (err)
302 goto give_sigsegv; 302 goto give_sigsegv;
303 } 303 }
304 304
305 /* Create the ucontext. */ 305 /* Create the ucontext. */
306 err |= __put_user(0, &frame->uc.uc_flags); 306 err |= __put_user(0, &frame->uc.uc_flags);
307 err |= __put_user(0, &frame->uc.uc_link); 307 err |= __put_user(0, &frame->uc.uc_link);
@@ -311,9 +311,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
311 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 311 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
312 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); 312 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
313 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); 313 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
314 if (sizeof(*set) == 16) { 314 if (sizeof(*set) == 16) {
315 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); 315 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
316 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 316 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
317 } else 317 } else
318 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 318 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
319 319
@@ -324,7 +324,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
324 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 324 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
325 } else { 325 } else {
326 /* could use a vstub here */ 326 /* could use a vstub here */
327 goto give_sigsegv; 327 goto give_sigsegv;
328 } 328 }
329 329
330 if (err) 330 if (err)
@@ -332,7 +332,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
332 332
333 /* Set up registers for signal handler */ 333 /* Set up registers for signal handler */
334 regs->di = sig; 334 regs->di = sig;
335 /* In case the signal handler was declared without prototypes */ 335 /* In case the signal handler was declared without prototypes */
336 regs->ax = 0; 336 regs->ax = 0;
337 337
338 /* This also works for non SA_SIGINFO handlers because they expect the 338 /* This also works for non SA_SIGINFO handlers because they expect the
@@ -355,37 +355,8 @@ give_sigsegv:
355} 355}
356 356
357/* 357/*
358 * Return -1L or the syscall number that @regs is executing.
359 */
360static long current_syscall(struct pt_regs *regs)
361{
362 /*
363 * We always sign-extend a -1 value being set here,
364 * so this is always either -1L or a syscall number.
365 */
366 return regs->orig_ax;
367}
368
369/*
370 * Return a value that is -EFOO if the system call in @regs->orig_ax
371 * returned an error. This only works for @regs from @current.
372 */
373static long current_syscall_ret(struct pt_regs *regs)
374{
375#ifdef CONFIG_IA32_EMULATION
376 if (test_thread_flag(TIF_IA32))
377 /*
378 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
379 * and will match correctly in comparisons.
380 */
381 return (int) regs->ax;
382#endif
383 return regs->ax;
384}
385
386/*
387 * OK, we're invoking a handler 358 * OK, we're invoking a handler
388 */ 359 */
389 360
390static int 361static int
391handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 362handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
@@ -394,9 +365,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
394 int ret; 365 int ret;
395 366
396 /* Are we from a system call? */ 367 /* Are we from a system call? */
397 if (current_syscall(regs) >= 0) { 368 if (syscall_get_nr(current, regs) >= 0) {
398 /* If so, check system call restarting.. */ 369 /* If so, check system call restarting.. */
399 switch (current_syscall_ret(regs)) { 370 switch (syscall_get_error(current, regs)) {
400 case -ERESTART_RESTARTBLOCK: 371 case -ERESTART_RESTARTBLOCK:
401 case -ERESTARTNOHAND: 372 case -ERESTARTNOHAND:
402 regs->ax = -EINTR; 373 regs->ax = -EINTR;
@@ -429,7 +400,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
429 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); 400 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
430 else 401 else
431 ret = ia32_setup_frame(sig, ka, oldset, regs); 402 ret = ia32_setup_frame(sig, ka, oldset, regs);
432 } else 403 } else
433#endif 404#endif
434 ret = setup_rt_frame(sig, ka, info, oldset, regs); 405 ret = setup_rt_frame(sig, ka, info, oldset, regs);
435 406
@@ -453,15 +424,16 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
453 * handler too. 424 * handler too.
454 */ 425 */
455 regs->flags &= ~X86_EFLAGS_TF; 426 regs->flags &= ~X86_EFLAGS_TF;
456 if (test_thread_flag(TIF_SINGLESTEP))
457 ptrace_notify(SIGTRAP);
458 427
459 spin_lock_irq(&current->sighand->siglock); 428 spin_lock_irq(&current->sighand->siglock);
460 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 429 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
461 if (!(ka->sa.sa_flags & SA_NODEFER)) 430 if (!(ka->sa.sa_flags & SA_NODEFER))
462 sigaddset(&current->blocked,sig); 431 sigaddset(&current->blocked, sig);
463 recalc_sigpending(); 432 recalc_sigpending();
464 spin_unlock_irq(&current->sighand->siglock); 433 spin_unlock_irq(&current->sighand->siglock);
434
435 tracehook_signal_handler(sig, info, ka, regs,
436 test_thread_flag(TIF_SINGLESTEP));
465 } 437 }
466 438
467 return ret; 439 return ret;
@@ -518,9 +490,9 @@ static void do_signal(struct pt_regs *regs)
518 } 490 }
519 491
520 /* Did we come from a system call? */ 492 /* Did we come from a system call? */
521 if (current_syscall(regs) >= 0) { 493 if (syscall_get_nr(current, regs) >= 0) {
522 /* Restart the system call - no handlers present */ 494 /* Restart the system call - no handlers present */
523 switch (current_syscall_ret(regs)) { 495 switch (syscall_get_error(current, regs)) {
524 case -ERESTARTNOHAND: 496 case -ERESTARTNOHAND:
525 case -ERESTARTSYS: 497 case -ERESTARTSYS:
526 case -ERESTARTNOINTR: 498 case -ERESTARTNOINTR:
@@ -558,17 +530,23 @@ void do_notify_resume(struct pt_regs *regs, void *unused,
558 /* deal with pending signal delivery */ 530 /* deal with pending signal delivery */
559 if (thread_info_flags & _TIF_SIGPENDING) 531 if (thread_info_flags & _TIF_SIGPENDING)
560 do_signal(regs); 532 do_signal(regs);
533
534 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
535 clear_thread_flag(TIF_NOTIFY_RESUME);
536 tracehook_notify_resume(regs);
537 }
561} 538}
562 539
563void signal_fault(struct pt_regs *regs, void __user *frame, char *where) 540void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
564{ 541{
565 struct task_struct *me = current; 542 struct task_struct *me = current;
566 if (show_unhandled_signals && printk_ratelimit()) { 543 if (show_unhandled_signals && printk_ratelimit()) {
567 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", 544 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
568 me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax); 545 me->comm, me->pid, where, frame, regs->ip,
546 regs->sp, regs->orig_ax);
569 print_vma_addr(" in ", regs->ip); 547 print_vma_addr(" in ", regs->ip);
570 printk("\n"); 548 printk("\n");
571 } 549 }
572 550
573 force_sig(SIGSEGV, me); 551 force_sig(SIGSEGV, me);
574} 552}
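
handle_signal() above decides, via syscall_get_error(), whether an interrupted system call is reported to user space as -EINTR or rewound and restarted once the handler returns; for -ERESTARTSYS that choice follows the handler's SA_RESTART flag. A user-space illustration of the visible difference, using only standard POSIX calls rather than anything from this patch:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void on_alarm(int sig) { (void)sig; /* just interrupt the read */ }

    int main(int argc, char **argv)
    {
    	struct sigaction sa;
    	char buf[64];
    	ssize_t n;

    	memset(&sa, 0, sizeof(sa));
    	sa.sa_handler = on_alarm;
    	if (argc > 1)		/* any argument requests SA_RESTART */
    		sa.sa_flags |= SA_RESTART;
    	sigaction(SIGALRM, &sa, NULL);

    	alarm(1);	/* interrupt the blocking read after one second */
    	n = read(STDIN_FILENO, buf, sizeof(buf));

    	if (n < 0)
    		/* no SA_RESTART: -ERESTARTSYS was turned into -EINTR */
    		printf("read: %s\n", strerror(errno));
    	else
    		/* SA_RESTART: the call was rewound and restarted, then
    		 * completed normally once a line was typed */
    		printf("read returned %zd bytes\n", n);
    	return 0;
    }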
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7985c5b3f91..45531e3ba19 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -88,7 +88,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
88#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) 88#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
89#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) 89#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
90#else 90#else
91struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; 91static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
92#define get_idle_for_cpu(x) (idle_thread_array[(x)]) 92#define get_idle_for_cpu(x) (idle_thread_array[(x)])
93#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) 93#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
94#endif 94#endif
@@ -129,7 +129,7 @@ static int boot_cpu_logical_apicid;
129static cpumask_t cpu_sibling_setup_map; 129static cpumask_t cpu_sibling_setup_map;
130 130
131/* Set if we find a B stepping CPU */ 131/* Set if we find a B stepping CPU */
132int __cpuinitdata smp_b_stepping; 132static int __cpuinitdata smp_b_stepping;
133 133
134#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) 134#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
135 135
@@ -1313,16 +1313,13 @@ __init void prefill_possible_map(void)
1313 if (!num_processors) 1313 if (!num_processors)
1314 num_processors = 1; 1314 num_processors = 1;
1315 1315
1316#ifdef CONFIG_HOTPLUG_CPU
1317 if (additional_cpus == -1) { 1316 if (additional_cpus == -1) {
1318 if (disabled_cpus > 0) 1317 if (disabled_cpus > 0)
1319 additional_cpus = disabled_cpus; 1318 additional_cpus = disabled_cpus;
1320 else 1319 else
1321 additional_cpus = 0; 1320 additional_cpus = 0;
1322 } 1321 }
1323#else 1322
1324 additional_cpus = 0;
1325#endif
1326 possible = num_processors + additional_cpus; 1323 possible = num_processors + additional_cpus;
1327 if (possible > NR_CPUS) 1324 if (possible > NR_CPUS)
1328 possible = NR_CPUS; 1325 possible = NR_CPUS;
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 7066cb855a6..1884a8d12bf 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -22,6 +22,8 @@
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/unistd.h> 23#include <linux/unistd.h>
24 24
25#include <asm/syscalls.h>
26
25asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, 27asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
26 unsigned long prot, unsigned long flags, 28 unsigned long prot, unsigned long flags,
27 unsigned long fd, unsigned long pgoff) 29 unsigned long fd, unsigned long pgoff)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 3b360ef3381..6bc211accf0 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -13,15 +13,17 @@
13#include <linux/utsname.h> 13#include <linux/utsname.h>
14#include <linux/personality.h> 14#include <linux/personality.h>
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/uaccess.h>
16 17
17#include <asm/uaccess.h>
18#include <asm/ia32.h> 18#include <asm/ia32.h>
19#include <asm/syscalls.h>
19 20
20asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, 21asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
21 unsigned long fd, unsigned long off) 22 unsigned long prot, unsigned long flags,
23 unsigned long fd, unsigned long off)
22{ 24{
23 long error; 25 long error;
24 struct file * file; 26 struct file *file;
25 27
26 error = -EINVAL; 28 error = -EINVAL;
27 if (off & ~PAGE_MASK) 29 if (off & ~PAGE_MASK)
@@ -56,9 +58,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
56 unmapped base down for this case. This can give 58 unmapped base down for this case. This can give
57 conflicts with the heap, but we assume that glibc 59 conflicts with the heap, but we assume that glibc
58 malloc knows how to fall back to mmap. Give it 1GB 60 malloc knows how to fall back to mmap. Give it 1GB
59 of playground for now. -AK */ 61 of playground for now. -AK */
60 *begin = 0x40000000; 62 *begin = 0x40000000;
61 *end = 0x80000000; 63 *end = 0x80000000;
62 if (current->flags & PF_RANDOMIZE) { 64 if (current->flags & PF_RANDOMIZE) {
63 new_begin = randomize_range(*begin, *begin + 0x02000000, 0); 65 new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
64 if (new_begin) 66 if (new_begin)
@@ -66,9 +68,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
66 } 68 }
67 } else { 69 } else {
68 *begin = TASK_UNMAPPED_BASE; 70 *begin = TASK_UNMAPPED_BASE;
69 *end = TASK_SIZE; 71 *end = TASK_SIZE;
70 } 72 }
71} 73}
72 74
73unsigned long 75unsigned long
74arch_get_unmapped_area(struct file *filp, unsigned long addr, 76arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -78,11 +80,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78 struct vm_area_struct *vma; 80 struct vm_area_struct *vma;
79 unsigned long start_addr; 81 unsigned long start_addr;
80 unsigned long begin, end; 82 unsigned long begin, end;
81 83
82 if (flags & MAP_FIXED) 84 if (flags & MAP_FIXED)
83 return addr; 85 return addr;
84 86
85 find_start_end(flags, &begin, &end); 87 find_start_end(flags, &begin, &end);
86 88
87 if (len > end) 89 if (len > end)
88 return -ENOMEM; 90 return -ENOMEM;
@@ -96,12 +98,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
96 } 98 }
97 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) 99 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
98 && len <= mm->cached_hole_size) { 100 && len <= mm->cached_hole_size) {
99 mm->cached_hole_size = 0; 101 mm->cached_hole_size = 0;
100 mm->free_area_cache = begin; 102 mm->free_area_cache = begin;
101 } 103 }
102 addr = mm->free_area_cache; 104 addr = mm->free_area_cache;
103 if (addr < begin) 105 if (addr < begin)
104 addr = begin; 106 addr = begin;
105 start_addr = addr; 107 start_addr = addr;
106 108
107full_search: 109full_search:
@@ -127,7 +129,7 @@ full_search:
127 return addr; 129 return addr;
128 } 130 }
129 if (addr + mm->cached_hole_size < vma->vm_start) 131 if (addr + mm->cached_hole_size < vma->vm_start)
130 mm->cached_hole_size = vma->vm_start - addr; 132 mm->cached_hole_size = vma->vm_start - addr;
131 133
132 addr = vma->vm_end; 134 addr = vma->vm_end;
133 } 135 }
@@ -177,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
177 vma = find_vma(mm, addr-len); 179 vma = find_vma(mm, addr-len);
178 if (!vma || addr <= vma->vm_start) 180 if (!vma || addr <= vma->vm_start)
179 /* remember the address as a hint for next time */ 181 /* remember the address as a hint for next time */
180 return (mm->free_area_cache = addr-len); 182 return mm->free_area_cache = addr-len;
181 } 183 }
182 184
183 if (mm->mmap_base < len) 185 if (mm->mmap_base < len)
@@ -194,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
194 vma = find_vma(mm, addr); 196 vma = find_vma(mm, addr);
195 if (!vma || addr+len <= vma->vm_start) 197 if (!vma || addr+len <= vma->vm_start)
196 /* remember the address as a hint for next time */ 198 /* remember the address as a hint for next time */
197 return (mm->free_area_cache = addr); 199 return mm->free_area_cache = addr;
198 200
199 /* remember the largest hole we saw so far */ 201 /* remember the largest hole we saw so far */
200 if (addr + mm->cached_hole_size < vma->vm_start) 202 if (addr + mm->cached_hole_size < vma->vm_start)
@@ -224,13 +226,13 @@ bottomup:
224} 226}
225 227
226 228
227asmlinkage long sys_uname(struct new_utsname __user * name) 229asmlinkage long sys_uname(struct new_utsname __user *name)
228{ 230{
229 int err; 231 int err;
230 down_read(&uts_sem); 232 down_read(&uts_sem);
231 err = copy_to_user(name, utsname(), sizeof (*name)); 233 err = copy_to_user(name, utsname(), sizeof(*name));
232 up_read(&uts_sem); 234 up_read(&uts_sem);
233 if (personality(current->personality) == PER_LINUX32) 235 if (personality(current->personality) == PER_LINUX32)
234 err |= copy_to_user(&name->machine, "i686", 5); 236 err |= copy_to_user(&name->machine, "i686", 5);
235 return err ? -EFAULT : 0; 237 return err ? -EFAULT : 0;
236} 238}
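
find_start_end() above confines MAP_32BIT requests from 64-bit tasks to the 0x40000000-0x80000000 window, the 1GB "playground" mentioned in the comment, optionally randomized. For illustration, a user-space request that takes this path on x86-64 (MAP_32BIT is the glibc-exposed flag; the mapping size is arbitrary):

    #define _GNU_SOURCE		/* for MAP_32BIT */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
    	/* Ask for a low address; on x86-64 this is served from the
    	 * 0x40000000-0x80000000 window set up above (randomization may
    	 * shift the exact start). */
    	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
    		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);

    	if (p == MAP_FAILED) {
    		perror("mmap");
    		return 1;
    	}
    	printf("MAP_32BIT mapping at %p\n", p);
    	return 0;
    }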
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 170d43c1748..3d1be4f0fac 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -8,12 +8,12 @@
8#define __NO_STUBS 8#define __NO_STUBS
9 9
10#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; 10#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
11#undef _ASM_X86_64_UNISTD_H_ 11#undef ASM_X86__UNISTD_64_H
12#include <asm/unistd_64.h> 12#include <asm/unistd_64.h>
13 13
14#undef __SYSCALL 14#undef __SYSCALL
15#define __SYSCALL(nr, sym) [nr] = sym, 15#define __SYSCALL(nr, sym) [nr] = sym,
16#undef _ASM_X86_64_UNISTD_H_ 16#undef ASM_X86__UNISTD_64_H
17 17
18typedef void (*sys_call_ptr_t)(void); 18typedef void (*sys_call_ptr_t)(void);
19 19
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index ffe3c664afc..bbecf8b6bf9 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -36,6 +36,7 @@
36#include <asm/arch_hooks.h> 36#include <asm/arch_hooks.h>
37#include <asm/hpet.h> 37#include <asm/hpet.h>
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/timer.h>
39 40
40#include "do_timer.h" 41#include "do_timer.h"
41 42
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index ab6bf375a30..6bb7b8579e7 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -10,6 +10,7 @@
10#include <asm/ldt.h> 10#include <asm/ldt.h>
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/proto.h> 12#include <asm/proto.h>
13#include <asm/syscalls.h>
13 14
14#include "tls.h" 15#include "tls.h"
15 16
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 513caaca711..7a31f104bef 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -32,6 +32,8 @@
32#include <linux/bug.h> 32#include <linux/bug.h>
33#include <linux/nmi.h> 33#include <linux/nmi.h>
34#include <linux/mm.h> 34#include <linux/mm.h>
35#include <linux/smp.h>
36#include <linux/io.h>
35 37
36#if defined(CONFIG_EDAC) 38#if defined(CONFIG_EDAC)
37#include <linux/edac.h> 39#include <linux/edac.h>
@@ -45,9 +47,6 @@
45#include <asm/unwind.h> 47#include <asm/unwind.h>
46#include <asm/desc.h> 48#include <asm/desc.h>
47#include <asm/i387.h> 49#include <asm/i387.h>
48#include <asm/nmi.h>
49#include <asm/smp.h>
50#include <asm/io.h>
51#include <asm/pgalloc.h> 50#include <asm/pgalloc.h>
52#include <asm/proto.h> 51#include <asm/proto.h>
53#include <asm/pda.h> 52#include <asm/pda.h>
@@ -85,7 +84,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
85 84
86void printk_address(unsigned long address, int reliable) 85void printk_address(unsigned long address, int reliable)
87{ 86{
88 printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); 87 printk(" [<%016lx>] %s%pS\n",
88 address, reliable ? "" : "? ", (void *) address);
89} 89}
90 90
91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, 91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
@@ -98,7 +98,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
98 [STACKFAULT_STACK - 1] = "#SS", 98 [STACKFAULT_STACK - 1] = "#SS",
99 [MCE_STACK - 1] = "#MC", 99 [MCE_STACK - 1] = "#MC",
100#if DEBUG_STKSZ > EXCEPTION_STKSZ 100#if DEBUG_STKSZ > EXCEPTION_STKSZ
101 [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" 101 [N_EXCEPTION_STACKS ...
102 N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
102#endif 103#endif
103 }; 104 };
104 unsigned k; 105 unsigned k;
@@ -163,7 +164,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
163} 164}
164 165
165/* 166/*
166 * x86-64 can have up to three kernel stacks: 167 * x86-64 can have up to three kernel stacks:
167 * process stack 168 * process stack
168 * interrupt stack 169 * interrupt stack
169 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack 170 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
219 const struct stacktrace_ops *ops, void *data) 220 const struct stacktrace_ops *ops, void *data)
220{ 221{
221 const unsigned cpu = get_cpu(); 222 const unsigned cpu = get_cpu();
222 unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; 223 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
223 unsigned used = 0; 224 unsigned used = 0;
224 struct thread_info *tinfo; 225 struct thread_info *tinfo;
225 226
@@ -237,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
237 if (!bp) { 238 if (!bp) {
238 if (task == current) { 239 if (task == current) {
239 /* Grab bp right from our regs */ 240 /* Grab bp right from our regs */
240 asm("movq %%rbp, %0" : "=r" (bp) :); 241 asm("movq %%rbp, %0" : "=r" (bp) : );
241 } else { 242 } else {
242 /* bp is the last reg pushed by switch_to */ 243 /* bp is the last reg pushed by switch_to */
243 bp = *(unsigned long *) task->thread.sp; 244 bp = *(unsigned long *) task->thread.sp;
@@ -339,9 +340,8 @@ static void
339show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, 340show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
340 unsigned long *stack, unsigned long bp, char *log_lvl) 341 unsigned long *stack, unsigned long bp, char *log_lvl)
341{ 342{
342 printk("\nCall Trace:\n"); 343 printk("Call Trace:\n");
343 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); 344 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
344 printk("\n");
345} 345}
346 346
347void show_trace(struct task_struct *task, struct pt_regs *regs, 347void show_trace(struct task_struct *task, struct pt_regs *regs,
@@ -357,11 +357,15 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
357 unsigned long *stack; 357 unsigned long *stack;
358 int i; 358 int i;
359 const int cpu = smp_processor_id(); 359 const int cpu = smp_processor_id();
360 unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); 360 unsigned long *irqstack_end =
361 unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); 361 (unsigned long *) (cpu_pda(cpu)->irqstackptr);
362 unsigned long *irqstack =
363 (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
362 364
363 // debugging aid: "show_stack(NULL, NULL);" prints the 365 /*
364 // back trace for this cpu. 366 * debugging aid: "show_stack(NULL, NULL);" prints the
367 * back trace for this cpu.
368 */
365 369
366 if (sp == NULL) { 370 if (sp == NULL) {
367 if (task) 371 if (task)
@@ -386,6 +390,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
386 printk(" %016lx", *stack++); 390 printk(" %016lx", *stack++);
387 touch_nmi_watchdog(); 391 touch_nmi_watchdog();
388 } 392 }
393 printk("\n");
389 show_trace_log_lvl(task, regs, sp, bp, log_lvl); 394 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
390} 395}
391 396
@@ -404,7 +409,7 @@ void dump_stack(void)
404 409
405#ifdef CONFIG_FRAME_POINTER 410#ifdef CONFIG_FRAME_POINTER
406 if (!bp) 411 if (!bp)
407 asm("movq %%rbp, %0" : "=r" (bp):); 412 asm("movq %%rbp, %0" : "=r" (bp) : );
408#endif 413#endif
409 414
410 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 415 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
@@ -414,7 +419,6 @@ void dump_stack(void)
414 init_utsname()->version); 419 init_utsname()->version);
415 show_trace(NULL, NULL, &stack, bp); 420 show_trace(NULL, NULL, &stack, bp);
416} 421}
417
418EXPORT_SYMBOL(dump_stack); 422EXPORT_SYMBOL(dump_stack);
419 423
420void show_registers(struct pt_regs *regs) 424void show_registers(struct pt_regs *regs)
@@ -443,7 +447,6 @@ void show_registers(struct pt_regs *regs)
443 printk("Stack: "); 447 printk("Stack: ");
444 show_stack_log_lvl(NULL, regs, (unsigned long *)sp, 448 show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
445 regs->bp, ""); 449 regs->bp, "");
446 printk("\n");
447 450
448 printk(KERN_EMERG "Code: "); 451 printk(KERN_EMERG "Code: ");
449 452
@@ -493,7 +496,7 @@ unsigned __kprobes long oops_begin(void)
493 raw_local_irq_save(flags); 496 raw_local_irq_save(flags);
494 cpu = smp_processor_id(); 497 cpu = smp_processor_id();
495 if (!__raw_spin_trylock(&die_lock)) { 498 if (!__raw_spin_trylock(&die_lock)) {
496 if (cpu == die_owner) 499 if (cpu == die_owner)
497 /* nested oops. should stop eventually */; 500 /* nested oops. should stop eventually */;
498 else 501 else
499 __raw_spin_lock(&die_lock); 502 __raw_spin_lock(&die_lock);
@@ -638,7 +641,7 @@ kernel_trap:
638} 641}
639 642
640#define DO_ERROR(trapnr, signr, str, name) \ 643#define DO_ERROR(trapnr, signr, str, name) \
641asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 644asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
642{ \ 645{ \
643 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 646 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
644 == NOTIFY_STOP) \ 647 == NOTIFY_STOP) \
@@ -648,7 +651,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
648} 651}
649 652
650#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 653#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
651asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 654asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
652{ \ 655{ \
653 siginfo_t info; \ 656 siginfo_t info; \
654 info.si_signo = signr; \ 657 info.si_signo = signr; \
@@ -683,7 +686,7 @@ asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
683 preempt_conditional_cli(regs); 686 preempt_conditional_cli(regs);
684} 687}
685 688
686asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) 689asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
687{ 690{
688 static const char str[] = "double fault"; 691 static const char str[] = "double fault";
689 struct task_struct *tsk = current; 692 struct task_struct *tsk = current;
@@ -778,9 +781,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
778} 781}
779 782
780static notrace __kprobes void 783static notrace __kprobes void
781unknown_nmi_error(unsigned char reason, struct pt_regs * regs) 784unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
782{ 785{
783 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) 786 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
787 NOTIFY_STOP)
784 return; 788 return;
785 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", 789 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
786 reason); 790 reason);
@@ -882,7 +886,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
882 else if (user_mode(eregs)) 886 else if (user_mode(eregs))
883 regs = task_pt_regs(current); 887 regs = task_pt_regs(current);
884 /* Exception from kernel and interrupts are enabled. Move to 888 /* Exception from kernel and interrupts are enabled. Move to
885 kernel process stack. */ 889 kernel process stack. */
886 else if (eregs->flags & X86_EFLAGS_IF) 890 else if (eregs->flags & X86_EFLAGS_IF)
887 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); 891 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
888 if (eregs != regs) 892 if (eregs != regs)
@@ -891,7 +895,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
891} 895}
892 896
893/* runs on IST stack. */ 897/* runs on IST stack. */
894asmlinkage void __kprobes do_debug(struct pt_regs * regs, 898asmlinkage void __kprobes do_debug(struct pt_regs *regs,
895 unsigned long error_code) 899 unsigned long error_code)
896{ 900{
897 struct task_struct *tsk = current; 901 struct task_struct *tsk = current;
@@ -1035,7 +1039,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
1035 1039
1036asmlinkage void bad_intr(void) 1040asmlinkage void bad_intr(void)
1037{ 1041{
1038 printk("bad interrupt"); 1042 printk("bad interrupt");
1039} 1043}
1040 1044
1041asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) 1045asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
@@ -1047,7 +1051,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1047 1051
1048 conditional_sti(regs); 1052 conditional_sti(regs);
1049 if (!user_mode(regs) && 1053 if (!user_mode(regs) &&
1050 kernel_math_error(regs, "kernel simd math error", 19)) 1054 kernel_math_error(regs, "kernel simd math error", 19))
1051 return; 1055 return;
1052 1056
1053 /* 1057 /*
@@ -1092,7 +1096,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1092 force_sig_info(SIGFPE, &info, task); 1096 force_sig_info(SIGFPE, &info, task);
1093} 1097}
1094 1098
1095asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs) 1099asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
1096{ 1100{
1097} 1101}
1098 1102
@@ -1149,8 +1153,10 @@ void __init trap_init(void)
1149 set_intr_gate(0, &divide_error); 1153 set_intr_gate(0, &divide_error);
1150 set_intr_gate_ist(1, &debug, DEBUG_STACK); 1154 set_intr_gate_ist(1, &debug, DEBUG_STACK);
1151 set_intr_gate_ist(2, &nmi, NMI_STACK); 1155 set_intr_gate_ist(2, &nmi, NMI_STACK);
1152 set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ 1156 /* int3 can be called from all */
1153 set_system_gate(4, &overflow); /* int4 can be called from all */ 1157 set_system_gate_ist(3, &int3, DEBUG_STACK);
1158 /* int4 can be called from all */
1159 set_system_gate(4, &overflow);
1154 set_intr_gate(5, &bounds); 1160 set_intr_gate(5, &bounds);
1155 set_intr_gate(6, &invalid_op); 1161 set_intr_gate(6, &invalid_op);
1156 set_intr_gate(7, &device_not_available); 1162 set_intr_gate(7, &device_not_available);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8f98e9de1b8..161bb850fc4 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup);
104/* 104/*
105 * Read TSC and the reference counters. Take care of SMI disturbance 105 * Read TSC and the reference counters. Take care of SMI disturbance
106 */ 106 */
107static u64 tsc_read_refs(u64 *pm, u64 *hpet) 107static u64 tsc_read_refs(u64 *p, int hpet)
108{ 108{
109 u64 t1, t2; 109 u64 t1, t2;
110 int i; 110 int i;
@@ -112,9 +112,9 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
112 for (i = 0; i < MAX_RETRIES; i++) { 112 for (i = 0; i < MAX_RETRIES; i++) {
113 t1 = get_cycles(); 113 t1 = get_cycles();
114 if (hpet) 114 if (hpet)
115 *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; 115 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
116 else 116 else
117 *pm = acpi_pm_read_early(); 117 *p = acpi_pm_read_early();
118 t2 = get_cycles(); 118 t2 = get_cycles();
119 if ((t2 - t1) < SMI_TRESHOLD) 119 if ((t2 - t1) < SMI_TRESHOLD)
120 return t2; 120 return t2;
@@ -123,13 +123,59 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
123} 123}
124 124
125/* 125/*
126 * Calculate the TSC frequency from HPET reference
127 */
128static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
129{
130 u64 tmp;
131
132 if (hpet2 < hpet1)
133 hpet2 += 0x100000000ULL;
134 hpet2 -= hpet1;
135 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
136 do_div(tmp, 1000000);
137 do_div(deltatsc, tmp);
138
139 return (unsigned long) deltatsc;
140}
141
142/*
143 * Calculate the TSC frequency from PMTimer reference
144 */
145static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
146{
147 u64 tmp;
148
149 if (!pm1 && !pm2)
150 return ULONG_MAX;
151
152 if (pm2 < pm1)
153 pm2 += (u64)ACPI_PM_OVRRUN;
154 pm2 -= pm1;
155 tmp = pm2 * 1000000000LL;
156 do_div(tmp, PMTMR_TICKS_PER_SEC);
157 do_div(deltatsc, tmp);
158
159 return (unsigned long) deltatsc;
160}
161
162#define CAL_MS 10
163#define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS))
164#define CAL_PIT_LOOPS 1000
165
166#define CAL2_MS 50
167#define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS))
168#define CAL2_PIT_LOOPS 5000
169
170
171/*
126 * Try to calibrate the TSC against the Programmable 172 * Try to calibrate the TSC against the Programmable
127 * Interrupt Timer and return the frequency of the TSC 173 * Interrupt Timer and return the frequency of the TSC
128 * in kHz. 174 * in kHz.
129 * 175 *
130 * Return ULONG_MAX on failure to calibrate. 176 * Return ULONG_MAX on failure to calibrate.
131 */ 177 */
132static unsigned long pit_calibrate_tsc(void) 178static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
133{ 179{
134 u64 tsc, t1, t2, delta; 180 u64 tsc, t1, t2, delta;
135 unsigned long tscmin, tscmax; 181 unsigned long tscmin, tscmax;
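
The arithmetic performed by the new calc_hpet_ref()/calc_pmtimer_ref() helpers in the hunk above is easier to see outside the kernel. The sketch below is a plain userspace rendering of it, with do_div() replaced by ordinary 64-bit division and the counter-wrap handling left out; the tick counts and HPET period fed to main() are invented sample values, not anything read from hardware.

#include <stdint.h>
#include <stdio.h>

#define PMTMR_TICKS_PER_SEC 3579545ULL          /* ACPI PM timer rate, 3.579545 MHz */

/* deltatsc is (tsc2 - tsc1) * 1000000, exactly as native_calibrate_tsc() passes it */
static unsigned long calc_hpet_ref_khz(uint64_t deltatsc, uint64_t hpet_ticks,
                                       uint64_t hpet_period_fs)
{
        uint64_t ref_ns = (hpet_ticks * hpet_period_fs) / 1000000;  /* fs -> ns */
        return (unsigned long)(deltatsc / ref_ns);                  /* -> kHz   */
}

static unsigned long calc_pmtimer_ref_khz(uint64_t deltatsc, uint64_t pm_ticks)
{
        uint64_t ref_ns = (pm_ticks * 1000000000ULL) / PMTMR_TICKS_PER_SEC;
        return (unsigned long)(deltatsc / ref_ns);
}

int main(void)
{
        /* hypothetical 10ms sample: 2 GHz TSC, 14.318 MHz HPET (period ~69841279 fs) */
        uint64_t deltatsc = 20000000ULL * 1000000ULL;

        printf("HPET ref:    %lu kHz\n", calc_hpet_ref_khz(deltatsc, 143182, 69841279));
        printf("PMTIMER ref: %lu kHz\n", calc_pmtimer_ref_khz(deltatsc, 35795));
        return 0;                               /* both print roughly 2000000 */
}
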
@@ -144,8 +190,8 @@ static unsigned long pit_calibrate_tsc(void)
144 * (LSB then MSB) to begin countdown. 190 * (LSB then MSB) to begin countdown.
145 */ 191 */
146 outb(0xb0, 0x43); 192 outb(0xb0, 0x43);
147 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); 193 outb(latch & 0xff, 0x42);
148 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); 194 outb(latch >> 8, 0x42);
149 195
150 tsc = t1 = t2 = get_cycles(); 196 tsc = t1 = t2 = get_cycles();
151 197
@@ -166,31 +212,154 @@ static unsigned long pit_calibrate_tsc(void)
166 /* 212 /*
167 * Sanity checks: 213 * Sanity checks:
168 * 214 *
169 * If we were not able to read the PIT more than 5000 215 * If we were not able to read the PIT more than loopmin
170 * times, then we have been hit by a massive SMI 216 * times, then we have been hit by a massive SMI
171 * 217 *
172 * If the maximum is 10 times larger than the minimum, 218 * If the maximum is 10 times larger than the minimum,
173 * then we got hit by an SMI as well. 219 * then we got hit by an SMI as well.
174 */ 220 */
175 if (pitcnt < 5000 || tscmax > 10 * tscmin) 221 if (pitcnt < loopmin || tscmax > 10 * tscmin)
176 return ULONG_MAX; 222 return ULONG_MAX;
177 223
178 /* Calculate the PIT value */ 224 /* Calculate the PIT value */
179 delta = t2 - t1; 225 delta = t2 - t1;
180 do_div(delta, 50); 226 do_div(delta, ms);
181 return delta; 227 return delta;
182} 228}
183 229
230/*
231 * This reads the current MSB of the PIT counter, and
232 * checks if we are running on sufficiently fast and
233 * non-virtualized hardware.
234 *
235 * Our expectations are:
236 *
237 * - the PIT is running at roughly 1.19MHz
238 *
239 * - each IO is going to take about 1us on real hardware,
240 * but we allow it to be much faster (by a factor of 10) or
241 * _slightly_ slower (ie we allow up to a 2us read+counter
242 * update - anything else implies an unacceptably slow CPU
243 * or PIT for the fast calibration to work.
244 *
245 * - with 256 PIT ticks to read the value, we have 214us to
246 * see the same MSB (and overhead like doing a single TSC
247 * read per MSB value etc).
248 *
249 * - We're doing 2 reads per loop (LSB, MSB), and we expect
250 * them each to take about a microsecond on real hardware.
251 * So we expect a count value of around 100. But we'll be
252 * generous, and accept anything over 50.
253 *
254 * - if the PIT is stuck, and we see *many* more reads, we
255 * return early (and the next caller of pit_expect_msb()
256 * will then consider it a failure when they don't see the
257 * next expected value).
258 *
259 * These expectations mean that we know that we have seen the
260 * transition from one expected value to another with a fairly
261 * high accuracy, and we didn't miss any events. We can thus
262 * use the TSC value at the transitions to calculate a pretty
263 * good value for the TSC frequency.
264 */
265static inline int pit_expect_msb(unsigned char val)
266{
267 int count = 0;
268
269 for (count = 0; count < 50000; count++) {
270 /* Ignore LSB */
271 inb(0x42);
272 if (inb(0x42) != val)
273 break;
274 }
275 return count > 50;
276}
277
278/*
279 * How many MSB values do we want to see? We aim for a
280 * 15ms calibration, which assuming a 2us counter read
281 * error should give us roughly 150 ppm precision for
282 * the calibration.
283 */
284#define QUICK_PIT_MS 15
285#define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
286
287static unsigned long quick_pit_calibrate(void)
288{
289 /* Set the Gate high, disable speaker */
290 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
291
292 /*
293 * Counter 2, mode 0 (one-shot), binary count
294 *
295 * NOTE! Mode 2 decrements by two (and then the
296 * output is flipped each time, giving the same
297 * final output frequency as a decrement-by-one),
298 * so mode 0 is much better when looking at the
299 * individual counts.
300 */
301 outb(0xb0, 0x43);
302
303 /* Start at 0xffff */
304 outb(0xff, 0x42);
305 outb(0xff, 0x42);
306
307 if (pit_expect_msb(0xff)) {
308 int i;
309 u64 t1, t2, delta;
310 unsigned char expect = 0xfe;
311
312 t1 = get_cycles();
313 for (i = 0; i < QUICK_PIT_ITERATIONS; i++, expect--) {
314 if (!pit_expect_msb(expect))
315 goto failed;
316 }
317 t2 = get_cycles();
318
319 /*
320 * Make sure we can rely on the second TSC timestamp:
321 */
322 if (!pit_expect_msb(expect))
323 goto failed;
324
325 /*
326 * Ok, if we get here, then we've seen the
327 * MSB of the PIT decrement QUICK_PIT_ITERATIONS
328 * times, and each MSB had many hits, so we never
329 * had any sudden jumps.
330 *
331 * As a result, we can depend on there not being
332 * any odd delays anywhere, and the TSC reads are
333 * reliable.
334 *
335 * kHz = ticks / time-in-seconds / 1000;
336 * kHz = (t2 - t1) / (QPI * 256 / PIT_TICK_RATE) / 1000
337 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (QPI * 256 * 1000)
338 */
339 delta = (t2 - t1)*PIT_TICK_RATE;
340 do_div(delta, QUICK_PIT_ITERATIONS*256*1000);
341 printk("Fast TSC calibration using PIT\n");
342 return delta;
343 }
344failed:
345 return 0;
346}
184 347
185/** 348/**
186 * native_calibrate_tsc - calibrate the tsc on boot 349 * native_calibrate_tsc - calibrate the tsc on boot
187 */ 350 */
188unsigned long native_calibrate_tsc(void) 351unsigned long native_calibrate_tsc(void)
189{ 352{
190 u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2; 353 u64 tsc1, tsc2, delta, ref1, ref2;
191 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; 354 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
192 unsigned long flags; 355 unsigned long flags, latch, ms, fast_calibrate;
193 int hpet = is_hpet_enabled(), i; 356 int hpet = is_hpet_enabled(), i, loopmin;
357
358 local_irq_save(flags);
359 fast_calibrate = quick_pit_calibrate();
360 local_irq_restore(flags);
361 if (fast_calibrate)
362 return fast_calibrate;
194 363
195 /* 364 /*
196 * Run 5 calibration loops to get the lowest frequency value 365 * Run 5 calibration loops to get the lowest frequency value
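
The closing arithmetic of quick_pit_calibrate() above reduces to one formula: kHz = (t2 - t1) * PIT_TICK_RATE / (iterations * 256 * 1000). A minimal userspace sketch of just that step, assuming an invented TSC delta for a roughly 2 GHz part:

#include <stdint.h>
#include <stdio.h>

#define PIT_TICK_RATE        1193182UL          /* i8254 input clock, Hz */
#define QUICK_PIT_MS         15
#define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

/*
 * Each MSB step observed by pit_expect_msb() is 256 PIT ticks, so the window is
 * iterations * 256 / PIT_TICK_RATE seconds, and
 * kHz = tsc_delta / window / 1000 = tsc_delta * PIT_TICK_RATE / (iters * 256 * 1000)
 */
static unsigned long quick_pit_khz(uint64_t tsc_delta)
{
        uint64_t delta = tsc_delta * PIT_TICK_RATE;

        return (unsigned long)(delta / (QUICK_PIT_ITERATIONS * 256 * 1000));
}

int main(void)
{
        printf("iterations: %lu (~%d ms window)\n",
               (unsigned long)QUICK_PIT_ITERATIONS, QUICK_PIT_MS);
        printf("calibrated: %lu kHz\n", quick_pit_khz(29609000ULL));  /* ~2000000 */
        return 0;
}
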
@@ -216,7 +385,13 @@ unsigned long native_calibrate_tsc(void)
216 * calibration delay loop as we have to wait for a certain 385 * calibration delay loop as we have to wait for a certain
217 * amount of time anyway. 386 * amount of time anyway.
218 */ 387 */
219 for (i = 0; i < 5; i++) { 388
389 /* Preset PIT loop values */
390 latch = CAL_LATCH;
391 ms = CAL_MS;
392 loopmin = CAL_PIT_LOOPS;
393
394 for (i = 0; i < 3; i++) {
220 unsigned long tsc_pit_khz; 395 unsigned long tsc_pit_khz;
221 396
222 /* 397 /*
@@ -226,16 +401,16 @@ unsigned long native_calibrate_tsc(void)
226 * read the end value. 401 * read the end value.
227 */ 402 */
228 local_irq_save(flags); 403 local_irq_save(flags);
229 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); 404 tsc1 = tsc_read_refs(&ref1, hpet);
230 tsc_pit_khz = pit_calibrate_tsc(); 405 tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
231 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); 406 tsc2 = tsc_read_refs(&ref2, hpet);
232 local_irq_restore(flags); 407 local_irq_restore(flags);
233 408
234 /* Pick the lowest PIT TSC calibration so far */ 409 /* Pick the lowest PIT TSC calibration so far */
235 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz); 410 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
236 411
237 /* hpet or pmtimer available ? */ 412 /* hpet or pmtimer available ? */
238 if (!hpet && !pm1 && !pm2) 413 if (!hpet && !ref1 && !ref2)
239 continue; 414 continue;
240 415
241 /* Check, whether the sampling was disturbed by an SMI */ 416 /* Check, whether the sampling was disturbed by an SMI */
@@ -243,23 +418,41 @@ unsigned long native_calibrate_tsc(void)
243 continue; 418 continue;
244 419
245 tsc2 = (tsc2 - tsc1) * 1000000LL; 420 tsc2 = (tsc2 - tsc1) * 1000000LL;
421 if (hpet)
422 tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
423 else
424 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
246 425
247 if (hpet) { 426 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
248 if (hpet2 < hpet1) 427
249 hpet2 += 0x100000000ULL; 428 /* Check the reference deviation */
250 hpet2 -= hpet1; 429 delta = ((u64) tsc_pit_min) * 100;
251 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 430 do_div(delta, tsc_ref_min);
252 do_div(tsc1, 1000000); 431
253 } else { 432 /*
254 if (pm2 < pm1) 433 * If both calibration results are inside a 10% window
255 pm2 += (u64)ACPI_PM_OVRRUN; 434 * then we can be sure, that the calibration
256 pm2 -= pm1; 435 * succeeded. We break out of the loop right away. We
257 tsc1 = pm2 * 1000000000LL; 436 * use the reference value, as it is more precise.
258 do_div(tsc1, PMTMR_TICKS_PER_SEC); 437 */
438 if (delta >= 90 && delta <= 110) {
439 printk(KERN_INFO
440 "TSC: PIT calibration matches %s. %d loops\n",
441 hpet ? "HPET" : "PMTIMER", i + 1);
442 return tsc_ref_min;
259 } 443 }
260 444
261 do_div(tsc2, tsc1); 445 /*
262 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2); 446 * Check whether PIT failed more than once. This
447 * happens in virtualized environments. We need to
448 * give the virtual PC a slightly longer timeframe for
449 * the HPET/PMTIMER to make the result precise.
450 */
451 if (i == 1 && tsc_pit_min == ULONG_MAX) {
452 latch = CAL2_LATCH;
453 ms = CAL2_MS;
454 loopmin = CAL2_PIT_LOOPS;
455 }
263 } 456 }
264 457
265 /* 458 /*
@@ -270,7 +463,7 @@ unsigned long native_calibrate_tsc(void)
270 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); 463 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
271 464
272 /* We don't have an alternative source, disable TSC */ 465 /* We don't have an alternative source, disable TSC */
273 if (!hpet && !pm1 && !pm2) { 466 if (!hpet && !ref1 && !ref2) {
274 printk("TSC: No reference (HPET/PMTIMER) available\n"); 467 printk("TSC: No reference (HPET/PMTIMER) available\n");
275 return 0; 468 return 0;
276 } 469 }
@@ -278,7 +471,7 @@ unsigned long native_calibrate_tsc(void)
278 /* The alternative source failed as well, disable TSC */ 471 /* The alternative source failed as well, disable TSC */
279 if (tsc_ref_min == ULONG_MAX) { 472 if (tsc_ref_min == ULONG_MAX) {
280 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration " 473 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
281 "failed due to SMI disturbance.\n"); 474 "failed.\n");
282 return 0; 475 return 0;
283 } 476 }
284 477
@@ -290,44 +483,25 @@ unsigned long native_calibrate_tsc(void)
290 } 483 }
291 484
292 /* We don't have an alternative source, use the PIT calibration value */ 485 /* We don't have an alternative source, use the PIT calibration value */
293 if (!hpet && !pm1 && !pm2) { 486 if (!hpet && !ref1 && !ref2) {
294 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 487 printk(KERN_INFO "TSC: Using PIT calibration value\n");
295 return tsc_pit_min; 488 return tsc_pit_min;
296 } 489 }
297 490
298 /* The alternative source failed, use the PIT calibration value */ 491 /* The alternative source failed, use the PIT calibration value */
299 if (tsc_ref_min == ULONG_MAX) { 492 if (tsc_ref_min == ULONG_MAX) {
300 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due " 493 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
301 "to SMI disturbance. Using PIT calibration\n"); 494 "Using PIT calibration\n");
302 return tsc_pit_min; 495 return tsc_pit_min;
303 } 496 }
304 497
305 /* Check the reference deviation */
306 delta = ((u64) tsc_pit_min) * 100;
307 do_div(delta, tsc_ref_min);
308
309 /*
310 * If both calibration results are inside a 5% window, then we
311 * use the lower frequency of those as it is probably the
312 * closest estimate.
313 */
314 if (delta >= 95 && delta <= 105) {
315 printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
316 hpet ? "HPET" : "PMTIMER");
317 printk(KERN_INFO "TSC: using %s calibration value\n",
318 tsc_pit_min <= tsc_ref_min ? "PIT" :
319 hpet ? "HPET" : "PMTIMER");
320 return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
321 }
322
323 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
324 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
325
326 /* 498 /*
327 * The calibration values differ too much. In doubt, we use 499 * The calibration values differ too much. In doubt, we use
328 * the PIT value as we know that there are PMTIMERs around 500 * the PIT value as we know that there are PMTIMERs around
329 * running at double speed. 501 * running at double speed. At least we let the user know:
330 */ 502 */
503 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
504 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
331 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 505 printk(KERN_INFO "TSC: Using PIT calibration value\n");
332 return tsc_pit_min; 506 return tsc_pit_min;
333} 507}
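
The agreement check that native_calibrate_tsc() now applies inside its loop can be summarised as the sketch below (sample inputs are invented; the real loop additionally retries with the longer CAL2_* PIT parameters when the PIT fails twice):

#include <stdint.h>
#include <stdio.h>

/*
 * If the PIT result is within +/-10% of the HPET/PMTIMER result, trust the
 * (more precise) reference value right away; otherwise keep calibrating.
 */
static unsigned long pick_calibration(unsigned long tsc_pit_min,
                                      unsigned long tsc_ref_min)
{
        uint64_t delta = (uint64_t)tsc_pit_min * 100 / tsc_ref_min;

        if (delta >= 90 && delta <= 110)
                return tsc_ref_min;     /* both sources agree */
        return 0;                       /* disagree: caller keeps looping */
}

int main(void)
{
        printf("%lu\n", pick_calibration(1995000, 2000123));  /* agrees    -> 2000123 */
        printf("%lu\n", pick_calibration(3990000, 2000123));  /* disagrees -> 0       */
        return 0;
}
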
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 594ef47f0a6..61a97e616f7 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -25,45 +25,31 @@
25#include <asm/visws/cobalt.h> 25#include <asm/visws/cobalt.h>
26#include <asm/visws/piix4.h> 26#include <asm/visws/piix4.h>
27#include <asm/arch_hooks.h> 27#include <asm/arch_hooks.h>
28#include <asm/io_apic.h>
28#include <asm/fixmap.h> 29#include <asm/fixmap.h>
29#include <asm/reboot.h> 30#include <asm/reboot.h>
30#include <asm/setup.h> 31#include <asm/setup.h>
31#include <asm/e820.h> 32#include <asm/e820.h>
32#include <asm/smp.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <mach_ipi.h> 35#include <mach_ipi.h>
36 36
37#include "mach_apic.h" 37#include "mach_apic.h"
38 38
39#include <linux/init.h>
40#include <linux/smp.h>
41
42#include <linux/kernel_stat.h> 39#include <linux/kernel_stat.h>
43#include <linux/interrupt.h>
44#include <linux/init.h>
45 40
46#include <asm/io.h>
47#include <asm/apic.h>
48#include <asm/i8259.h> 41#include <asm/i8259.h>
49#include <asm/irq_vectors.h> 42#include <asm/irq_vectors.h>
50#include <asm/visws/cobalt.h>
51#include <asm/visws/lithium.h> 43#include <asm/visws/lithium.h>
52#include <asm/visws/piix4.h>
53 44
54#include <linux/sched.h> 45#include <linux/sched.h>
55#include <linux/kernel.h> 46#include <linux/kernel.h>
56#include <linux/init.h>
57#include <linux/pci.h> 47#include <linux/pci.h>
58#include <linux/pci_ids.h> 48#include <linux/pci_ids.h>
59 49
60extern int no_broadcast; 50extern int no_broadcast;
61 51
62#include <asm/io.h>
63#include <asm/apic.h> 52#include <asm/apic.h>
64#include <asm/arch_hooks.h>
65#include <asm/visws/cobalt.h>
66#include <asm/visws/lithium.h>
67 53
68char visws_board_type = -1; 54char visws_board_type = -1;
69char visws_board_rev = -1; 55char visws_board_rev = -1;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 38f566fa27d..4eeb5cf9720 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -46,6 +46,7 @@
46#include <asm/io.h> 46#include <asm/io.h>
47#include <asm/tlbflush.h> 47#include <asm/tlbflush.h>
48#include <asm/irq.h> 48#include <asm/irq.h>
49#include <asm/syscalls.h>
49 50
50/* 51/*
51 * Known problems: 52 * Known problems:
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index edfb09f3047..8c9ad02af5a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -393,13 +393,13 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
393} 393}
394#endif 394#endif
395 395
396static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn) 396static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
397{ 397{
398 vmi_set_page_type(pfn, VMI_PAGE_L1); 398 vmi_set_page_type(pfn, VMI_PAGE_L1);
399 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); 399 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
400} 400}
401 401
402static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn) 402static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
403{ 403{
404 /* 404 /*
405 * This call comes in very early, before mem_map is setup. 405 * This call comes in very early, before mem_map is setup.
@@ -410,20 +410,20 @@ static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn)
410 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); 410 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
411} 411}
412 412
413static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count) 413static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
414{ 414{
415 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); 415 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
416 vmi_check_page_type(clonepfn, VMI_PAGE_L2); 416 vmi_check_page_type(clonepfn, VMI_PAGE_L2);
417 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); 417 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
418} 418}
419 419
420static void vmi_release_pte(u32 pfn) 420static void vmi_release_pte(unsigned long pfn)
421{ 421{
422 vmi_ops.release_page(pfn, VMI_PAGE_L1); 422 vmi_ops.release_page(pfn, VMI_PAGE_L1);
423 vmi_set_page_type(pfn, VMI_PAGE_NORMAL); 423 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
424} 424}
425 425
426static void vmi_release_pmd(u32 pfn) 426static void vmi_release_pmd(unsigned long pfn)
427{ 427{
428 vmi_ops.release_page(pfn, VMI_PAGE_L2); 428 vmi_ops.release_page(pfn, VMI_PAGE_L2);
429 vmi_set_page_type(pfn, VMI_PAGE_NORMAL); 429 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 01b868ba82f..321cf720dbb 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -16,37 +16,46 @@ static void __rdmsr_on_cpu(void *info)
16 rdmsr(rv->msr_no, rv->l, rv->h); 16 rdmsr(rv->msr_no, rv->l, rv->h);
17} 17}
18 18
19static void __rdmsr_safe_on_cpu(void *info) 19static void __wrmsr_on_cpu(void *info)
20{ 20{
21 struct msr_info *rv = info; 21 struct msr_info *rv = info;
22 22
23 rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); 23 wrmsr(rv->msr_no, rv->l, rv->h);
24} 24}
25 25
26static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) 26int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
27{ 27{
28 int err = 0; 28 int err;
29 struct msr_info rv; 29 struct msr_info rv;
30 30
31 rv.msr_no = msr_no; 31 rv.msr_no = msr_no;
32 if (safe) { 32 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
33 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
34 &rv, 1);
35 err = err ? err : rv.err;
36 } else {
37 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
38 }
39 *l = rv.l; 33 *l = rv.l;
40 *h = rv.h; 34 *h = rv.h;
41 35
42 return err; 36 return err;
43} 37}
44 38
45static void __wrmsr_on_cpu(void *info) 39int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
40{
41 int err;
42 struct msr_info rv;
43
44 rv.msr_no = msr_no;
45 rv.l = l;
46 rv.h = h;
47 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
48
49 return err;
50}
51
52/* These "safe" variants are slower and should be used when the target MSR
53 may not actually exist. */
54static void __rdmsr_safe_on_cpu(void *info)
46{ 55{
47 struct msr_info *rv = info; 56 struct msr_info *rv = info;
48 57
49 wrmsr(rv->msr_no, rv->l, rv->h); 58 rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
50} 59}
51 60
52static void __wrmsr_safe_on_cpu(void *info) 61static void __wrmsr_safe_on_cpu(void *info)
@@ -56,45 +65,30 @@ static void __wrmsr_safe_on_cpu(void *info)
56 rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); 65 rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
57} 66}
58 67
59static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) 68int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
60{ 69{
61 int err = 0; 70 int err;
62 struct msr_info rv; 71 struct msr_info rv;
63 72
64 rv.msr_no = msr_no; 73 rv.msr_no = msr_no;
65 rv.l = l; 74 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
66 rv.h = h; 75 *l = rv.l;
67 if (safe) { 76 *h = rv.h;
68 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
69 &rv, 1);
70 err = err ? err : rv.err;
71 } else {
72 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
73 }
74
75 return err;
76}
77 77
78int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 78 return err ? err : rv.err;
79{
80 return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
81} 79}
82 80
83int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
84{
85 return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
86}
87
88/* These "safe" variants are slower and should be used when the target MSR
89 may not actually exist. */
90int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 81int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
91{ 82{
92 return _wrmsr_on_cpu(cpu, msr_no, l, h, 1); 83 int err;
93} 84 struct msr_info rv;
94 85
95int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 86 rv.msr_no = msr_no;
96{ 87 rv.l = l;
97 return _rdmsr_on_cpu(cpu, msr_no, l, h, 1); 88 rv.h = h;
89 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
90
91 return err ? err : rv.err;
98} 92}
99 93
100EXPORT_SYMBOL(rdmsr_on_cpu); 94EXPORT_SYMBOL(rdmsr_on_cpu);
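
The reworked helpers keep the exported signatures shown above, so a caller only picks the plain or the _safe variant. A hypothetical fragment (kernel context only, not a standalone program; the wrapper function, CPU number and MSR choices are made up for illustration):

#include <linux/kernel.h>
#include <asm/msr.h>

/* Read two MSRs of CPU 1 from whichever CPU this happens to run on. */
static void demo_remote_msr_read(void)
{
        u32 lo, hi;
        int err;

        /* plain variant: the MSR is architectural, no #GP expected */
        err = rdmsr_on_cpu(1, 0x10 /* MSR_IA32_TSC */, &lo, &hi);
        if (err)
                printk(KERN_WARNING "cross-CPU MSR read failed: %d\n", err);

        /* safe variant: a #GP from rdmsr_safe() on the target CPU shows up in err too */
        err = rdmsr_safe_on_cpu(1, 0xE8 /* MSR_IA32_APERF */, &lo, &hi);
        if (err)
                printk(KERN_INFO "APERF not readable on CPU 1: %d\n", err);
        else
                printk(KERN_INFO "CPU 1 APERF = %llu\n",
                       ((unsigned long long)hi << 32) | lo);
}
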
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 94972e7c094..82004d2bf05 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -22,7 +22,7 @@ char *strcpy(char *dest, const char *src)
22 "testb %%al,%%al\n\t" 22 "testb %%al,%%al\n\t"
23 "jne 1b" 23 "jne 1b"
24 : "=&S" (d0), "=&D" (d1), "=&a" (d2) 24 : "=&S" (d0), "=&D" (d1), "=&a" (d2)
25 :"0" (src), "1" (dest) : "memory"); 25 : "0" (src), "1" (dest) : "memory");
26 return dest; 26 return dest;
27} 27}
28EXPORT_SYMBOL(strcpy); 28EXPORT_SYMBOL(strcpy);
@@ -42,7 +42,7 @@ char *strncpy(char *dest, const char *src, size_t count)
42 "stosb\n" 42 "stosb\n"
43 "2:" 43 "2:"
44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) 44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
45 :"0" (src), "1" (dest), "2" (count) : "memory"); 45 : "0" (src), "1" (dest), "2" (count) : "memory");
46 return dest; 46 return dest;
47} 47}
48EXPORT_SYMBOL(strncpy); 48EXPORT_SYMBOL(strncpy);
@@ -60,7 +60,7 @@ char *strcat(char *dest, const char *src)
60 "testb %%al,%%al\n\t" 60 "testb %%al,%%al\n\t"
61 "jne 1b" 61 "jne 1b"
62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) 62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory"); 63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu) : "memory");
64 return dest; 64 return dest;
65} 65}
66EXPORT_SYMBOL(strcat); 66EXPORT_SYMBOL(strcat);
@@ -105,9 +105,9 @@ int strcmp(const char *cs, const char *ct)
105 "2:\tsbbl %%eax,%%eax\n\t" 105 "2:\tsbbl %%eax,%%eax\n\t"
106 "orb $1,%%al\n" 106 "orb $1,%%al\n"
107 "3:" 107 "3:"
108 :"=a" (res), "=&S" (d0), "=&D" (d1) 108 : "=a" (res), "=&S" (d0), "=&D" (d1)
109 :"1" (cs), "2" (ct) 109 : "1" (cs), "2" (ct)
110 :"memory"); 110 : "memory");
111 return res; 111 return res;
112} 112}
113EXPORT_SYMBOL(strcmp); 113EXPORT_SYMBOL(strcmp);
@@ -130,9 +130,9 @@ int strncmp(const char *cs, const char *ct, size_t count)
130 "3:\tsbbl %%eax,%%eax\n\t" 130 "3:\tsbbl %%eax,%%eax\n\t"
131 "orb $1,%%al\n" 131 "orb $1,%%al\n"
132 "4:" 132 "4:"
133 :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) 133 : "=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
134 :"1" (cs), "2" (ct), "3" (count) 134 : "1" (cs), "2" (ct), "3" (count)
135 :"memory"); 135 : "memory");
136 return res; 136 return res;
137} 137}
138EXPORT_SYMBOL(strncmp); 138EXPORT_SYMBOL(strncmp);
@@ -152,9 +152,9 @@ char *strchr(const char *s, int c)
152 "movl $1,%1\n" 152 "movl $1,%1\n"
153 "2:\tmovl %1,%0\n\t" 153 "2:\tmovl %1,%0\n\t"
154 "decl %0" 154 "decl %0"
155 :"=a" (res), "=&S" (d0) 155 : "=a" (res), "=&S" (d0)
156 :"1" (s), "0" (c) 156 : "1" (s), "0" (c)
157 :"memory"); 157 : "memory");
158 return res; 158 return res;
159} 159}
160EXPORT_SYMBOL(strchr); 160EXPORT_SYMBOL(strchr);
@@ -169,9 +169,9 @@ size_t strlen(const char *s)
169 "scasb\n\t" 169 "scasb\n\t"
170 "notl %0\n\t" 170 "notl %0\n\t"
171 "decl %0" 171 "decl %0"
172 :"=c" (res), "=&D" (d0) 172 : "=c" (res), "=&D" (d0)
173 :"1" (s), "a" (0), "0" (0xffffffffu) 173 : "1" (s), "a" (0), "0" (0xffffffffu)
174 :"memory"); 174 : "memory");
175 return res; 175 return res;
176} 176}
177EXPORT_SYMBOL(strlen); 177EXPORT_SYMBOL(strlen);
@@ -189,9 +189,9 @@ void *memchr(const void *cs, int c, size_t count)
189 "je 1f\n\t" 189 "je 1f\n\t"
190 "movl $1,%0\n" 190 "movl $1,%0\n"
191 "1:\tdecl %0" 191 "1:\tdecl %0"
192 :"=D" (res), "=&c" (d0) 192 : "=D" (res), "=&c" (d0)
193 :"a" (c), "0" (cs), "1" (count) 193 : "a" (c), "0" (cs), "1" (count)
194 :"memory"); 194 : "memory");
195 return res; 195 return res;
196} 196}
197EXPORT_SYMBOL(memchr); 197EXPORT_SYMBOL(memchr);
@@ -228,9 +228,9 @@ size_t strnlen(const char *s, size_t count)
228 "cmpl $-1,%1\n\t" 228 "cmpl $-1,%1\n\t"
229 "jne 1b\n" 229 "jne 1b\n"
230 "3:\tsubl %2,%0" 230 "3:\tsubl %2,%0"
231 :"=a" (res), "=&d" (d0) 231 : "=a" (res), "=&d" (d0)
232 :"c" (s), "1" (count) 232 : "c" (s), "1" (count)
233 :"memory"); 233 : "memory");
234 return res; 234 return res;
235} 235}
236EXPORT_SYMBOL(strnlen); 236EXPORT_SYMBOL(strnlen);
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 42e8a50303f..8e2d55f754b 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -23,9 +23,9 @@ __asm__ __volatile__(
23 "jne 1b\n\t" 23 "jne 1b\n\t"
24 "xorl %%eax,%%eax\n\t" 24 "xorl %%eax,%%eax\n\t"
25 "2:" 25 "2:"
26 :"=a" (__res), "=&c" (d0), "=&S" (d1) 26 : "=a" (__res), "=&c" (d0), "=&S" (d1)
27 :"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) 27 : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct)
28 :"dx", "di"); 28 : "dx", "di");
29return __res; 29return __res;
30} 30}
31 31
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index 3d317836be9..3f2cf11f201 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -10,13 +10,15 @@
10#include <asm/e820.h> 10#include <asm/e820.h>
11#include <asm/setup.h> 11#include <asm/setup.h>
12 12
13#include <mach_ipi.h>
14
13#ifdef CONFIG_HOTPLUG_CPU 15#ifdef CONFIG_HOTPLUG_CPU
14#define DEFAULT_SEND_IPI (1) 16#define DEFAULT_SEND_IPI (1)
15#else 17#else
16#define DEFAULT_SEND_IPI (0) 18#define DEFAULT_SEND_IPI (0)
17#endif 19#endif
18 20
19int no_broadcast=DEFAULT_SEND_IPI; 21int no_broadcast = DEFAULT_SEND_IPI;
20 22
21/** 23/**
22 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors 24 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 62fa440678d..847c164725f 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn,
328 328
329 get_memcfg_numa(); 329 get_memcfg_numa();
330 330
331 kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE); 331 kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
332 332
333 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); 333 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
334 do { 334 do {
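
The round_up() -> roundup() switch here and in the mm/ hunks below is a rename, not a behaviour change: both round the first argument up to the next multiple of the second. A tiny standalone check of the generic kernel macro, with arbitrary example values:

#include <stdio.h>

#define roundup(x, y)  ((((x) + ((y) - 1)) / (y)) * (y))   /* generic kernel macro */

int main(void)
{
        printf("%d\n", roundup(1000, 512));   /* 1024 */
        printf("%d\n", roundup(1024, 512));   /* 1024, already aligned */
        printf("%d\n", roundup(5, 4));        /* 8 */
        return 0;
}
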
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a20d1fa64b4..e7277cbcfb4 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
148 * we have now. "break" is either changing perms, levels or 148 * we have now. "break" is either changing perms, levels or
149 * address space marker. 149 * address space marker.
150 */ 150 */
151 prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); 151 prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
152 cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); 152 cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;
153 153
154 if (!st->level) { 154 if (!st->level) {
155 /* First entry */ 155 /* First entry */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 455f3fe67b4..8f92cac4e6d 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -35,6 +35,7 @@
35#include <asm/tlbflush.h> 35#include <asm/tlbflush.h>
36#include <asm/proto.h> 36#include <asm/proto.h>
37#include <asm-generic/sections.h> 37#include <asm-generic/sections.h>
38#include <asm/traps.h>
38 39
39/* 40/*
40 * Page fault error code bits 41 * Page fault error code bits
@@ -357,8 +358,6 @@ static int is_errata100(struct pt_regs *regs, unsigned long address)
357 return 0; 358 return 0;
358} 359}
359 360
360void do_invalid_op(struct pt_regs *, unsigned long);
361
362static int is_f00f_bug(struct pt_regs *regs, unsigned long address) 361static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
363{ 362{
364#ifdef CONFIG_X86_F00F_BUG 363#ifdef CONFIG_X86_F00F_BUG
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 60ec1d08ff2..6b9a9358b33 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -47,6 +47,7 @@
47#include <asm/paravirt.h> 47#include <asm/paravirt.h>
48#include <asm/setup.h> 48#include <asm/setup.h>
49#include <asm/cacheflush.h> 49#include <asm/cacheflush.h>
50#include <asm/smp.h>
50 51
51unsigned int __VMALLOC_RESERVE = 128 << 20; 52unsigned int __VMALLOC_RESERVE = 128 << 20;
52 53
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d3746efb060..770536ebf7e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -225,7 +225,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
225void __init cleanup_highmap(void) 225void __init cleanup_highmap(void)
226{ 226{
227 unsigned long vaddr = __START_KERNEL_map; 227 unsigned long vaddr = __START_KERNEL_map;
228 unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1; 228 unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
229 pmd_t *pmd = level2_kernel_pgt; 229 pmd_t *pmd = level2_kernel_pgt;
230 pmd_t *last_pmd = pmd + PTRS_PER_PMD; 230 pmd_t *last_pmd = pmd + PTRS_PER_PMD;
231 231
@@ -451,14 +451,14 @@ static void __init find_early_table_space(unsigned long end)
451 unsigned long puds, pmds, ptes, tables, start; 451 unsigned long puds, pmds, ptes, tables, start;
452 452
453 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 453 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
454 tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); 454 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
455 if (direct_gbpages) { 455 if (direct_gbpages) {
456 unsigned long extra; 456 unsigned long extra;
457 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); 457 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
458 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; 458 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
459 } else 459 } else
460 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 460 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
461 tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); 461 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
462 462
463 if (cpu_has_pse) { 463 if (cpu_has_pse) {
464 unsigned long extra; 464 unsigned long extra;
@@ -466,7 +466,7 @@ static void __init find_early_table_space(unsigned long end)
466 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 466 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
467 } else 467 } else
468 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; 468 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
469 tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE); 469 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
470 470
471 /* 471 /*
472 * RED-PEN putting page tables only on node 0 could 472 * RED-PEN putting page tables only on node 0 could
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d4b6e6a29ae..cac6da54203 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -421,7 +421,7 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
421 return; 421 return;
422} 422}
423 423
424int __initdata early_ioremap_debug; 424static int __initdata early_ioremap_debug;
425 425
426static int __init early_ioremap_debug_setup(char *str) 426static int __init early_ioremap_debug_setup(char *str)
427{ 427{
@@ -547,7 +547,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
547} 547}
548 548
549 549
550int __initdata early_ioremap_nested; 550static int __initdata early_ioremap_nested;
551 551
552static int __init check_early_ioremap_leak(void) 552static int __init check_early_ioremap_leak(void)
553{ 553{
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a4dd793d600..cebcbf152d4 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void)
79 return 0; 79 return 0;
80 80
81 addr = 0x8000; 81 addr = 0x8000;
82 nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); 82 nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
83 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, 83 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
84 nodemap_size, L1_CACHE_BYTES); 84 nodemap_size, L1_CACHE_BYTES);
85 if (nodemap_addr == -1UL) { 85 if (nodemap_addr == -1UL) {
@@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
176 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; 176 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
177 unsigned long bootmap_start, nodedata_phys; 177 unsigned long bootmap_start, nodedata_phys;
178 void *bootmap; 178 void *bootmap;
179 const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE); 179 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
180 int nid; 180 int nid;
181 181
182 start = round_up(start, ZONE_ALIGN); 182 start = roundup(start, ZONE_ALIGN);
183 183
184 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, 184 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
185 start, end); 185 start, end);
@@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
210 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); 210 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
211 nid = phys_to_nid(nodedata_phys); 211 nid = phys_to_nid(nodedata_phys);
212 if (nid == nodeid) 212 if (nid == nodeid)
213 bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE); 213 bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
214 else 214 else
215 bootmap_start = round_up(start, PAGE_SIZE); 215 bootmap_start = roundup(start, PAGE_SIZE);
216 /* 216 /*
217 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like 217 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like
218 * to use that to align to PAGE_SIZE 218 * to use that to align to PAGE_SIZE
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 43e2f8483e4..898fad617ab 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -84,7 +84,7 @@ static inline unsigned long highmap_start_pfn(void)
84 84
85static inline unsigned long highmap_end_pfn(void) 85static inline unsigned long highmap_end_pfn(void)
86{ 86{
87 return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; 87 return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
88} 88}
89 89
90#endif 90#endif
@@ -906,11 +906,13 @@ int set_memory_ro(unsigned long addr, int numpages)
906{ 906{
907 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW)); 907 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
908} 908}
909EXPORT_SYMBOL_GPL(set_memory_ro);
909 910
910int set_memory_rw(unsigned long addr, int numpages) 911int set_memory_rw(unsigned long addr, int numpages)
911{ 912{
912 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW)); 913 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
913} 914}
915EXPORT_SYMBOL_GPL(set_memory_rw);
914 916
915int set_memory_np(unsigned long addr, int numpages) 917int set_memory_np(unsigned long addr, int numpages)
916{ 918{
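
Exporting set_memory_ro()/set_memory_rw() (the EXPORT_SYMBOL_GPL lines above) lets GPL modules flip the protection of their own pages. A hypothetical module fragment, using only the signatures visible in this hunk; the module and its data page are invented for illustration:

#include <linux/module.h>
#include <asm/cacheflush.h>     /* declares set_memory_ro()/set_memory_rw() */
#include <asm/page.h>

static char locked_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

static int __init ro_demo_init(void)
{
        /* one page, starting at locked_page, becomes read-only */
        return set_memory_ro((unsigned long)locked_page, 1);
}

static void __exit ro_demo_exit(void)
{
        /* undo it before the module memory is freed */
        set_memory_rw((unsigned long)locked_page, 1);
}

module_init(ro_demo_init);
module_exit(ro_demo_exit);
MODULE_LICENSE("GPL");          /* required: the exports are EXPORT_SYMBOL_GPL */
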
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d50302774fe..86f2ffc43c3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -63,10 +63,8 @@ static inline void pgd_list_del(pgd_t *pgd)
63#define UNSHARED_PTRS_PER_PGD \ 63#define UNSHARED_PTRS_PER_PGD \
64 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) 64 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
65 65
66static void pgd_ctor(void *p) 66static void pgd_ctor(pgd_t *pgd)
67{ 67{
68 pgd_t *pgd = p;
69
70 /* If the pgd points to a shared pagetable level (either the 68 /* If the pgd points to a shared pagetable level (either the
71 ptes in non-PAE, or shared PMD in PAE), then just copy the 69 ptes in non-PAE, or shared PMD in PAE), then just copy the
72 references from swapper_pg_dir. */ 70 references from swapper_pg_dir. */
@@ -87,7 +85,7 @@ static void pgd_ctor(void *p)
87 pgd_list_add(pgd); 85 pgd_list_add(pgd);
88} 86}
89 87
90static void pgd_dtor(void *pgd) 88static void pgd_dtor(pgd_t *pgd)
91{ 89{
92 unsigned long flags; /* can be called from interrupt context */ 90 unsigned long flags; /* can be called from interrupt context */
93 91
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index cab0abbd1eb..0951db9ee51 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -123,7 +123,8 @@ static int __init parse_vmalloc(char *arg)
123 if (!arg) 123 if (!arg)
124 return -EINVAL; 124 return -EINVAL;
125 125
126 __VMALLOC_RESERVE = memparse(arg, &arg); 126 /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/
127 __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
127 return 0; 128 return 0;
128} 129}
129early_param("vmalloc", parse_vmalloc); 130early_param("vmalloc", parse_vmalloc);
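
The vmalloc= tweak above means a request like vmalloc=128M now reserves 128 MB plus VMALLOC_OFFSET (the 8 MB guard hole on 32-bit x86), so the usable area keeps its full requested size. A userspace illustration; parse_size() below is a simplified stand-in for the kernel's memparse(), not the real function:

#include <stdio.h>
#include <stdlib.h>

#define VMALLOC_OFFSET (8UL * 1024 * 1024)

static unsigned long parse_size(const char *s)
{
        char *end;
        unsigned long val = strtoul(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': val <<= 10;   /* fall through */
        case 'M': case 'm': val <<= 10;   /* fall through */
        case 'K': case 'k': val <<= 10;
        }
        return val;
}

int main(void)
{
        unsigned long reserve = parse_size("128M") + VMALLOC_OFFSET;

        printf("vmalloc reserve: %lu bytes (%lu MB)\n", reserve, reserve >> 20);
        return 0;                               /* prints 142606336 bytes (136 MB) */
}
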
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 56b4757a1f4..43ac5af338d 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -10,11 +10,12 @@
10 10
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/ptrace.h>
14#include <linux/nmi.h>
13#include <asm/msr.h> 15#include <asm/msr.h>
14#include <asm/ptrace.h>
15#include <asm/fixmap.h> 16#include <asm/fixmap.h>
16#include <asm/apic.h> 17#include <asm/apic.h>
17#include <asm/nmi.h> 18
18 19
19#include "op_x86_model.h" 20#include "op_x86_model.h"
20#include "op_counter.h" 21#include "op_counter.h"
@@ -40,7 +41,7 @@ static unsigned int num_controls = NUM_CONTROLS_NON_HT;
40static inline void setup_num_counters(void) 41static inline void setup_num_counters(void)
41{ 42{
42#ifdef CONFIG_SMP 43#ifdef CONFIG_SMP
43 if (smp_num_siblings == 2){ 44 if (smp_num_siblings == 2) {
44 num_counters = NUM_COUNTERS_HT2; 45 num_counters = NUM_COUNTERS_HT2;
45 num_controls = NUM_CONTROLS_HT2; 46 num_controls = NUM_CONTROLS_HT2;
46 } 47 }
@@ -86,7 +87,7 @@ struct p4_event_binding {
86#define CTR_FLAME_2 (1 << 6) 87#define CTR_FLAME_2 (1 << 6)
87#define CTR_IQ_5 (1 << 7) 88#define CTR_IQ_5 (1 << 7)
88 89
89static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = { 90static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
90 { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 }, 91 { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 },
91 { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 }, 92 { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 },
92 { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 }, 93 { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
@@ -97,32 +98,32 @@ static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = {
97 { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 } 98 { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 }
98}; 99};
99 100
100#define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT 101#define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
101 102
102/* p4 event codes in libop/op_event.h are indices into this table. */ 103/* p4 event codes in libop/op_event.h are indices into this table. */
103 104
104static struct p4_event_binding p4_events[NUM_EVENTS] = { 105static struct p4_event_binding p4_events[NUM_EVENTS] = {
105 106
106 { /* BRANCH_RETIRED */ 107 { /* BRANCH_RETIRED */
107 0x05, 0x06, 108 0x05, 0x06,
108 { {CTR_IQ_4, MSR_P4_CRU_ESCR2}, 109 { {CTR_IQ_4, MSR_P4_CRU_ESCR2},
109 {CTR_IQ_5, MSR_P4_CRU_ESCR3} } 110 {CTR_IQ_5, MSR_P4_CRU_ESCR3} }
110 }, 111 },
111 112
112 { /* MISPRED_BRANCH_RETIRED */ 113 { /* MISPRED_BRANCH_RETIRED */
113 0x04, 0x03, 114 0x04, 0x03,
114 { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, 115 { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
115 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 116 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
116 }, 117 },
117 118
118 { /* TC_DELIVER_MODE */ 119 { /* TC_DELIVER_MODE */
119 0x01, 0x01, 120 0x01, 0x01,
120 { { CTR_MS_0, MSR_P4_TC_ESCR0}, 121 { { CTR_MS_0, MSR_P4_TC_ESCR0},
121 { CTR_MS_2, MSR_P4_TC_ESCR1} } 122 { CTR_MS_2, MSR_P4_TC_ESCR1} }
122 }, 123 },
123 124
124 { /* BPU_FETCH_REQUEST */ 125 { /* BPU_FETCH_REQUEST */
125 0x00, 0x03, 126 0x00, 0x03,
126 { { CTR_BPU_0, MSR_P4_BPU_ESCR0}, 127 { { CTR_BPU_0, MSR_P4_BPU_ESCR0},
127 { CTR_BPU_2, MSR_P4_BPU_ESCR1} } 128 { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
128 }, 129 },
@@ -146,7 +147,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
146 }, 147 },
147 148
148 { /* LOAD_PORT_REPLAY */ 149 { /* LOAD_PORT_REPLAY */
149 0x02, 0x04, 150 0x02, 0x04,
150 { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, 151 { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
151 { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } 152 { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
152 }, 153 },
@@ -170,43 +171,43 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
170 }, 171 },
171 172
172 { /* BSQ_CACHE_REFERENCE */ 173 { /* BSQ_CACHE_REFERENCE */
173 0x07, 0x0c, 174 0x07, 0x0c,
174 { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, 175 { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
175 { CTR_BPU_2, MSR_P4_BSU_ESCR1} } 176 { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
176 }, 177 },
177 178
178 { /* IOQ_ALLOCATION */ 179 { /* IOQ_ALLOCATION */
179 0x06, 0x03, 180 0x06, 0x03,
180 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 181 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
181 { 0, 0 } } 182 { 0, 0 } }
182 }, 183 },
183 184
184 { /* IOQ_ACTIVE_ENTRIES */ 185 { /* IOQ_ACTIVE_ENTRIES */
185 0x06, 0x1a, 186 0x06, 0x1a,
186 { { CTR_BPU_2, MSR_P4_FSB_ESCR1}, 187 { { CTR_BPU_2, MSR_P4_FSB_ESCR1},
187 { 0, 0 } } 188 { 0, 0 } }
188 }, 189 },
189 190
190 { /* FSB_DATA_ACTIVITY */ 191 { /* FSB_DATA_ACTIVITY */
191 0x06, 0x17, 192 0x06, 0x17,
192 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 193 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
193 { CTR_BPU_2, MSR_P4_FSB_ESCR1} } 194 { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
194 }, 195 },
195 196
196 { /* BSQ_ALLOCATION */ 197 { /* BSQ_ALLOCATION */
197 0x07, 0x05, 198 0x07, 0x05,
198 { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, 199 { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
199 { 0, 0 } } 200 { 0, 0 } }
200 }, 201 },
201 202
202 { /* BSQ_ACTIVE_ENTRIES */ 203 { /* BSQ_ACTIVE_ENTRIES */
203 0x07, 0x06, 204 0x07, 0x06,
204 { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */}, 205 { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
205 { 0, 0 } } 206 { 0, 0 } }
206 }, 207 },
207 208
208 { /* X87_ASSIST */ 209 { /* X87_ASSIST */
209 0x05, 0x03, 210 0x05, 0x03,
210 { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, 211 { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
211 { CTR_IQ_5, MSR_P4_CRU_ESCR3} } 212 { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
212 }, 213 },
@@ -216,21 +217,21 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
216 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 217 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
217 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 218 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
218 }, 219 },
219 220
220 { /* PACKED_SP_UOP */ 221 { /* PACKED_SP_UOP */
221 0x01, 0x08, 222 0x01, 0x08,
222 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 223 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
223 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 224 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
224 }, 225 },
225 226
226 { /* PACKED_DP_UOP */ 227 { /* PACKED_DP_UOP */
227 0x01, 0x0c, 228 0x01, 0x0c,
228 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 229 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
229 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 230 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
230 }, 231 },
231 232
232 { /* SCALAR_SP_UOP */ 233 { /* SCALAR_SP_UOP */
233 0x01, 0x0a, 234 0x01, 0x0a,
234 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 235 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
235 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 236 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
236 }, 237 },
@@ -242,31 +243,31 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
242 }, 243 },
243 244
244 { /* 64BIT_MMX_UOP */ 245 { /* 64BIT_MMX_UOP */
245 0x01, 0x02, 246 0x01, 0x02,
246 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 247 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
247 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 248 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
248 }, 249 },
249 250
250 { /* 128BIT_MMX_UOP */ 251 { /* 128BIT_MMX_UOP */
251 0x01, 0x1a, 252 0x01, 0x1a,
252 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 253 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
253 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 254 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
254 }, 255 },
255 256
256 { /* X87_FP_UOP */ 257 { /* X87_FP_UOP */
257 0x01, 0x04, 258 0x01, 0x04,
258 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 259 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
259 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 260 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
260 }, 261 },
261 262
262 { /* X87_SIMD_MOVES_UOP */ 263 { /* X87_SIMD_MOVES_UOP */
263 0x01, 0x2e, 264 0x01, 0x2e,
264 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 265 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
265 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 266 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
266 }, 267 },
267 268
268 { /* MACHINE_CLEAR */ 269 { /* MACHINE_CLEAR */
269 0x05, 0x02, 270 0x05, 0x02,
270 { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, 271 { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
271 { CTR_IQ_5, MSR_P4_CRU_ESCR3} } 272 { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
272 }, 273 },
@@ -276,9 +277,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
276 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 277 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
277 { CTR_BPU_2, MSR_P4_FSB_ESCR1} } 278 { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
278 }, 279 },
279 280
280 { /* TC_MS_XFER */ 281 { /* TC_MS_XFER */
281 0x00, 0x05, 282 0x00, 0x05,
282 { { CTR_MS_0, MSR_P4_MS_ESCR0}, 283 { { CTR_MS_0, MSR_P4_MS_ESCR0},
283 { CTR_MS_2, MSR_P4_MS_ESCR1} } 284 { CTR_MS_2, MSR_P4_MS_ESCR1} }
284 }, 285 },
@@ -308,7 +309,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
308 }, 309 },
309 310
310 { /* INSTR_RETIRED */ 311 { /* INSTR_RETIRED */
311 0x04, 0x02, 312 0x04, 0x02,
312 { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, 313 { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
313 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 314 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
314 }, 315 },
@@ -319,14 +320,14 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
319 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 320 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
320 }, 321 },
321 322
322 { /* UOP_TYPE */ 323 { /* UOP_TYPE */
323 0x02, 0x02, 324 0x02, 0x02,
324 { { CTR_IQ_4, MSR_P4_RAT_ESCR0}, 325 { { CTR_IQ_4, MSR_P4_RAT_ESCR0},
325 { CTR_IQ_5, MSR_P4_RAT_ESCR1} } 326 { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
326 }, 327 },
327 328
328 { /* RETIRED_MISPRED_BRANCH_TYPE */ 329 { /* RETIRED_MISPRED_BRANCH_TYPE */
329 0x02, 0x05, 330 0x02, 0x05,
330 { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, 331 { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
331 { CTR_MS_2, MSR_P4_TBPU_ESCR1} } 332 { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
332 }, 333 },
@@ -349,8 +350,8 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
349#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) 350#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
350#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) 351#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
351#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) 352#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
352#define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) 353#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
353#define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) 354#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
354 355
355#define CCCR_RESERVED_BITS 0x38030FFF 356#define CCCR_RESERVED_BITS 0x38030FFF
356#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) 357#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
@@ -360,15 +361,15 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
360#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) 361#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
361#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) 362#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
362#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) 363#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
363#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) 364#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
364#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) 365#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
365#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) 366#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
366#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) 367#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
367 368
368#define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) 369#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
369#define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) 370#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
370#define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) 371#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
371#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) 372#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
372#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) 373#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
373 374
374 375
@@ -380,7 +381,7 @@ static unsigned int get_stagger(void)
380#ifdef CONFIG_SMP 381#ifdef CONFIG_SMP
381 int cpu = smp_processor_id(); 382 int cpu = smp_processor_id();
382 return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu))); 383 return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
383#endif 384#endif
384 return 0; 385 return 0;
385} 386}
386 387
@@ -395,25 +396,23 @@ static unsigned long reset_value[NUM_COUNTERS_NON_HT];
395 396
396static void p4_fill_in_addresses(struct op_msrs * const msrs) 397static void p4_fill_in_addresses(struct op_msrs * const msrs)
397{ 398{
398 unsigned int i; 399 unsigned int i;
399 unsigned int addr, cccraddr, stag; 400 unsigned int addr, cccraddr, stag;
400 401
401 setup_num_counters(); 402 setup_num_counters();
402 stag = get_stagger(); 403 stag = get_stagger();
403 404
404 /* initialize some registers */ 405 /* initialize some registers */
405 for (i = 0; i < num_counters; ++i) { 406 for (i = 0; i < num_counters; ++i)
406 msrs->counters[i].addr = 0; 407 msrs->counters[i].addr = 0;
407 } 408 for (i = 0; i < num_controls; ++i)
408 for (i = 0; i < num_controls; ++i) {
409 msrs->controls[i].addr = 0; 409 msrs->controls[i].addr = 0;
410 } 410
411
412 /* the counter & cccr registers we pay attention to */ 411 /* the counter & cccr registers we pay attention to */
413 for (i = 0; i < num_counters; ++i) { 412 for (i = 0; i < num_counters; ++i) {
414 addr = p4_counters[VIRT_CTR(stag, i)].counter_address; 413 addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
415 cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address; 414 cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
416 if (reserve_perfctr_nmi(addr)){ 415 if (reserve_perfctr_nmi(addr)) {
417 msrs->counters[i].addr = addr; 416 msrs->counters[i].addr = addr;
418 msrs->controls[i].addr = cccraddr; 417 msrs->controls[i].addr = cccraddr;
419 } 418 }
@@ -447,22 +446,22 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
447 if (reserve_evntsel_nmi(addr)) 446 if (reserve_evntsel_nmi(addr))
448 msrs->controls[i].addr = addr; 447 msrs->controls[i].addr = addr;
449 } 448 }
450 449
451 for (addr = MSR_P4_MS_ESCR0 + stag; 450 for (addr = MSR_P4_MS_ESCR0 + stag;
452 addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { 451 addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
453 if (reserve_evntsel_nmi(addr)) 452 if (reserve_evntsel_nmi(addr))
454 msrs->controls[i].addr = addr; 453 msrs->controls[i].addr = addr;
455 } 454 }
456 455
457 for (addr = MSR_P4_IX_ESCR0 + stag; 456 for (addr = MSR_P4_IX_ESCR0 + stag;
458 addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { 457 addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
459 if (reserve_evntsel_nmi(addr)) 458 if (reserve_evntsel_nmi(addr))
460 msrs->controls[i].addr = addr; 459 msrs->controls[i].addr = addr;
461 } 460 }
462 461
463 /* there are 2 remaining non-contiguously located ESCRs */ 462 /* there are 2 remaining non-contiguously located ESCRs */
464 463
465 if (num_counters == NUM_COUNTERS_NON_HT) { 464 if (num_counters == NUM_COUNTERS_NON_HT) {
466 /* standard non-HT CPUs handle both remaining ESCRs*/ 465 /* standard non-HT CPUs handle both remaining ESCRs*/
467 if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) 466 if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
468 msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; 467 msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
@@ -498,20 +497,20 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
498 unsigned int stag; 497 unsigned int stag;
499 498
500 stag = get_stagger(); 499 stag = get_stagger();
501 500
502 /* convert from counter *number* to counter *bit* */ 501 /* convert from counter *number* to counter *bit* */
503 counter_bit = 1 << VIRT_CTR(stag, ctr); 502 counter_bit = 1 << VIRT_CTR(stag, ctr);
504 503
505 /* find our event binding structure. */ 504 /* find our event binding structure. */
506 if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) { 505 if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
507 printk(KERN_ERR 506 printk(KERN_ERR
508 "oprofile: P4 event code 0x%lx out of range\n", 507 "oprofile: P4 event code 0x%lx out of range\n",
509 counter_config[ctr].event); 508 counter_config[ctr].event);
510 return; 509 return;
511 } 510 }
512 511
513 ev = &(p4_events[counter_config[ctr].event - 1]); 512 ev = &(p4_events[counter_config[ctr].event - 1]);
514 513
515 for (i = 0; i < maxbind; i++) { 514 for (i = 0; i < maxbind; i++) {
516 if (ev->bindings[i].virt_counter & counter_bit) { 515 if (ev->bindings[i].virt_counter & counter_bit) {
517 516
@@ -526,25 +525,24 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
526 ESCR_SET_OS_1(escr, counter_config[ctr].kernel); 525 ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
527 } 526 }
528 ESCR_SET_EVENT_SELECT(escr, ev->event_select); 527 ESCR_SET_EVENT_SELECT(escr, ev->event_select);
529 ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); 528 ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
530 ESCR_WRITE(escr, high, ev, i); 529 ESCR_WRITE(escr, high, ev, i);
531 530
532 /* modify CCCR */ 531 /* modify CCCR */
533 CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); 532 CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
534 CCCR_CLEAR(cccr); 533 CCCR_CLEAR(cccr);
535 CCCR_SET_REQUIRED_BITS(cccr); 534 CCCR_SET_REQUIRED_BITS(cccr);
536 CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); 535 CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
537 if (stag == 0) { 536 if (stag == 0)
538 CCCR_SET_PMI_OVF_0(cccr); 537 CCCR_SET_PMI_OVF_0(cccr);
539 } else { 538 else
540 CCCR_SET_PMI_OVF_1(cccr); 539 CCCR_SET_PMI_OVF_1(cccr);
541 }
542 CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); 540 CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
543 return; 541 return;
544 } 542 }
545 } 543 }
546 544
547 printk(KERN_ERR 545 printk(KERN_ERR
548 "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n", 546 "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
549 counter_config[ctr].event, stag, ctr); 547 counter_config[ctr].event, stag, ctr);
550} 548}
@@ -559,14 +557,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
559 stag = get_stagger(); 557 stag = get_stagger();
560 558
561 rdmsr(MSR_IA32_MISC_ENABLE, low, high); 559 rdmsr(MSR_IA32_MISC_ENABLE, low, high);
562 if (! MISC_PMC_ENABLED_P(low)) { 560 if (!MISC_PMC_ENABLED_P(low)) {
563 printk(KERN_ERR "oprofile: P4 PMC not available\n"); 561 printk(KERN_ERR "oprofile: P4 PMC not available\n");
564 return; 562 return;
565 } 563 }
566 564
567 /* clear the cccrs we will use */ 565 /* clear the cccrs we will use */
568 for (i = 0 ; i < num_counters ; i++) { 566 for (i = 0 ; i < num_counters ; i++) {
569 if (unlikely(!CTRL_IS_RESERVED(msrs,i))) 567 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
570 continue; 568 continue;
571 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); 569 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
572 CCCR_CLEAR(low); 570 CCCR_CLEAR(low);
@@ -576,14 +574,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
576 574
577 /* clear all escrs (including those outside our concern) */ 575 /* clear all escrs (including those outside our concern) */
578 for (i = num_counters; i < num_controls; i++) { 576 for (i = num_counters; i < num_controls; i++) {
579 if (unlikely(!CTRL_IS_RESERVED(msrs,i))) 577 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
580 continue; 578 continue;
581 wrmsr(msrs->controls[i].addr, 0, 0); 579 wrmsr(msrs->controls[i].addr, 0, 0);
582 } 580 }
583 581
584 /* setup all counters */ 582 /* setup all counters */
585 for (i = 0 ; i < num_counters ; ++i) { 583 for (i = 0 ; i < num_counters ; ++i) {
586 if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs,i))) { 584 if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
587 reset_value[i] = counter_config[i].count; 585 reset_value[i] = counter_config[i].count;
588 pmc_setup_one_p4_counter(i); 586 pmc_setup_one_p4_counter(i);
589 CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); 587 CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
@@ -603,11 +601,11 @@ static int p4_check_ctrs(struct pt_regs * const regs,
603 stag = get_stagger(); 601 stag = get_stagger();
604 602
605 for (i = 0; i < num_counters; ++i) { 603 for (i = 0; i < num_counters; ++i) {
606 604
607 if (!reset_value[i]) 605 if (!reset_value[i])
608 continue; 606 continue;
609 607
610 /* 608 /*
611 * there is some eccentricity in the hardware which 609 * there is some eccentricity in the hardware which
612 * requires that we perform 2 extra corrections: 610 * requires that we perform 2 extra corrections:
613 * 611 *
@@ -616,24 +614,24 @@ static int p4_check_ctrs(struct pt_regs * const regs,
616 * 614 *
617 * - write the counter back twice to ensure it gets 615 * - write the counter back twice to ensure it gets
618 * updated properly. 616 * updated properly.
619 * 617 *
620 * the former seems to be related to extra NMIs happening 618 * the former seems to be related to extra NMIs happening
621 * during the current NMI; the latter is reported as errata 619 * during the current NMI; the latter is reported as errata
622 * N15 in intel doc 249199-029, pentium 4 specification 620 * N15 in intel doc 249199-029, pentium 4 specification
623 * update, though their suggested work-around does not 621 * update, though their suggested work-around does not
624 * appear to solve the problem. 622 * appear to solve the problem.
625 */ 623 */
626 624
627 real = VIRT_CTR(stag, i); 625 real = VIRT_CTR(stag, i);
628 626
629 CCCR_READ(low, high, real); 627 CCCR_READ(low, high, real);
630 CTR_READ(ctr, high, real); 628 CTR_READ(ctr, high, real);
631 if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { 629 if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
632 oprofile_add_sample(regs, i); 630 oprofile_add_sample(regs, i);
633 CTR_WRITE(reset_value[i], real); 631 CTR_WRITE(reset_value[i], real);
634 CCCR_CLEAR_OVF(low); 632 CCCR_CLEAR_OVF(low);
635 CCCR_WRITE(low, high, real); 633 CCCR_WRITE(low, high, real);
636 CTR_WRITE(reset_value[i], real); 634 CTR_WRITE(reset_value[i], real);
637 } 635 }
638 } 636 }
639 637
@@ -683,15 +681,16 @@ static void p4_shutdown(struct op_msrs const * const msrs)
683 int i; 681 int i;
684 682
685 for (i = 0 ; i < num_counters ; ++i) { 683 for (i = 0 ; i < num_counters ; ++i) {
686 if (CTR_IS_RESERVED(msrs,i)) 684 if (CTR_IS_RESERVED(msrs, i))
687 release_perfctr_nmi(msrs->counters[i].addr); 685 release_perfctr_nmi(msrs->counters[i].addr);
688 } 686 }
689 /* some of the control registers are specially reserved in 687 /*
688 * some of the control registers are specially reserved in
690 * conjunction with the counter registers (hence the starting offset). 689 * conjunction with the counter registers (hence the starting offset).
691 * This saves a few bits. 690 * This saves a few bits.
692 */ 691 */
693 for (i = num_counters ; i < num_controls ; ++i) { 692 for (i = num_counters ; i < num_controls ; ++i) {
694 if (CTRL_IS_RESERVED(msrs,i)) 693 if (CTRL_IS_RESERVED(msrs, i))
695 release_evntsel_nmi(msrs->controls[i].addr); 694 release_evntsel_nmi(msrs->controls[i].addr);
696 } 695 }
697} 696}
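
The hunks above are style cleanups in the oprofile P4 model (single-statement branches lose their braces, commas gain spaces) with no functional change. For readers new to the Pentium 4 PMU, the following sketch condenses the ESCR/CCCR programming sequence that pmc_setup_one_p4_counter() performs once it has located a counter binding. It is illustrative only and not part of the patch: the helper name and parameters are invented, the event-binding struct name (struct p4_event_binding) is assumed from the surrounding file, and the macros are the ones used in the hunks above.

/*
 * Sketch only: program one ESCR/CCCR pair the way the code above does.
 * 'binding' is the index into ev->bindings[] found by the search loop.
 */
static void p4_program_pair_sketch(struct p4_event_binding *ev, int binding,
				   unsigned int virt_ctr, unsigned int event_mask)
{
	unsigned int escr = 0, cccr = 0, high = 0;

	/* ESCR: choose the event and its unit mask, then write it out */
	ESCR_SET_EVENT_SELECT(escr, ev->event_select);
	ESCR_SET_EVENT_MASK(escr, event_mask);
	ESCR_WRITE(escr, high, ev, binding);

	/* CCCR: clear it, set the must-be-one bits, point it at the ESCR,
	 * and route the overflow PMI to logical processor 0 */
	CCCR_READ(cccr, high, virt_ctr);
	CCCR_CLEAR(cccr);
	CCCR_SET_REQUIRED_BITS(cccr);
	CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
	CCCR_SET_PMI_OVF_0(cccr);
	CCCR_WRITE(cccr, high, virt_ctr);
}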
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 6a0fca78c36..22e057665e5 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -580,7 +580,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
580 unsigned long action, void *hcpu) 580 unsigned long action, void *hcpu)
581{ 581{
582 int cpu = (long)hcpu; 582 int cpu = (long)hcpu;
583 switch(action) { 583 switch (action) {
584 case CPU_ONLINE: 584 case CPU_ONLINE:
585 case CPU_ONLINE_FROZEN: 585 case CPU_ONLINE_FROZEN:
586 smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); 586 smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
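
The amd_bus.c change is a pure style fix (space after the switch keyword). The function it touches follows the standard CPU-hotplug notifier pattern; the sketch below restates that pattern in isolation. It is illustrative only, assuming the 2.6.27-era API (struct notifier_block, register_cpu_notifier, NOTIFY_OK); the example_* names are hypothetical.

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* per-CPU setup runs here, typically pushed to the target
		 * CPU via smp_call_function_single() as in amd_cpu_notify() */
		printk(KERN_INFO "example: cpu %d came online\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_notify,
};

/* registered once during init: register_cpu_notifier(&example_cpu_notifier); */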
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 8e077185e18..006599db0dc 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1043,35 +1043,44 @@ static void __init pcibios_fixup_irqs(void)
1043 if (io_apic_assign_pci_irqs) { 1043 if (io_apic_assign_pci_irqs) {
1044 int irq; 1044 int irq;
1045 1045
1046 if (pin) { 1046 if (!pin)
1047 /* 1047 continue;
1048 * interrupt pins are numbered starting 1048
1049 * from 1 1049 /*
1050 */ 1050 * interrupt pins are numbered starting from 1
1051 pin--; 1051 */
1052 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, 1052 pin--;
1053 PCI_SLOT(dev->devfn), pin); 1053 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
1054 /* 1054 PCI_SLOT(dev->devfn), pin);
1055 * Busses behind bridges are typically not listed in the MP-table. 1055 /*
1056 * In this case we have to look up the IRQ based on the parent bus, 1056 * Busses behind bridges are typically not listed in the
1057 * parent slot, and pin number. The SMP code detects such bridged 1057 * MP-table. In this case we have to look up the IRQ
1058 * busses itself so we should get into this branch reliably. 1058 * based on the parent bus, parent slot, and pin number.
1059 */ 1059 * The SMP code detects such bridged busses itself so we
1060 if (irq < 0 && dev->bus->parent) { /* go back to the bridge */ 1060 * should get into this branch reliably.
1061 struct pci_dev *bridge = dev->bus->self; 1061 */
1062 1062 if (irq < 0 && dev->bus->parent) {
1063 pin = (pin + PCI_SLOT(dev->devfn)) % 4; 1063 /* go back to the bridge */
1064 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 1064 struct pci_dev *bridge = dev->bus->self;
1065 PCI_SLOT(bridge->devfn), pin); 1065 int bus;
1066 if (irq >= 0) 1066
1067 dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n", 1067 pin = (pin + PCI_SLOT(dev->devfn)) % 4;
1068 pci_name(bridge), 1068 bus = bridge->bus->number;
1069 'A' + pin, irq); 1069 irq = IO_APIC_get_PCI_irq_vector(bus,
1070 } 1070 PCI_SLOT(bridge->devfn), pin);
1071 if (irq >= 0) { 1071 if (irq >= 0)
1072 dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq); 1072 dev_warn(&dev->dev,
1073 dev->irq = irq; 1073 "using bridge %s INT %c to "
1074 } 1074 "get IRQ %d\n",
1075 pci_name(bridge),
1076 'A' + pin, irq);
1077 }
1078 if (irq >= 0) {
1079 dev_info(&dev->dev,
1080 "PCI->APIC IRQ transform: INT %c "
1081 "-> IRQ %d\n",
1082 'A' + pin, irq);
1083 dev->irq = irq;
1075 } 1084 }
1076 } 1085 }
1077#endif 1086#endif
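
The irq.c hunk flattens the nesting in pcibios_fixup_irqs() by bailing out early when a device has no interrupt pin, then reflows the bridge fallback. The core of that fallback is the standard PCI interrupt swizzle: a device behind a PCI-PCI bridge has its pin rotated by its slot number before the lookup is retried on the parent bus. A minimal sketch, using the same 0-based pin convention as the function above (INTA == 0 after the pin-- adjustment):

/* Illustrative only; mirrors the swizzle in the hunk above. */
static inline int bridge_swizzle_pin(int pin, unsigned int devfn)
{
	return (pin + PCI_SLOT(devfn)) % 4;	/* INTA..INTD rotate per slot */
}

/*
 * Example: a device in slot 2 raising INTA (pin 0) is seen as INTC (pin 2)
 * at the bridge, so IO_APIC_get_PCI_irq_vector() is retried with that pin
 * and the bridge's own slot number.
 */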
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 4fc7e872c85..d1e9b53f9d3 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -1,5 +1,3 @@
1.text
2
3/* 1/*
4 * This may not use any stack, nor any variable that is not "NoSave": 2 * This may not use any stack, nor any variable that is not "NoSave":
5 * 3 *
@@ -12,17 +10,18 @@
12#include <asm/segment.h> 10#include <asm/segment.h>
13#include <asm/page.h> 11#include <asm/page.h>
14#include <asm/asm-offsets.h> 12#include <asm/asm-offsets.h>
13#include <asm/processor-flags.h>
15 14
16 .text 15.text
17 16
18ENTRY(swsusp_arch_suspend) 17ENTRY(swsusp_arch_suspend)
19
20 movl %esp, saved_context_esp 18 movl %esp, saved_context_esp
21 movl %ebx, saved_context_ebx 19 movl %ebx, saved_context_ebx
22 movl %ebp, saved_context_ebp 20 movl %ebp, saved_context_ebp
23 movl %esi, saved_context_esi 21 movl %esi, saved_context_esi
24 movl %edi, saved_context_edi 22 movl %edi, saved_context_edi
25 pushfl ; popl saved_context_eflags 23 pushfl
24 popl saved_context_eflags
26 25
27 call swsusp_save 26 call swsusp_save
28 ret 27 ret
@@ -59,7 +58,7 @@ done:
59 movl mmu_cr4_features, %ecx 58 movl mmu_cr4_features, %ecx
60 jecxz 1f # cr4 Pentium and higher, skip if zero 59 jecxz 1f # cr4 Pentium and higher, skip if zero
61 movl %ecx, %edx 60 movl %ecx, %edx
62 andl $~(1<<7), %edx; # PGE 61 andl $~(X86_CR4_PGE), %edx
63 movl %edx, %cr4; # turn off PGE 62 movl %edx, %cr4; # turn off PGE
641: 631:
65 movl %cr3, %eax; # flush TLB 64 movl %cr3, %eax; # flush TLB
@@ -74,7 +73,8 @@ done:
74 movl saved_context_esi, %esi 73 movl saved_context_esi, %esi
75 movl saved_context_edi, %edi 74 movl saved_context_edi, %edi
76 75
77 pushl saved_context_eflags ; popfl 76 pushl saved_context_eflags
77 popfl
78 78
79 xorl %eax, %eax 79 xorl %eax, %eax
80 80
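
In hibernate_asm_32.S the open-coded 1<<7 mask is replaced by the named X86_CR4_PGE constant from the newly included <asm/processor-flags.h>, and the pushfl/popfl pairs are split onto separate lines; the generated code is unchanged. The reason CR4.PGE is cleared at all: global TLB entries survive a CR3 reload, so PGE has to be dropped for the reload to flush everything before the restored page tables take effect. A C-level sketch of the same sequence, illustrative only and assuming the read_cr3/write_cr3/read_cr4/write_cr4 helpers from <asm/system.h>:

#include <asm/system.h>
#include <asm/processor-flags.h>

static inline void flush_tlb_all_sketch(void)
{
	unsigned long cr4 = read_cr4();

	write_cr4(cr4 & ~X86_CR4_PGE);	/* global pages off: their TLB entries go away */
	write_cr3(read_cr3());		/* CR3 reload flushes the non-global entries */
	write_cr4(cr4);			/* restore PGE */
}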
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a4e201b47f6..7dcd321a050 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -812,7 +812,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
812 812
813/* Early in boot, while setting up the initial pagetable, assume 813/* Early in boot, while setting up the initial pagetable, assume
814 everything is pinned. */ 814 everything is pinned. */
815static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn) 815static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
816{ 816{
817#ifdef CONFIG_FLATMEM 817#ifdef CONFIG_FLATMEM
818 BUG_ON(mem_map); /* should only be used early */ 818 BUG_ON(mem_map); /* should only be used early */
@@ -822,7 +822,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn)
822 822
823/* Early release_pte assumes that all pts are pinned, since there's 823/* Early release_pte assumes that all pts are pinned, since there's
824 only init_mm and anything attached to that is pinned. */ 824 only init_mm and anything attached to that is pinned. */
825static void xen_release_pte_init(u32 pfn) 825static void xen_release_pte_init(unsigned long pfn)
826{ 826{
827 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 827 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
828} 828}
@@ -838,7 +838,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
838 838
839/* This needs to make sure the new pte page is pinned iff its being 839/* This needs to make sure the new pte page is pinned iff its being
840 attached to a pinned pagetable. */ 840 attached to a pinned pagetable. */
841static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level) 841static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
842{ 842{
843 struct page *page = pfn_to_page(pfn); 843 struct page *page = pfn_to_page(pfn);
844 844
@@ -856,12 +856,12 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
856 } 856 }
857} 857}
858 858
859static void xen_alloc_pte(struct mm_struct *mm, u32 pfn) 859static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
860{ 860{
861 xen_alloc_ptpage(mm, pfn, PT_PTE); 861 xen_alloc_ptpage(mm, pfn, PT_PTE);
862} 862}
863 863
864static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn) 864static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
865{ 865{
866 xen_alloc_ptpage(mm, pfn, PT_PMD); 866 xen_alloc_ptpage(mm, pfn, PT_PMD);
867} 867}
@@ -909,7 +909,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
909} 909}
910 910
911/* This should never happen until we're OK to use struct page */ 911/* This should never happen until we're OK to use struct page */
912static void xen_release_ptpage(u32 pfn, unsigned level) 912static void xen_release_ptpage(unsigned long pfn, unsigned level)
913{ 913{
914 struct page *page = pfn_to_page(pfn); 914 struct page *page = pfn_to_page(pfn);
915 915
@@ -923,23 +923,23 @@ static void xen_release_ptpage(u32 pfn, unsigned level)
923 } 923 }
924} 924}
925 925
926static void xen_release_pte(u32 pfn) 926static void xen_release_pte(unsigned long pfn)
927{ 927{
928 xen_release_ptpage(pfn, PT_PTE); 928 xen_release_ptpage(pfn, PT_PTE);
929} 929}
930 930
931static void xen_release_pmd(u32 pfn) 931static void xen_release_pmd(unsigned long pfn)
932{ 932{
933 xen_release_ptpage(pfn, PT_PMD); 933 xen_release_ptpage(pfn, PT_PMD);
934} 934}
935 935
936#if PAGETABLE_LEVELS == 4 936#if PAGETABLE_LEVELS == 4
937static void xen_alloc_pud(struct mm_struct *mm, u32 pfn) 937static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
938{ 938{
939 xen_alloc_ptpage(mm, pfn, PT_PUD); 939 xen_alloc_ptpage(mm, pfn, PT_PUD);
940} 940}
941 941
942static void xen_release_pud(u32 pfn) 942static void xen_release_pud(unsigned long pfn)
943{ 943{
944 xen_release_ptpage(pfn, PT_PUD); 944 xen_release_ptpage(pfn, PT_PUD);
945} 945}
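
The xen/enlighten.c hunks widen the pfn parameters of the pagetable-page hooks from u32 to unsigned long. The generic mm code passes page frame numbers around as unsigned long, so a u32 parameter silently truncates any pfn that needs more than 32 bits (physical addresses beyond 16 TB with 4 KB pages) and leaves these hooks out of step with the prototypes they are plugged into. A one-line illustration, mirroring xen_release_pte_init() above (sketch only):

static void release_pte_sketch(unsigned long pfn)	/* was: u32 pfn */
{
	/* with a u32 parameter the pfn would already be truncated here */
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}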
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h
index 714207a1c38..f5705761a37 100644
--- a/include/asm-x86/a.out-core.h
+++ b/include/asm-x86/a.out-core.h
@@ -9,8 +9,8 @@
9 * 2 of the Licence, or (at your option) any later version. 9 * 2 of the Licence, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef _ASM_A_OUT_CORE_H 12#ifndef ASM_X86__A_OUT_CORE_H
13#define _ASM_A_OUT_CORE_H 13#define ASM_X86__A_OUT_CORE_H
14 14
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16#ifdef CONFIG_X86_32 16#ifdef CONFIG_X86_32
@@ -70,4 +70,4 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
70 70
71#endif /* CONFIG_X86_32 */ 71#endif /* CONFIG_X86_32 */
72#endif /* __KERNEL__ */ 72#endif /* __KERNEL__ */
73#endif /* _ASM_A_OUT_CORE_H */ 73#endif /* ASM_X86__A_OUT_CORE_H */
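
The remainder of this merge is dominated by one mechanical change: every header under include/asm-x86/ switches to a guard derived from its path, of the form ASM_X86__<FILENAME>_H, and the closing #endif gains a matching comment. This removes guard collisions between headers that had reused the same symbol (for example, cmpxchg_32.h and cmpxchg_64.h both used __ASM_CMPXCHG_H further down) and makes the guards checkable by script. The convention, shown on a hypothetical include/asm-x86/example.h:

#ifndef ASM_X86__EXAMPLE_H
#define ASM_X86__EXAMPLE_H

/* declarations ... */

#endif /* ASM_X86__EXAMPLE_H */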
diff --git a/include/asm-x86/a.out.h b/include/asm-x86/a.out.h
index 4684f97a5bb..0948748bc69 100644
--- a/include/asm-x86/a.out.h
+++ b/include/asm-x86/a.out.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_A_OUT_H 1#ifndef ASM_X86__A_OUT_H
2#define _ASM_X86_A_OUT_H 2#define ASM_X86__A_OUT_H
3 3
4struct exec 4struct exec
5{ 5{
@@ -17,4 +17,4 @@ struct exec
17#define N_DRSIZE(a) ((a).a_drsize) 17#define N_DRSIZE(a) ((a).a_drsize)
18#define N_SYMSIZE(a) ((a).a_syms) 18#define N_SYMSIZE(a) ((a).a_syms)
19 19
20#endif /* _ASM_X86_A_OUT_H */ 20#endif /* ASM_X86__A_OUT_H */
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 35d1743b57a..392e17336be 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ACPI_H 1#ifndef ASM_X86__ACPI_H
2#define _ASM_X86_ACPI_H 2#define ASM_X86__ACPI_H
3 3
4/* 4/*
5 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
@@ -175,4 +175,4 @@ static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
175 175
176#define acpi_unlazy_tlb(x) leave_mm(x) 176#define acpi_unlazy_tlb(x) leave_mm(x)
177 177
178#endif /*__X86_ASM_ACPI_H*/ 178#endif /* ASM_X86__ACPI_H */
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index e4004a9f6a9..3617fd4fcdf 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_AGP_H 1#ifndef ASM_X86__AGP_H
2#define _ASM_X86_AGP_H 2#define ASM_X86__AGP_H
3 3
4#include <asm/pgtable.h> 4#include <asm/pgtable.h>
5#include <asm/cacheflush.h> 5#include <asm/cacheflush.h>
@@ -32,4 +32,4 @@
32#define free_gatt_pages(table, order) \ 32#define free_gatt_pages(table, order) \
33 free_pages((unsigned long)(table), (order)) 33 free_pages((unsigned long)(table), (order))
34 34
35#endif 35#endif /* ASM_X86__AGP_H */
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
index f6aa18eadf7..22d3c9862bf 100644
--- a/include/asm-x86/alternative.h
+++ b/include/asm-x86/alternative.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ALTERNATIVE_H 1#ifndef ASM_X86__ALTERNATIVE_H
2#define _ASM_X86_ALTERNATIVE_H 2#define ASM_X86__ALTERNATIVE_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/stddef.h> 5#include <linux/stddef.h>
@@ -180,4 +180,4 @@ extern void add_nops(void *insns, unsigned int len);
180extern void *text_poke(void *addr, const void *opcode, size_t len); 180extern void *text_poke(void *addr, const void *opcode, size_t len);
181extern void *text_poke_early(void *addr, const void *opcode, size_t len); 181extern void *text_poke_early(void *addr, const void *opcode, size_t len);
182 182
183#endif /* _ASM_X86_ALTERNATIVE_H */ 183#endif /* ASM_X86__ALTERNATIVE_H */
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
index 30a12049353..783f43e5805 100644
--- a/include/asm-x86/amd_iommu.h
+++ b/include/asm-x86/amd_iommu.h
@@ -17,8 +17,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#ifndef _ASM_X86_AMD_IOMMU_H 20#ifndef ASM_X86__AMD_IOMMU_H
21#define _ASM_X86_AMD_IOMMU_H 21#define ASM_X86__AMD_IOMMU_H
22 22
23#ifdef CONFIG_AMD_IOMMU 23#ifdef CONFIG_AMD_IOMMU
24extern int amd_iommu_init(void); 24extern int amd_iommu_init(void);
@@ -29,4 +29,4 @@ static inline int amd_iommu_init(void) { return -ENODEV; }
29static inline void amd_iommu_detect(void) { } 29static inline void amd_iommu_detect(void) { }
30#endif 30#endif
31 31
32#endif 32#endif /* ASM_X86__AMD_IOMMU_H */
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index dcc81206739..1ffa4e53c98 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -17,8 +17,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#ifndef __AMD_IOMMU_TYPES_H__ 20#ifndef ASM_X86__AMD_IOMMU_TYPES_H
21#define __AMD_IOMMU_TYPES_H__ 21#define ASM_X86__AMD_IOMMU_TYPES_H
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/list.h> 24#include <linux/list.h>
@@ -341,4 +341,4 @@ static inline u16 calc_devid(u8 bus, u8 devfn)
341 return (((u16)bus) << 8) | devfn; 341 return (((u16)bus) << 8) | devfn;
342} 342}
343 343
344#endif 344#endif /* ASM_X86__AMD_IOMMU_TYPES_H */
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 133c998161c..65590c9aecd 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_APIC_H 1#ifndef ASM_X86__APIC_H
2#define _ASM_X86_APIC_H 2#define ASM_X86__APIC_H
3 3
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/delay.h> 5#include <linux/delay.h>
@@ -54,6 +54,11 @@ extern int disable_apic;
54#endif 54#endif
55 55
56extern int is_vsmp_box(void); 56extern int is_vsmp_box(void);
57extern void xapic_wait_icr_idle(void);
58extern u32 safe_xapic_wait_icr_idle(void);
59extern u64 xapic_icr_read(void);
60extern void xapic_icr_write(u32, u32);
61extern int setup_profiling_timer(unsigned int);
57 62
58static inline void native_apic_write(unsigned long reg, u32 v) 63static inline void native_apic_write(unsigned long reg, u32 v)
59{ 64{
@@ -76,9 +81,7 @@ extern int get_physical_broadcast(void);
76static inline void ack_APIC_irq(void) 81static inline void ack_APIC_irq(void)
77{ 82{
78 /* 83 /*
79 * ack_APIC_irq() actually gets compiled as a single instruction: 84 * ack_APIC_irq() actually gets compiled as a single instruction
80 * - a single rmw on Pentium/82489DX
81 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
82 * ... yummie. 85 * ... yummie.
83 */ 86 */
84 87
@@ -128,4 +131,4 @@ static inline void init_apic_mappings(void) { }
128 131
129#endif /* !CONFIG_X86_LOCAL_APIC */ 132#endif /* !CONFIG_X86_LOCAL_APIC */
130 133
131#endif /* __ASM_APIC_H */ 134#endif /* ASM_X86__APIC_H */
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index 6b9008c7873..c40687da20f 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_APICDEF_H 1#ifndef ASM_X86__APICDEF_H
2#define _ASM_X86_APICDEF_H 2#define ASM_X86__APICDEF_H
3 3
4/* 4/*
5 * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) 5 * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
@@ -411,4 +411,4 @@ struct local_apic {
411#else 411#else
412 #define BAD_APICID 0xFFFFu 412 #define BAD_APICID 0xFFFFu
413#endif 413#endif
414#endif 414#endif /* ASM_X86__APICDEF_H */
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h
index 8411750ceb6..72adc3a109c 100644
--- a/include/asm-x86/arch_hooks.h
+++ b/include/asm-x86/arch_hooks.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_ARCH_HOOKS_H 1#ifndef ASM_X86__ARCH_HOOKS_H
2#define _ASM_ARCH_HOOKS_H 2#define ASM_X86__ARCH_HOOKS_H
3 3
4#include <linux/interrupt.h> 4#include <linux/interrupt.h>
5 5
@@ -25,4 +25,4 @@ extern void pre_time_init_hook(void);
25extern void time_init_hook(void); 25extern void time_init_hook(void);
26extern void mca_nmi_hook(void); 26extern void mca_nmi_hook(void);
27 27
28#endif 28#endif /* ASM_X86__ARCH_HOOKS_H */
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 97220321f39..e1355f44d7c 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ASM_H 1#ifndef ASM_X86__ASM_H
2#define _ASM_X86_ASM_H 2#define ASM_X86__ASM_H
3 3
4#ifdef __ASSEMBLY__ 4#ifdef __ASSEMBLY__
5# define __ASM_FORM(x) x 5# define __ASM_FORM(x) x
@@ -20,17 +20,22 @@
20 20
21#define _ASM_PTR __ASM_SEL(.long, .quad) 21#define _ASM_PTR __ASM_SEL(.long, .quad)
22#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) 22#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
23#define _ASM_MOV_UL __ASM_SIZE(mov)
24 23
24#define _ASM_MOV __ASM_SIZE(mov)
25#define _ASM_INC __ASM_SIZE(inc) 25#define _ASM_INC __ASM_SIZE(inc)
26#define _ASM_DEC __ASM_SIZE(dec) 26#define _ASM_DEC __ASM_SIZE(dec)
27#define _ASM_ADD __ASM_SIZE(add) 27#define _ASM_ADD __ASM_SIZE(add)
28#define _ASM_SUB __ASM_SIZE(sub) 28#define _ASM_SUB __ASM_SIZE(sub)
29#define _ASM_XADD __ASM_SIZE(xadd) 29#define _ASM_XADD __ASM_SIZE(xadd)
30
30#define _ASM_AX __ASM_REG(ax) 31#define _ASM_AX __ASM_REG(ax)
31#define _ASM_BX __ASM_REG(bx) 32#define _ASM_BX __ASM_REG(bx)
32#define _ASM_CX __ASM_REG(cx) 33#define _ASM_CX __ASM_REG(cx)
33#define _ASM_DX __ASM_REG(dx) 34#define _ASM_DX __ASM_REG(dx)
35#define _ASM_SP __ASM_REG(sp)
36#define _ASM_BP __ASM_REG(bp)
37#define _ASM_SI __ASM_REG(si)
38#define _ASM_DI __ASM_REG(di)
34 39
35/* Exception table entry */ 40/* Exception table entry */
36# define _ASM_EXTABLE(from,to) \ 41# define _ASM_EXTABLE(from,to) \
@@ -39,4 +44,4 @@
39 _ASM_PTR #from "," #to "\n" \ 44 _ASM_PTR #from "," #to "\n" \
40 " .previous\n" 45 " .previous\n"
41 46
42#endif /* _ASM_X86_ASM_H */ 47#endif /* ASM_X86__ASM_H */
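
Besides the guard rename, asm.h drops _ASM_MOV_UL in favour of the shorter _ASM_MOV and grows stack/base/index register selectors (_ASM_SP, _ASM_BP, _ASM_SI, _ASM_DI). These macros let inline asm and .S code shared between 32- and 64-bit builds spell instructions and registers once. A comment-only sketch of how the selectors expand (not exhaustive; the quoting and spacing that __ASM_FORM adds in C context is omitted):

/*
 *                 CONFIG_X86_32    CONFIG_X86_64
 *   _ASM_MOV      movl             movq
 *   _ASM_ADD      addl             addq
 *   _ASM_PTR      .long            .quad
 *   _ASM_AX       eax              rax
 *   _ASM_SP       esp              rsp
 */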
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h
index 21a4825148c..14d3f0beb88 100644
--- a/include/asm-x86/atomic_32.h
+++ b/include/asm-x86/atomic_32.h
@@ -1,5 +1,5 @@
1#ifndef __ARCH_I386_ATOMIC__ 1#ifndef ASM_X86__ATOMIC_32_H
2#define __ARCH_I386_ATOMIC__ 2#define ASM_X86__ATOMIC_32_H
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
@@ -256,4 +256,4 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
256#define smp_mb__after_atomic_inc() barrier() 256#define smp_mb__after_atomic_inc() barrier()
257 257
258#include <asm-generic/atomic.h> 258#include <asm-generic/atomic.h>
259#endif 259#endif /* ASM_X86__ATOMIC_32_H */
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 91c7d03e65b..2cb218c4a35 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -1,5 +1,5 @@
1#ifndef __ARCH_X86_64_ATOMIC__ 1#ifndef ASM_X86__ATOMIC_64_H
2#define __ARCH_X86_64_ATOMIC__ 2#define ASM_X86__ATOMIC_64_H
3 3
4#include <asm/alternative.h> 4#include <asm/alternative.h>
5#include <asm/cmpxchg.h> 5#include <asm/cmpxchg.h>
@@ -470,4 +470,4 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
470#define smp_mb__after_atomic_inc() barrier() 470#define smp_mb__after_atomic_inc() barrier()
471 471
472#include <asm-generic/atomic.h> 472#include <asm-generic/atomic.h>
473#endif 473#endif /* ASM_X86__ATOMIC_64_H */
diff --git a/include/asm-x86/auxvec.h b/include/asm-x86/auxvec.h
index 87f5e6d5a02..12c7cac7420 100644
--- a/include/asm-x86/auxvec.h
+++ b/include/asm-x86/auxvec.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_AUXVEC_H 1#ifndef ASM_X86__AUXVEC_H
2#define _ASM_X86_AUXVEC_H 2#define ASM_X86__AUXVEC_H
3/* 3/*
4 * Architecture-neutral AT_ values in 0-17, leave some room 4 * Architecture-neutral AT_ values in 0-17, leave some room
5 * for more of them, start the x86-specific ones at 32. 5 * for more of them, start the x86-specific ones at 32.
@@ -9,4 +9,4 @@
9#endif 9#endif
10#define AT_SYSINFO_EHDR 33 10#define AT_SYSINFO_EHDR 33
11 11
12#endif 12#endif /* ASM_X86__AUXVEC_H */
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
index 0033e50c13b..ec42ed87459 100644
--- a/include/asm-x86/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
@@ -1,5 +1,5 @@
1#ifndef _MACH_BIOS_EBDA_H 1#ifndef ASM_X86__BIOS_EBDA_H
2#define _MACH_BIOS_EBDA_H 2#define ASM_X86__BIOS_EBDA_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5 5
@@ -16,4 +16,4 @@ static inline unsigned int get_bios_ebda(void)
16 16
17void reserve_ebda_region(void); 17void reserve_ebda_region(void);
18 18
19#endif /* _MACH_BIOS_EBDA_H */ 19#endif /* ASM_X86__BIOS_EBDA_H */
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index cfb2b64f76e..61989b93b47 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BITOPS_H 1#ifndef ASM_X86__BITOPS_H
2#define _ASM_X86_BITOPS_H 2#define ASM_X86__BITOPS_H
3 3
4/* 4/*
5 * Copyright 1992, Linus Torvalds. 5 * Copyright 1992, Linus Torvalds.
@@ -458,4 +458,4 @@ static inline void set_bit_string(unsigned long *bitmap,
458#include <asm-generic/bitops/minix.h> 458#include <asm-generic/bitops/minix.h>
459 459
460#endif /* __KERNEL__ */ 460#endif /* __KERNEL__ */
461#endif /* _ASM_X86_BITOPS_H */ 461#endif /* ASM_X86__BITOPS_H */
diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h
index 2faed7ecb09..825de5dc867 100644
--- a/include/asm-x86/boot.h
+++ b/include/asm-x86/boot.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_BOOT_H 1#ifndef ASM_X86__BOOT_H
2#define _ASM_BOOT_H 2#define ASM_X86__BOOT_H
3 3
4/* Don't touch these, unless you really know what you're doing. */ 4/* Don't touch these, unless you really know what you're doing. */
5#define DEF_INITSEG 0x9000 5#define DEF_INITSEG 0x9000
@@ -25,4 +25,4 @@
25#define BOOT_STACK_SIZE 0x1000 25#define BOOT_STACK_SIZE 0x1000
26#endif 26#endif
27 27
28#endif /* _ASM_BOOT_H */ 28#endif /* ASM_X86__BOOT_H */
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index ae22bdf0ab1..ccf027e2d97 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_BOOTPARAM_H 1#ifndef ASM_X86__BOOTPARAM_H
2#define _ASM_BOOTPARAM_H 2#define ASM_X86__BOOTPARAM_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/screen_info.h> 5#include <linux/screen_info.h>
@@ -108,4 +108,4 @@ struct boot_params {
108 __u8 _pad9[276]; /* 0xeec */ 108 __u8 _pad9[276]; /* 0xeec */
109} __attribute__((packed)); 109} __attribute__((packed));
110 110
111#endif /* _ASM_BOOTPARAM_H */ 111#endif /* ASM_X86__BOOTPARAM_H */
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h
index b69aa64b82a..91ad43a54c4 100644
--- a/include/asm-x86/bug.h
+++ b/include/asm-x86/bug.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BUG_H 1#ifndef ASM_X86__BUG_H
2#define _ASM_X86_BUG_H 2#define ASM_X86__BUG_H
3 3
4#ifdef CONFIG_BUG 4#ifdef CONFIG_BUG
5#define HAVE_ARCH_BUG 5#define HAVE_ARCH_BUG
@@ -36,4 +36,4 @@ do { \
36#endif /* !CONFIG_BUG */ 36#endif /* !CONFIG_BUG */
37 37
38#include <asm-generic/bug.h> 38#include <asm-generic/bug.h>
39#endif 39#endif /* ASM_X86__BUG_H */
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index 021cbdd5f25..4761c461d23 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -1,7 +1,7 @@
1#ifndef _ASM_X86_BUGS_H 1#ifndef ASM_X86__BUGS_H
2#define _ASM_X86_BUGS_H 2#define ASM_X86__BUGS_H
3 3
4extern void check_bugs(void); 4extern void check_bugs(void);
5int ppro_with_ram_bug(void); 5int ppro_with_ram_bug(void);
6 6
7#endif /* _ASM_X86_BUGS_H */ 7#endif /* ASM_X86__BUGS_H */
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index e02ae2d89ac..722f27d6810 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BYTEORDER_H 1#ifndef ASM_X86__BYTEORDER_H
2#define _ASM_X86_BYTEORDER_H 2#define ASM_X86__BYTEORDER_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -78,4 +78,4 @@ static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
78 78
79#include <linux/byteorder/little_endian.h> 79#include <linux/byteorder/little_endian.h>
80 80
81#endif /* _ASM_X86_BYTEORDER_H */ 81#endif /* ASM_X86__BYTEORDER_H */
diff --git a/include/asm-x86/cache.h b/include/asm-x86/cache.h
index 1e0bac86f38..ea3f1cc06a9 100644
--- a/include/asm-x86/cache.h
+++ b/include/asm-x86/cache.h
@@ -1,5 +1,5 @@
1#ifndef _ARCH_X86_CACHE_H 1#ifndef ASM_X86__CACHE_H
2#define _ARCH_X86_CACHE_H 2#define ASM_X86__CACHE_H
3 3
4/* L1 cache line size */ 4/* L1 cache line size */
5#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) 5#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
@@ -17,4 +17,4 @@
17#endif 17#endif
18#endif 18#endif
19 19
20#endif 20#endif /* ASM_X86__CACHE_H */
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index f4c0ab50d2c..59859cb28a3 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_CACHEFLUSH_H 1#ifndef ASM_X86__CACHEFLUSH_H
2#define _ASM_X86_CACHEFLUSH_H 2#define ASM_X86__CACHEFLUSH_H
3 3
4/* Keep includes the same across arches. */ 4/* Keep includes the same across arches. */
5#include <linux/mm.h> 5#include <linux/mm.h>
@@ -112,4 +112,4 @@ static inline int rodata_test(void)
112} 112}
113#endif 113#endif
114 114
115#endif 115#endif /* ASM_X86__CACHEFLUSH_H */
diff --git a/include/asm-x86/calgary.h b/include/asm-x86/calgary.h
index 67f60406e2d..933fd272f82 100644
--- a/include/asm-x86/calgary.h
+++ b/include/asm-x86/calgary.h
@@ -21,8 +21,8 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 22 */
23 23
24#ifndef _ASM_X86_64_CALGARY_H 24#ifndef ASM_X86__CALGARY_H
25#define _ASM_X86_64_CALGARY_H 25#define ASM_X86__CALGARY_H
26 26
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/device.h> 28#include <linux/device.h>
@@ -69,4 +69,4 @@ static inline int calgary_iommu_init(void) { return 1; }
69static inline void detect_calgary(void) { return; } 69static inline void detect_calgary(void) { return; }
70#endif 70#endif
71 71
72#endif /* _ASM_X86_64_CALGARY_H */ 72#endif /* ASM_X86__CALGARY_H */
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h
index 52bbb0d8c4c..d041e8cda22 100644
--- a/include/asm-x86/checksum_32.h
+++ b/include/asm-x86/checksum_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_CHECKSUM_H 1#ifndef ASM_X86__CHECKSUM_32_H
2#define _I386_CHECKSUM_H 2#define ASM_X86__CHECKSUM_32_H
3 3
4#include <linux/in6.h> 4#include <linux/in6.h>
5 5
@@ -186,4 +186,4 @@ static inline __wsum csum_and_copy_to_user(const void *src,
186 return (__force __wsum)-1; /* invalid checksum */ 186 return (__force __wsum)-1; /* invalid checksum */
187} 187}
188 188
189#endif 189#endif /* ASM_X86__CHECKSUM_32_H */
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h
index 8bd861cc526..110f403beb8 100644
--- a/include/asm-x86/checksum_64.h
+++ b/include/asm-x86/checksum_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_CHECKSUM_H 1#ifndef ASM_X86__CHECKSUM_64_H
2#define _X86_64_CHECKSUM_H 2#define ASM_X86__CHECKSUM_64_H
3 3
4/* 4/*
5 * Checksums for x86-64 5 * Checksums for x86-64
@@ -188,4 +188,4 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
188 return a; 188 return a;
189} 189}
190 190
191#endif 191#endif /* ASM_X86__CHECKSUM_64_H */
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index bf5a69d1329..0622e45cdf7 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_CMPXCHG_H 1#ifndef ASM_X86__CMPXCHG_32_H
2#define __ASM_CMPXCHG_H 2#define ASM_X86__CMPXCHG_32_H
3 3
4#include <linux/bitops.h> /* for LOCK_PREFIX */ 4#include <linux/bitops.h> /* for LOCK_PREFIX */
5 5
@@ -341,4 +341,4 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
341 341
342#endif 342#endif
343 343
344#endif 344#endif /* ASM_X86__CMPXCHG_32_H */
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index 17463ccf816..63c1a5e61b9 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_CMPXCHG_H 1#ifndef ASM_X86__CMPXCHG_64_H
2#define __ASM_CMPXCHG_H 2#define ASM_X86__CMPXCHG_64_H
3 3
4#include <asm/alternative.h> /* Provides LOCK_PREFIX */ 4#include <asm/alternative.h> /* Provides LOCK_PREFIX */
5 5
@@ -182,4 +182,4 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
182 cmpxchg_local((ptr), (o), (n)); \ 182 cmpxchg_local((ptr), (o), (n)); \
183}) 183})
184 184
185#endif 185#endif /* ASM_X86__CMPXCHG_64_H */
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
index 1793ac317a3..6732b150949 100644
--- a/include/asm-x86/compat.h
+++ b/include/asm-x86/compat.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_COMPAT_H 1#ifndef ASM_X86__COMPAT_H
2#define _ASM_X86_64_COMPAT_H 2#define ASM_X86__COMPAT_H
3 3
4/* 4/*
5 * Architecture specific compatibility types 5 * Architecture specific compatibility types
@@ -215,4 +215,4 @@ static inline int is_compat_task(void)
215 return current_thread_info()->status & TS_COMPAT; 215 return current_thread_info()->status & TS_COMPAT;
216} 216}
217 217
218#endif /* _ASM_X86_64_COMPAT_H */ 218#endif /* ASM_X86__COMPAT_H */
diff --git a/include/asm-x86/cpu.h b/include/asm-x86/cpu.h
index 73f2ea84fd7..83a115083f0 100644
--- a/include/asm-x86/cpu.h
+++ b/include/asm-x86/cpu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_I386_CPU_H_ 1#ifndef ASM_X86__CPU_H
2#define _ASM_I386_CPU_H_ 2#define ASM_X86__CPU_H
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/cpu.h> 5#include <linux/cpu.h>
@@ -17,4 +17,4 @@ extern void arch_unregister_cpu(int);
17#endif 17#endif
18 18
19DECLARE_PER_CPU(int, cpu_state); 19DECLARE_PER_CPU(int, cpu_state);
20#endif /* _ASM_I386_CPU_H_ */ 20#endif /* ASM_X86__CPU_H */
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index cfcfb0a806b..250fa0cb144 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Defines x86 CPU feature bits 2 * Defines x86 CPU feature bits
3 */ 3 */
4#ifndef _ASM_X86_CPUFEATURE_H 4#ifndef ASM_X86__CPUFEATURE_H
5#define _ASM_X86_CPUFEATURE_H 5#define ASM_X86__CPUFEATURE_H
6 6
7#include <asm/required-features.h> 7#include <asm/required-features.h>
8 8
@@ -224,4 +224,4 @@ extern const char * const x86_power_flags[32];
224 224
225#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ 225#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
226 226
227#endif /* _ASM_X86_CPUFEATURE_H */ 227#endif /* ASM_X86__CPUFEATURE_H */
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h
index 7515c19d498..a863ead856f 100644
--- a/include/asm-x86/current.h
+++ b/include/asm-x86/current.h
@@ -1,5 +1,5 @@
1#ifndef _X86_CURRENT_H 1#ifndef ASM_X86__CURRENT_H
2#define _X86_CURRENT_H 2#define ASM_X86__CURRENT_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -36,4 +36,4 @@ static __always_inline struct task_struct *get_current(void)
36 36
37#define current get_current() 37#define current get_current()
38 38
39#endif /* X86_CURRENT_H */ 39#endif /* ASM_X86__CURRENT_H */
diff --git a/include/asm-x86/debugreg.h b/include/asm-x86/debugreg.h
index c6344d572b0..ecb6907c3ea 100644
--- a/include/asm-x86/debugreg.h
+++ b/include/asm-x86/debugreg.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DEBUGREG_H 1#ifndef ASM_X86__DEBUGREG_H
2#define _ASM_X86_DEBUGREG_H 2#define ASM_X86__DEBUGREG_H
3 3
4 4
5/* Indicate the register numbers for a number of the specific 5/* Indicate the register numbers for a number of the specific
@@ -67,4 +67,4 @@
67#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ 67#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
68#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ 68#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
69 69
70#endif 70#endif /* ASM_X86__DEBUGREG_H */
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
index 409a649204a..8a0da95b4fc 100644
--- a/include/asm-x86/delay.h
+++ b/include/asm-x86/delay.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DELAY_H 1#ifndef ASM_X86__DELAY_H
2#define _ASM_X86_DELAY_H 2#define ASM_X86__DELAY_H
3 3
4/* 4/*
5 * Copyright (C) 1993 Linus Torvalds 5 * Copyright (C) 1993 Linus Torvalds
@@ -28,4 +28,4 @@ extern void __delay(unsigned long loops);
28 28
29void use_tsc_delay(void); 29void use_tsc_delay(void);
30 30
31#endif /* _ASM_X86_DELAY_H */ 31#endif /* ASM_X86__DELAY_H */
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index a44c4dc7059..b73fea54def 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_DESC_H_ 1#ifndef ASM_X86__DESC_H
2#define _ASM_DESC_H_ 2#define ASM_X86__DESC_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <asm/desc_defs.h> 5#include <asm/desc_defs.h>
@@ -397,4 +397,4 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist)
397 397
398#endif /* __ASSEMBLY__ */ 398#endif /* __ASSEMBLY__ */
399 399
400#endif 400#endif /* ASM_X86__DESC_H */
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h
index f7bacf357da..b881db664b4 100644
--- a/include/asm-x86/desc_defs.h
+++ b/include/asm-x86/desc_defs.h
@@ -1,6 +1,6 @@
1/* Written 2000 by Andi Kleen */ 1/* Written 2000 by Andi Kleen */
2#ifndef __ARCH_DESC_DEFS_H 2#ifndef ASM_X86__DESC_DEFS_H
3#define __ARCH_DESC_DEFS_H 3#define ASM_X86__DESC_DEFS_H
4 4
5/* 5/*
6 * Segment descriptor structure definitions, usable from both x86_64 and i386 6 * Segment descriptor structure definitions, usable from both x86_64 and i386
@@ -92,4 +92,4 @@ struct desc_ptr {
92 92
93#endif /* !__ASSEMBLY__ */ 93#endif /* !__ASSEMBLY__ */
94 94
95#endif 95#endif /* ASM_X86__DESC_DEFS_H */
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 3c034f48fdb..1bece04c7d9 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DEVICE_H 1#ifndef ASM_X86__DEVICE_H
2#define _ASM_X86_DEVICE_H 2#define ASM_X86__DEVICE_H
3 3
4struct dev_archdata { 4struct dev_archdata {
5#ifdef CONFIG_ACPI 5#ifdef CONFIG_ACPI
@@ -13,4 +13,4 @@ struct dma_mapping_ops *dma_ops;
13#endif 13#endif
14}; 14};
15 15
16#endif /* _ASM_X86_DEVICE_H */ 16#endif /* ASM_X86__DEVICE_H */
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index 9a2d644c08e..f9530f23f1d 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DIV64_H 1#ifndef ASM_X86__DIV64_H
2#define _ASM_X86_DIV64_H 2#define ASM_X86__DIV64_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5 5
@@ -57,4 +57,4 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
57# include <asm-generic/div64.h> 57# include <asm-generic/div64.h>
58#endif /* CONFIG_X86_32 */ 58#endif /* CONFIG_X86_32 */
59 59
60#endif /* _ASM_X86_DIV64_H */ 60#endif /* ASM_X86__DIV64_H */
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index ad9cd6d49bf..5d200e78bd8 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_DMA_MAPPING_H_ 1#ifndef ASM_X86__DMA_MAPPING_H
2#define _ASM_DMA_MAPPING_H_ 2#define ASM_X86__DMA_MAPPING_H
3 3
4/* 4/*
5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for 5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
@@ -250,4 +250,4 @@ static inline int dma_get_cache_alignment(void)
250#define dma_is_consistent(d, h) (1) 250#define dma_is_consistent(d, h) (1)
251 251
252#include <asm-generic/dma-coherent.h> 252#include <asm-generic/dma-coherent.h>
253#endif 253#endif /* ASM_X86__DMA_MAPPING_H */
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h
index ca1098a7e58..c9f7a4eec55 100644
--- a/include/asm-x86/dma.h
+++ b/include/asm-x86/dma.h
@@ -5,8 +5,8 @@
5 * and John Boyd, Nov. 1992. 5 * and John Boyd, Nov. 1992.
6 */ 6 */
7 7
8#ifndef _ASM_X86_DMA_H 8#ifndef ASM_X86__DMA_H
9#define _ASM_X86_DMA_H 9#define ASM_X86__DMA_H
10 10
11#include <linux/spinlock.h> /* And spinlocks */ 11#include <linux/spinlock.h> /* And spinlocks */
12#include <asm/io.h> /* need byte IO */ 12#include <asm/io.h> /* need byte IO */
@@ -315,4 +315,4 @@ extern int isa_dma_bridge_buggy;
315#define isa_dma_bridge_buggy (0) 315#define isa_dma_bridge_buggy (0)
316#endif 316#endif
317 317
318#endif /* _ASM_X86_DMA_H */ 318#endif /* ASM_X86__DMA_H */
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h
index 58a86571fe0..1cff6fe81fa 100644
--- a/include/asm-x86/dmi.h
+++ b/include/asm-x86/dmi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DMI_H 1#ifndef ASM_X86__DMI_H
2#define _ASM_X86_DMI_H 2#define ASM_X86__DMI_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5 5
@@ -23,4 +23,4 @@ static inline void *dmi_alloc(unsigned len)
23#define dmi_ioremap early_ioremap 23#define dmi_ioremap early_ioremap
24#define dmi_iounmap early_iounmap 24#define dmi_iounmap early_iounmap
25 25
26#endif 26#endif /* ASM_X86__DMI_H */
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h
index 7881368142f..c3c953a45b2 100644
--- a/include/asm-x86/ds.h
+++ b/include/asm-x86/ds.h
@@ -2,71 +2,237 @@
2 * Debug Store (DS) support 2 * Debug Store (DS) support
3 * 3 *
4 * This provides a low-level interface to the hardware's Debug Store 4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for last branch recording (LBR) and 5 * feature that is used for branch trace store (BTS) and
6 * precise-event based sampling (PEBS). 6 * precise-event based sampling (PEBS).
7 * 7 *
8 * Different architectures use a different DS layout/pointer size. 8 * It manages:
9 * The below functions therefore work on a void*. 9 * - per-thread and per-cpu allocation of BTS and PEBS
10 * - buffer memory allocation (optional)
11 * - buffer overflow handling
12 * - buffer access
10 * 13 *
14 * It assumes:
15 * - get_task_struct on all parameter tasks
16 * - current is allowed to trace parameter tasks
11 * 17 *
12 * Since there is no user for PEBS, yet, only LBR (or branch
13 * trace store, BTS) is supported.
14 * 18 *
15 * 19 * Copyright (C) 2007-2008 Intel Corporation.
16 * Copyright (C) 2007 Intel Corporation. 20 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
17 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
18 */ 21 */
19 22
20#ifndef _ASM_X86_DS_H 23#ifndef ASM_X86__DS_H
21#define _ASM_X86_DS_H 24#define ASM_X86__DS_H
25
26#ifdef CONFIG_X86_DS
22 27
23#include <linux/types.h> 28#include <linux/types.h>
24#include <linux/init.h> 29#include <linux/init.h>
25 30
26struct cpuinfo_x86;
27 31
32struct task_struct;
28 33
29/* a branch trace record entry 34/*
35 * Request BTS or PEBS
36 *
37 * Due to alignement constraints, the actual buffer may be slightly
38 * smaller than the requested or provided buffer.
30 * 39 *
31 * In order to unify the interface between various processor versions, 40 * Returns 0 on success; -Eerrno otherwise
32 * we use the below data structure for all processors. 41 *
42 * task: the task to request recording for;
43 * NULL for per-cpu recording on the current cpu
44 * base: the base pointer for the (non-pageable) buffer;
45 * NULL if buffer allocation requested
46 * size: the size of the requested or provided buffer
47 * ovfl: pointer to a function to be called on buffer overflow;
48 * NULL if cyclic buffer requested
33 */ 49 */
34enum bts_qualifier { 50typedef void (*ds_ovfl_callback_t)(struct task_struct *);
35 BTS_INVALID = 0, 51extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
36 BTS_BRANCH, 52 ds_ovfl_callback_t ovfl);
37 BTS_TASK_ARRIVES, 53extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
38 BTS_TASK_DEPARTS 54 ds_ovfl_callback_t ovfl);
39};
40 55
41struct bts_struct { 56/*
42 u64 qualifier; 57 * Release BTS or PEBS resources
43 union { 58 *
44 /* BTS_BRANCH */ 59 * Frees buffers allocated on ds_request.
45 struct { 60 *
46 u64 from_ip; 61 * Returns 0 on success; -Eerrno otherwise
47 u64 to_ip; 62 *
48 } lbr; 63 * task: the task to release resources for;
49 /* BTS_TASK_ARRIVES or 64 * NULL to release resources for the current cpu
50 BTS_TASK_DEPARTS */ 65 */
51 u64 jiffies; 66extern int ds_release_bts(struct task_struct *task);
52 } variant; 67extern int ds_release_pebs(struct task_struct *task);
68
69/*
70 * Return the (array) index of the write pointer.
71 * (assuming an array of BTS/PEBS records)
72 *
73 * Returns -Eerrno on error
74 *
75 * task: the task to access;
76 * NULL to access the current cpu
77 * pos (out): if not NULL, will hold the result
78 */
79extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
80extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
81
82/*
83 * Return the (array) index one record beyond the end of the array.
84 * (assuming an array of BTS/PEBS records)
85 *
86 * Returns -Eerrno on error
87 *
88 * task: the task to access;
89 * NULL to access the current cpu
90 * pos (out): if not NULL, will hold the result
91 */
92extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
93extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
94
95/*
96 * Provide a pointer to the BTS/PEBS record at parameter index.
97 * (assuming an array of BTS/PEBS records)
98 *
99 * The pointer points directly into the buffer. The user is
100 * responsible for copying the record.
101 *
102 * Returns the size of a single record on success; -Eerrno on error
103 *
104 * task: the task to access;
105 * NULL to access the current cpu
106 * index: the index of the requested record
107 * record (out): pointer to the requested record
108 */
109extern int ds_access_bts(struct task_struct *task,
110 size_t index, const void **record);
111extern int ds_access_pebs(struct task_struct *task,
112 size_t index, const void **record);
113
114/*
115 * Write one or more BTS/PEBS records at the write pointer index and
116 * advance the write pointer.
117 *
118 * If size is not a multiple of the record size, trailing bytes are
119 * zeroed out.
120 *
121 * May result in one or more overflow notifications.
122 *
123 * If called during overflow handling, that is, with index >=
124 * interrupt threshold, the write will wrap around.
125 *
126 * An overflow notification is given if and when the interrupt
127 * threshold is reached during or after the write.
128 *
129 * Returns the number of bytes written or -Eerrno.
130 *
131 * task: the task to access;
132 * NULL to access the current cpu
133 * buffer: the buffer to write
134 * size: the size of the buffer
135 */
136extern int ds_write_bts(struct task_struct *task,
137 const void *buffer, size_t size);
138extern int ds_write_pebs(struct task_struct *task,
139 const void *buffer, size_t size);
140
141/*
142 * Same as ds_write_bts/pebs, but omit ownership checks.
143 *
144 * This is needed to have some other task than the owner of the
145 * BTS/PEBS buffer or the parameter task itself write into the
146 * respective buffer.
147 */
148extern int ds_unchecked_write_bts(struct task_struct *task,
149 const void *buffer, size_t size);
150extern int ds_unchecked_write_pebs(struct task_struct *task,
151 const void *buffer, size_t size);
152
153/*
154 * Reset the write pointer of the BTS/PEBS buffer.
155 *
156 * Returns 0 on success; -Eerrno on error
157 *
158 * task: the task to access;
159 * NULL to access the current cpu
160 */
161extern int ds_reset_bts(struct task_struct *task);
162extern int ds_reset_pebs(struct task_struct *task);
163
164/*
165 * Clear the BTS/PEBS buffer and reset the write pointer.
166 * The entire buffer will be zeroed out.
167 *
168 * Returns 0 on success; -Eerrno on error
169 *
170 * task: the task to access;
171 * NULL to access the current cpu
172 */
173extern int ds_clear_bts(struct task_struct *task);
174extern int ds_clear_pebs(struct task_struct *task);
175
176/*
177 * Provide the PEBS counter reset value.
178 *
179 * Returns 0 on success; -Eerrno on error
180 *
181 * task: the task to access;
182 * NULL to access the current cpu
183 * value (out): the counter reset value
184 */
185extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
186
187/*
188 * Set the PEBS counter reset value.
189 *
190 * Returns 0 on success; -Eerrno on error
191 *
192 * task: the task to access;
193 * NULL to access the current cpu
194 * value: the new counter reset value
195 */
196extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
197
198/*
199 * Initialization
200 */
201struct cpuinfo_x86;
202extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
203
204
205
206/*
207 * The DS context - part of struct thread_struct.
208 */
209struct ds_context {
210 /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
211 unsigned char *ds;
212 /* the owner of the BTS and PEBS configuration, respectively */
213 struct task_struct *owner[2];
214 /* buffer overflow notification function for BTS and PEBS */
215 ds_ovfl_callback_t callback[2];
216 /* the original buffer address */
217 void *buffer[2];
218 /* the number of allocated pages for on-request allocated buffers */
219 unsigned int pages[2];
220 /* use count */
221 unsigned long count;
222 /* a pointer to the context location inside the thread_struct
223 * or the per_cpu context array */
224 struct ds_context **this;
225 /* a pointer to the task owning this context, or NULL, if the
226 * context is owned by a cpu */
227 struct task_struct *task;
53}; 228};
54 229
55/* Overflow handling mechanisms */ 230/* called by exit_thread() to free leftover contexts */
56#define DS_O_SIGNAL 1 /* send overflow signal */ 231extern void ds_free(struct ds_context *context);
57#define DS_O_WRAP 2 /* wrap around */ 232
58 233#else /* CONFIG_X86_DS */
59extern int ds_allocate(void **, size_t); 234
60extern int ds_free(void **); 235#define ds_init_intel(config) do {} while (0)
61extern int ds_get_bts_size(void *); 236
62extern int ds_get_bts_end(void *); 237#endif /* CONFIG_X86_DS */
63extern int ds_get_bts_index(void *); 238#endif /* ASM_X86__DS_H */
64extern int ds_set_overflow(void *, int);
65extern int ds_get_overflow(void *);
66extern int ds_clear(void *);
67extern int ds_read_bts(void *, int, struct bts_struct *);
68extern int ds_write_bts(void *, const struct bts_struct *);
69extern unsigned long ds_debugctl_mask(void);
70extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c);
71
72#endif /* _ASM_X86_DS_H */
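
The ds.h rewrite replaces the old void*-based BTS helpers with a task- or cpu-scoped interface that also covers PEBS: request/release manage (optionally kernel-allocated) buffers, the index/end/access calls read records in place, write appends records, and reset/clear rewind the buffer. The sketch below strings a few of the declarations above together for a per-task BTS trace. Illustrative only: error handling is trimmed, the caller is assumed to hold a reference on the task (the header requires get_task_struct on parameter tasks), and the helper name is invented.

static int trace_task_bts_sketch(struct task_struct *task)
{
	const void *record;
	size_t index = 0;
	int err;

	/* NULL base: let the DS layer allocate; NULL ovfl: cyclic buffer */
	err = ds_request_bts(task, NULL, PAGE_SIZE, NULL);
	if (err)
		return err;

	/* ... let the task run and fill the buffer with branch records ... */

	if (!ds_get_bts_index(task, &index) && index > 0) {
		/* ds_access_bts() returns the record size on success;
		 * the pointer aims into the buffer, so copy the record out */
		err = ds_access_bts(task, 0, &record);
		if (err > 0)
			err = 0;
	}

	ds_release_bts(task);
	return err;
}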
diff --git a/include/asm-x86/dwarf2.h b/include/asm-x86/dwarf2.h
index 738bb9fb3e5..21d1bc32ad7 100644
--- a/include/asm-x86/dwarf2.h
+++ b/include/asm-x86/dwarf2.h
@@ -1,5 +1,5 @@
1#ifndef _DWARF2_H 1#ifndef ASM_X86__DWARF2_H
2#define _DWARF2_H 2#define ASM_X86__DWARF2_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files" 5#warning "asm/dwarf2.h should be only included in pure assembly files"
@@ -58,4 +58,4 @@
58 58
59#endif 59#endif
60 60
61#endif 61#endif /* ASM_X86__DWARF2_H */
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 16a31e2c7c5..f52daf176bc 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_E820_H 1#ifndef ASM_X86__E820_H
2#define __ASM_E820_H 2#define ASM_X86__E820_H
3#define E820MAP 0x2d0 /* our map */ 3#define E820MAP 0x2d0 /* our map */
4#define E820MAX 128 /* number of entries in E820MAP */ 4#define E820MAX 128 /* number of entries in E820MAP */
5 5
@@ -64,6 +64,7 @@ struct e820map {
64extern struct e820map e820; 64extern struct e820map e820;
65extern struct e820map e820_saved; 65extern struct e820map e820_saved;
66 66
67extern unsigned long pci_mem_start;
67extern int e820_any_mapped(u64 start, u64 end, unsigned type); 68extern int e820_any_mapped(u64 start, u64 end, unsigned type);
68extern int e820_all_mapped(u64 start, u64 end, unsigned type); 69extern int e820_all_mapped(u64 start, u64 end, unsigned type);
69extern void e820_add_region(u64 start, u64 size, int type); 70extern void e820_add_region(u64 start, u64 size, int type);
@@ -140,4 +141,4 @@ extern char *memory_setup(void);
140#define HIGH_MEMORY (1024*1024) 141#define HIGH_MEMORY (1024*1024)
141#endif /* __KERNEL__ */ 142#endif /* __KERNEL__ */
142 143
143#endif /* __ASM_E820_H */ 144#endif /* ASM_X86__E820_H */
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h
index a8088f63a30..9493c5b27bb 100644
--- a/include/asm-x86/edac.h
+++ b/include/asm-x86/edac.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_EDAC_H 1#ifndef ASM_X86__EDAC_H
2#define _ASM_X86_EDAC_H 2#define ASM_X86__EDAC_H
3 3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */ 4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5 5
@@ -15,4 +15,4 @@ static inline void atomic_scrub(void *va, u32 size)
15 asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); 15 asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
16} 16}
17 17
18#endif 18#endif /* ASM_X86__EDAC_H */
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index d4f2b0abe92..ed2de22e870 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_EFI_H 1#ifndef ASM_X86__EFI_H
2#define _ASM_X86_EFI_H 2#define ASM_X86__EFI_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5 5
@@ -94,4 +94,4 @@ extern void efi_reserve_early(void);
94extern void efi_call_phys_prelog(void); 94extern void efi_call_phys_prelog(void);
95extern void efi_call_phys_epilog(void); 95extern void efi_call_phys_epilog(void);
96 96
97#endif 97#endif /* ASM_X86__EFI_H */
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index 7be4733c793..5c4745bec90 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ELF_H 1#ifndef ASM_X86__ELF_H
2#define _ASM_X86_ELF_H 2#define ASM_X86__ELF_H
3 3
4/* 4/*
5 * ELF register definitions.. 5 * ELF register definitions..
@@ -148,8 +148,9 @@ do { \
148 148
149static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp) 149static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
150{ 150{
151 asm volatile("movl %0,%%fs" :: "r" (0)); 151 loadsegment(fs, 0);
152 asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS)); 152 loadsegment(ds, __USER32_DS);
153 loadsegment(es, __USER32_DS);
153 load_gs_index(0); 154 load_gs_index(0);
154 regs->ip = ip; 155 regs->ip = ip;
155 regs->sp = sp; 156 regs->sp = sp;
@@ -332,4 +333,4 @@ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
332extern unsigned long arch_randomize_brk(struct mm_struct *mm); 333extern unsigned long arch_randomize_brk(struct mm_struct *mm);
333#define arch_randomize_brk arch_randomize_brk 334#define arch_randomize_brk arch_randomize_brk
334 335
335#endif 336#endif /* ASM_X86__ELF_H */
diff --git a/include/asm-x86/emergency-restart.h b/include/asm-x86/emergency-restart.h
index 8e6aef19f8f..190d0d8b71e 100644
--- a/include/asm-x86/emergency-restart.h
+++ b/include/asm-x86/emergency-restart.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_EMERGENCY_RESTART_H 1#ifndef ASM_X86__EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H 2#define ASM_X86__EMERGENCY_RESTART_H
3 3
4enum reboot_type { 4enum reboot_type {
5 BOOT_TRIPLE = 't', 5 BOOT_TRIPLE = 't',
@@ -15,4 +15,4 @@ extern enum reboot_type reboot_type;
15 15
16extern void machine_emergency_restart(void); 16extern void machine_emergency_restart(void);
17 17
18#endif /* _ASM_EMERGENCY_RESTART_H */ 18#endif /* ASM_X86__EMERGENCY_RESTART_H */
diff --git a/include/asm-x86/fb.h b/include/asm-x86/fb.h
index 53018464aea..aca38dbd9a6 100644
--- a/include/asm-x86/fb.h
+++ b/include/asm-x86/fb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_FB_H 1#ifndef ASM_X86__FB_H
2#define _ASM_X86_FB_H 2#define ASM_X86__FB_H
3 3
4#include <linux/fb.h> 4#include <linux/fb.h>
5#include <linux/fs.h> 5#include <linux/fs.h>
@@ -18,4 +18,4 @@ extern int fb_is_primary_device(struct fb_info *info);
18static inline int fb_is_primary_device(struct fb_info *info) { return 0; } 18static inline int fb_is_primary_device(struct fb_info *info) { return 0; }
19#endif 19#endif
20 20
21#endif /* _ASM_X86_FB_H */ 21#endif /* ASM_X86__FB_H */
diff --git a/include/asm-x86/fixmap.h b/include/asm-x86/fixmap.h
index 44d4f821734..78e33a1bc59 100644
--- a/include/asm-x86/fixmap.h
+++ b/include/asm-x86/fixmap.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_FIXMAP_H 1#ifndef ASM_X86__FIXMAP_H
2#define _ASM_FIXMAP_H 2#define ASM_X86__FIXMAP_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5# include "fixmap_32.h" 5# include "fixmap_32.h"
@@ -65,4 +65,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
65 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 65 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
66 return __virt_to_fix(vaddr); 66 return __virt_to_fix(vaddr);
67} 67}
68#endif 68#endif /* ASM_X86__FIXMAP_H */
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index f1ac2b2167d..784e3e75986 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -10,8 +10,8 @@
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */ 11 */
12 12
13#ifndef _ASM_FIXMAP_32_H 13#ifndef ASM_X86__FIXMAP_32_H
14#define _ASM_FIXMAP_32_H 14#define ASM_X86__FIXMAP_32_H
15 15
16 16
17/* used by vmalloc.c, vsyscall.lds.S. 17/* used by vmalloc.c, vsyscall.lds.S.
@@ -120,4 +120,4 @@ extern void reserve_top_address(unsigned long reserve);
120#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) 120#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
121 121
122#endif /* !__ASSEMBLY__ */ 122#endif /* !__ASSEMBLY__ */
123#endif 123#endif /* ASM_X86__FIXMAP_32_H */
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index 00f3d74a052..dafb24bc042 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -8,8 +8,8 @@
8 * Copyright (C) 1998 Ingo Molnar 8 * Copyright (C) 1998 Ingo Molnar
9 */ 9 */
10 10
11#ifndef _ASM_FIXMAP_64_H 11#ifndef ASM_X86__FIXMAP_64_H
12#define _ASM_FIXMAP_64_H 12#define ASM_X86__FIXMAP_64_H
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <asm/acpi.h> 15#include <asm/acpi.h>
@@ -80,4 +80,4 @@ enum fixed_addresses {
80#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) 80#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
81#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) 81#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
82 82
83#endif 83#endif /* ASM_X86__FIXMAP_64_H */
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h
index dbe82a5c5ea..7d83a3a83e3 100644
--- a/include/asm-x86/floppy.h
+++ b/include/asm-x86/floppy.h
@@ -7,8 +7,8 @@
7 * 7 *
8 * Copyright (C) 1995 8 * Copyright (C) 1995
9 */ 9 */
10#ifndef _ASM_X86_FLOPPY_H 10#ifndef ASM_X86__FLOPPY_H
11#define _ASM_X86_FLOPPY_H 11#define ASM_X86__FLOPPY_H
12 12
13#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
14 14
@@ -278,4 +278,4 @@ static int FDC2 = -1;
278 278
279#define EXTRA_FLOPPY_PARAMS 279#define EXTRA_FLOPPY_PARAMS
280 280
281#endif /* _ASM_X86_FLOPPY_H */ 281#endif /* ASM_X86__FLOPPY_H */
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h
index 5c68b32ee1c..be0e004ad14 100644
--- a/include/asm-x86/ftrace.h
+++ b/include/asm-x86/ftrace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_FTRACE 1#ifndef ASM_X86__FTRACE_H
2#define _ASM_X86_FTRACE 2#define ASM_X86__FTRACE_H
3 3
4#ifdef CONFIG_FTRACE 4#ifdef CONFIG_FTRACE
5#define MCOUNT_ADDR ((long)(mcount)) 5#define MCOUNT_ADDR ((long)(mcount))
@@ -11,4 +11,4 @@ extern void mcount(void);
11 11
12#endif /* CONFIG_FTRACE */ 12#endif /* CONFIG_FTRACE */
13 13
14#endif /* _ASM_X86_FTRACE */ 14#endif /* ASM_X86__FTRACE_H */
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index e7a76b37b33..06b924ef6fa 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_FUTEX_H 1#ifndef ASM_X86__FUTEX_H
2#define _ASM_X86_FUTEX_H 2#define ASM_X86__FUTEX_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -25,7 +25,7 @@
25 asm volatile("1:\tmovl %2, %0\n" \ 25 asm volatile("1:\tmovl %2, %0\n" \
26 "\tmovl\t%0, %3\n" \ 26 "\tmovl\t%0, %3\n" \
27 "\t" insn "\n" \ 27 "\t" insn "\n" \
28 "2:\tlock; cmpxchgl %3, %2\n" \ 28 "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
29 "\tjnz\t1b\n" \ 29 "\tjnz\t1b\n" \
30 "3:\t.section .fixup,\"ax\"\n" \ 30 "3:\t.section .fixup,\"ax\"\n" \
31 "4:\tmov\t%5, %1\n" \ 31 "4:\tmov\t%5, %1\n" \
@@ -64,7 +64,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
64 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); 64 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
65 break; 65 break;
66 case FUTEX_OP_ADD: 66 case FUTEX_OP_ADD:
67 __futex_atomic_op1("lock; xaddl %0, %2", ret, oldval, 67 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
68 uaddr, oparg); 68 uaddr, oparg);
69 break; 69 break;
70 case FUTEX_OP_OR: 70 case FUTEX_OP_OR:
@@ -122,7 +122,7 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
122 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 122 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
123 return -EFAULT; 123 return -EFAULT;
124 124
125 asm volatile("1:\tlock; cmpxchgl %3, %1\n" 125 asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
126 "2:\t.section .fixup, \"ax\"\n" 126 "2:\t.section .fixup, \"ax\"\n"
127 "3:\tmov %2, %0\n" 127 "3:\tmov %2, %0\n"
128 "\tjmp 2b\n" 128 "\tjmp 2b\n"
@@ -137,4 +137,4 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
137} 137}
138 138
139#endif 139#endif
140#endif 140#endif /* ASM_X86__FUTEX_H */
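The futex.h hunks above replace literal "lock; " strings in the inline assembly with LOCK_PREFIX. On SMP the emitted instruction is the same, but LOCK_PREFIX also records where each lock prefix lives so that, when only one CPU is present, the alternatives code can patch the prefix out at boot; a hard-coded "lock; " can never be found or patched. A simplified sketch of the mechanism (illustrative, 32-bit flavoured, not the kernel's exact macro):

	#ifdef CONFIG_SMP
	/*
	 * Emit the lock prefix and record its address in a dedicated
	 * section; boot code can walk .smp_locks and overwrite the
	 * prefix with a NOP when only one CPU is online.
	 */
	# define LOCK_PREFIX_SKETCH			\
		".section .smp_locks,\"a\"\n"		\
		"	.long 661f\n"			\
		".previous\n"				\
		"661:\n\tlock; "
	#else
	# define LOCK_PREFIX_SKETCH ""
	#endif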
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 3f62a83887f..baa54faba89 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X8664_GART_H 1#ifndef ASM_X86__GART_H
2#define _ASM_X8664_GART_H 1 2#define ASM_X86__GART_H
3 3
4#include <asm/e820.h> 4#include <asm/e820.h>
5 5
@@ -52,15 +52,15 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
52 return 0; 52 return 0;
53 53
54 if (aper_base + aper_size > 0x100000000ULL) { 54 if (aper_base + aper_size > 0x100000000ULL) {
55 printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n"); 55 printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
56 return 0; 56 return 0;
57 } 57 }
58 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { 58 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
59 printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n"); 59 printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
60 return 0; 60 return 0;
61 } 61 }
62 if (aper_size < min_size) { 62 if (aper_size < min_size) {
63 printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n", 63 printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
64 aper_size>>20, min_size>>20); 64 aper_size>>20, min_size>>20);
65 return 0; 65 return 0;
66 } 66 }
@@ -68,4 +68,4 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
68 return 1; 68 return 1;
69} 69}
70 70
71#endif 71#endif /* ASM_X86__GART_H */
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index 754d635f90f..34280f02766 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_GENAPIC_H 1#ifndef ASM_X86__GENAPIC_32_H
2#define _ASM_GENAPIC_H 1 2#define ASM_X86__GENAPIC_32_H
3 3
4#include <asm/mpspec.h> 4#include <asm/mpspec.h>
5 5
@@ -121,4 +121,4 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
121#define uv_system_init() do {} while (0) 121#define uv_system_init() do {} while (0)
122 122
123 123
124#endif 124#endif /* ASM_X86__GENAPIC_32_H */
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index a47d6312913..25097a8cc5e 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_GENAPIC_H 1#ifndef ASM_X86__GENAPIC_64_H
2#define _ASM_GENAPIC_H 1 2#define ASM_X86__GENAPIC_64_H
3 3
4/* 4/*
5 * Copyright 2004 James Cleverdon, IBM. 5 * Copyright 2004 James Cleverdon, IBM.
@@ -47,4 +47,4 @@ extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
47 47
48extern void setup_apic_routing(void); 48extern void setup_apic_routing(void);
49 49
50#endif 50#endif /* ASM_X86__GENAPIC_64_H */
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 2c1cda0b8a8..3f3444be263 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -7,8 +7,8 @@
7 * as published by the Free Software Foundation. 7 * as published by the Free Software Foundation.
8 */ 8 */
9 9
10#ifndef _ASM_GEODE_H_ 10#ifndef ASM_X86__GEODE_H
11#define _ASM_GEODE_H_ 11#define ASM_X86__GEODE_H
12 12
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <linux/io.h> 14#include <linux/io.h>
@@ -250,4 +250,4 @@ extern int __init mfgpt_timer_setup(void);
250static inline int mfgpt_timer_setup(void) { return 0; } 250static inline int mfgpt_timer_setup(void) { return 0; }
251#endif 251#endif
252 252
253#endif 253#endif /* ASM_X86__GEODE_H */
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index c4c91b37c10..497fb980d96 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -53,4 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
53 53
54#endif /* CONFIG_GPIOLIB */ 54#endif /* CONFIG_GPIOLIB */
55 55
56#endif /* _ASM_I386_GPIO_H */ 56#endif /* ASM_X86__GPIO_H */
diff --git a/include/asm-x86/hardirq_32.h b/include/asm-x86/hardirq_32.h
index 4f85f0f4b56..700fe230d91 100644
--- a/include/asm-x86/hardirq_32.h
+++ b/include/asm-x86/hardirq_32.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_HARDIRQ_H 1#ifndef ASM_X86__HARDIRQ_32_H
2#define __ASM_HARDIRQ_H 2#define ASM_X86__HARDIRQ_32_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5#include <linux/irq.h> 5#include <linux/irq.h>
@@ -25,4 +25,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
25void ack_bad_irq(unsigned int irq); 25void ack_bad_irq(unsigned int irq);
26#include <linux/irq_cpustat.h> 26#include <linux/irq_cpustat.h>
27 27
28#endif /* __ASM_HARDIRQ_H */ 28#endif /* ASM_X86__HARDIRQ_32_H */
diff --git a/include/asm-x86/hardirq_64.h b/include/asm-x86/hardirq_64.h
index 95d5e090ed8..f8bd2919a8c 100644
--- a/include/asm-x86/hardirq_64.h
+++ b/include/asm-x86/hardirq_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_HARDIRQ_H 1#ifndef ASM_X86__HARDIRQ_64_H
2#define __ASM_HARDIRQ_H 2#define ASM_X86__HARDIRQ_64_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5#include <linux/irq.h> 5#include <linux/irq.h>
@@ -20,4 +20,4 @@
20 20
21extern void ack_bad_irq(unsigned int irq); 21extern void ack_bad_irq(unsigned int irq);
22 22
23#endif /* __ASM_HARDIRQ_H */ 23#endif /* ASM_X86__HARDIRQ_64_H */
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index 4514b16cc72..bc3f6a28031 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -15,8 +15,8 @@
15 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 15 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
16 */ 16 */
17 17
18#ifndef _ASM_HIGHMEM_H 18#ifndef ASM_X86__HIGHMEM_H
19#define _ASM_HIGHMEM_H 19#define ASM_X86__HIGHMEM_H
20 20
21#ifdef __KERNEL__ 21#ifdef __KERNEL__
22 22
@@ -79,4 +79,4 @@ extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
79 79
80#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
81 81
82#endif /* _ASM_HIGHMEM_H */ 82#endif /* ASM_X86__HIGHMEM_H */
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h
index 82f1ac641bd..cbbbb6d4dd3 100644
--- a/include/asm-x86/hpet.h
+++ b/include/asm-x86/hpet.h
@@ -1,5 +1,5 @@
1#ifndef ASM_X86_HPET_H 1#ifndef ASM_X86__HPET_H
2#define ASM_X86_HPET_H 2#define ASM_X86__HPET_H
3 3
4#ifdef CONFIG_HPET_TIMER 4#ifdef CONFIG_HPET_TIMER
5 5
@@ -90,4 +90,4 @@ static inline int is_hpet_enabled(void) { return 0; }
90#define hpet_readl(a) 0 90#define hpet_readl(a) 0
91 91
92#endif 92#endif
93#endif /* ASM_X86_HPET_H */ 93#endif /* ASM_X86__HPET_H */
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
index 439a9acc132..0b7ec5dc088 100644
--- a/include/asm-x86/hugetlb.h
+++ b/include/asm-x86/hugetlb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_HUGETLB_H 1#ifndef ASM_X86__HUGETLB_H
2#define _ASM_X86_HUGETLB_H 2#define ASM_X86__HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5
@@ -90,4 +90,4 @@ static inline void arch_release_hugepage(struct page *page)
90{ 90{
91} 91}
92 92
93#endif /* _ASM_X86_HUGETLB_H */ 93#endif /* ASM_X86__HUGETLB_H */
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index edd0b95f14d..65997b15d56 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_HW_IRQ_H 1#ifndef ASM_X86__HW_IRQ_H
2#define _ASM_HW_IRQ_H 2#define ASM_X86__HW_IRQ_H
3 3
4/* 4/*
5 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar 5 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
@@ -93,6 +93,26 @@ extern asmlinkage void qic_reschedule_interrupt(void);
93extern asmlinkage void qic_enable_irq_interrupt(void); 93extern asmlinkage void qic_enable_irq_interrupt(void);
94extern asmlinkage void qic_call_function_interrupt(void); 94extern asmlinkage void qic_call_function_interrupt(void);
95 95
96/* SMP */
97extern void smp_apic_timer_interrupt(struct pt_regs *);
98#ifdef CONFIG_X86_32
99extern void smp_spurious_interrupt(struct pt_regs *);
100extern void smp_error_interrupt(struct pt_regs *);
101#else
102extern asmlinkage void smp_spurious_interrupt(void);
103extern asmlinkage void smp_error_interrupt(void);
104#endif
105#ifdef CONFIG_X86_SMP
106extern void smp_reschedule_interrupt(struct pt_regs *);
107extern void smp_call_function_interrupt(struct pt_regs *);
108extern void smp_call_function_single_interrupt(struct pt_regs *);
109#ifdef CONFIG_X86_32
110extern void smp_invalidate_interrupt(struct pt_regs *);
111#else
112extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
113#endif
114#endif
115
96#ifdef CONFIG_X86_32 116#ifdef CONFIG_X86_32
97extern void (*const interrupt[NR_IRQS])(void); 117extern void (*const interrupt[NR_IRQS])(void);
98#else 118#else
@@ -112,4 +132,4 @@ static inline void __setup_vector_irq(int cpu) {}
112 132
113#endif /* !ASSEMBLY_ */ 133#endif /* !ASSEMBLY_ */
114 134
115#endif 135#endif /* ASM_X86__HW_IRQ_H */
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h
index d2bbd238b3e..cc011a3bc1c 100644
--- a/include/asm-x86/hypertransport.h
+++ b/include/asm-x86/hypertransport.h
@@ -1,5 +1,5 @@
1#ifndef ASM_HYPERTRANSPORT_H 1#ifndef ASM_X86__HYPERTRANSPORT_H
2#define ASM_HYPERTRANSPORT_H 2#define ASM_X86__HYPERTRANSPORT_H
3 3
4/* 4/*
5 * Constants for x86 Hypertransport Interrupts. 5 * Constants for x86 Hypertransport Interrupts.
@@ -42,4 +42,4 @@
42#define HT_IRQ_HIGH_DEST_ID(v) \ 42#define HT_IRQ_HIGH_DEST_ID(v) \
43 ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) 43 ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
44 44
45#endif /* ASM_HYPERTRANSPORT_H */ 45#endif /* ASM_X86__HYPERTRANSPORT_H */
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 56d00e31aec..1ecdc3ed96e 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -7,8 +7,8 @@
7 * x86-64 work by Andi Kleen 2002 7 * x86-64 work by Andi Kleen 2002
8 */ 8 */
9 9
10#ifndef _ASM_X86_I387_H 10#ifndef ASM_X86__I387_H
11#define _ASM_X86_I387_H 11#define ASM_X86__I387_H
12 12
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/kernel_stat.h> 14#include <linux/kernel_stat.h>
@@ -25,6 +25,7 @@ extern void mxcsr_feature_mask_init(void);
25extern int init_fpu(struct task_struct *child); 25extern int init_fpu(struct task_struct *child);
26extern asmlinkage void math_state_restore(void); 26extern asmlinkage void math_state_restore(void);
27extern void init_thread_xstate(void); 27extern void init_thread_xstate(void);
28extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
28 29
29extern user_regset_active_fn fpregs_active, xfpregs_active; 30extern user_regset_active_fn fpregs_active, xfpregs_active;
30extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; 31extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
@@ -336,4 +337,4 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
336 } 337 }
337} 338}
338 339
339#endif /* _ASM_X86_I387_H */ 340#endif /* ASM_X86__I387_H */
diff --git a/include/asm-x86/i8253.h b/include/asm-x86/i8253.h
index b51c0487fc4..15a5b530044 100644
--- a/include/asm-x86/i8253.h
+++ b/include/asm-x86/i8253.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_I8253_H__ 1#ifndef ASM_X86__I8253_H
2#define __ASM_I8253_H__ 2#define ASM_X86__I8253_H
3 3
4/* i8253A PIT registers */ 4/* i8253A PIT registers */
5#define PIT_MODE 0x43 5#define PIT_MODE 0x43
@@ -15,4 +15,4 @@ extern void setup_pit_timer(void);
15#define inb_pit inb_p 15#define inb_pit inb_p
16#define outb_pit outb_p 16#define outb_pit outb_p
17 17
18#endif /* __ASM_I8253_H__ */ 18#endif /* ASM_X86__I8253_H */
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index 2f98df91f1f..c586559a695 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_I8259_H__ 1#ifndef ASM_X86__I8259_H
2#define __ASM_I8259_H__ 2#define ASM_X86__I8259_H
3 3
4#include <linux/delay.h> 4#include <linux/delay.h>
5 5
@@ -57,4 +57,4 @@ static inline void outb_pic(unsigned char value, unsigned int port)
57 57
58extern struct irq_chip i8259A_chip; 58extern struct irq_chip i8259A_chip;
59 59
60#endif /* __ASM_I8259_H__ */ 60#endif /* ASM_X86__I8259_H */
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
index 55d3abe5276..f932f7ad51d 100644
--- a/include/asm-x86/ia32.h
+++ b/include/asm-x86/ia32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IA32_H 1#ifndef ASM_X86__IA32_H
2#define _ASM_X86_64_IA32_H 2#define ASM_X86__IA32_H
3 3
4 4
5#ifdef CONFIG_IA32_EMULATION 5#ifdef CONFIG_IA32_EMULATION
@@ -167,4 +167,4 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm);
167 167
168#endif /* !CONFIG_IA32_SUPPORT */ 168#endif /* !CONFIG_IA32_SUPPORT */
169 169
170#endif 170#endif /* ASM_X86__IA32_H */
diff --git a/include/asm-x86/ia32_unistd.h b/include/asm-x86/ia32_unistd.h
index 61cea9e7c5c..dbd887d8a5a 100644
--- a/include/asm-x86/ia32_unistd.h
+++ b/include/asm-x86/ia32_unistd.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IA32_UNISTD_H_ 1#ifndef ASM_X86__IA32_UNISTD_H
2#define _ASM_X86_64_IA32_UNISTD_H_ 2#define ASM_X86__IA32_UNISTD_H
3 3
4/* 4/*
5 * This file contains the system call numbers of the ia32 port, 5 * This file contains the system call numbers of the ia32 port,
@@ -15,4 +15,4 @@
15#define __NR_ia32_sigreturn 119 15#define __NR_ia32_sigreturn 119
16#define __NR_ia32_rt_sigreturn 173 16#define __NR_ia32_rt_sigreturn 173
17 17
18#endif /* _ASM_X86_64_IA32_UNISTD_H_ */ 18#endif /* ASM_X86__IA32_UNISTD_H */
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
index cbb64912361..baa3f783d27 100644
--- a/include/asm-x86/idle.h
+++ b/include/asm-x86/idle.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IDLE_H 1#ifndef ASM_X86__IDLE_H
2#define _ASM_X86_64_IDLE_H 1 2#define ASM_X86__IDLE_H
3 3
4#define IDLE_START 1 4#define IDLE_START 1
5#define IDLE_END 2 5#define IDLE_END 2
@@ -12,4 +12,4 @@ void exit_idle(void);
12 12
13void c1e_remove_cpu(int cpu); 13void c1e_remove_cpu(int cpu);
14 14
15#endif 15#endif /* ASM_X86__IDLE_H */
diff --git a/include/asm-x86/intel_arch_perfmon.h b/include/asm-x86/intel_arch_perfmon.h
index fa0fd068bc2..07c03c6c9a1 100644
--- a/include/asm-x86/intel_arch_perfmon.h
+++ b/include/asm-x86/intel_arch_perfmon.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H 1#ifndef ASM_X86__INTEL_ARCH_PERFMON_H
2#define _ASM_X86_INTEL_ARCH_PERFMON_H 2#define ASM_X86__INTEL_ARCH_PERFMON_H
3 3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
@@ -28,4 +28,4 @@ union cpuid10_eax {
28 unsigned int full; 28 unsigned int full;
29}; 29};
30 30
31#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */ 31#endif /* ASM_X86__INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index 0f954dc89cb..72b7719523b 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_IO_H 1#ifndef ASM_X86__IO_H
2#define _ASM_X86_IO_H 2#define ASM_X86__IO_H
3 3
4#define ARCH_HAS_IOREMAP_WC 4#define ARCH_HAS_IOREMAP_WC
5 5
@@ -73,6 +73,8 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
73#define writeq writeq 73#define writeq writeq
74#endif 74#endif
75 75
76extern int iommu_bio_merge;
77
76#ifdef CONFIG_X86_32 78#ifdef CONFIG_X86_32
77# include "io_32.h" 79# include "io_32.h"
78#else 80#else
@@ -99,4 +101,4 @@ extern void early_iounmap(void *addr, unsigned long size);
99extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); 101extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
100 102
101 103
102#endif /* _ASM_X86_IO_H */ 104#endif /* ASM_X86__IO_H */
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index e876d89ac15..4f7d878bda1 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IO_H 1#ifndef ASM_X86__IO_32_H
2#define _ASM_IO_H 2#define ASM_X86__IO_32_H
3 3
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -281,4 +281,4 @@ BUILDIO(b, b, char)
281BUILDIO(w, w, short) 281BUILDIO(w, w, short)
282BUILDIO(l, , int) 282BUILDIO(l, , int)
283 283
284#endif 284#endif /* ASM_X86__IO_32_H */
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index 22995c5c5ad..64429e9431a 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IO_H 1#ifndef ASM_X86__IO_64_H
2#define _ASM_IO_H 2#define ASM_X86__IO_64_H
3 3
4 4
5/* 5/*
@@ -235,7 +235,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
235 235
236#define flush_write_buffers() 236#define flush_write_buffers()
237 237
238extern int iommu_bio_merge;
239#define BIO_VMERGE_BOUNDARY iommu_bio_merge 238#define BIO_VMERGE_BOUNDARY iommu_bio_merge
240 239
241/* 240/*
@@ -245,4 +244,4 @@ extern int iommu_bio_merge;
245 244
246#endif /* __KERNEL__ */ 245#endif /* __KERNEL__ */
247 246
248#endif 247#endif /* ASM_X86__IO_64_H */
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index 14f82bbcb5f..be62847ab07 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_IO_APIC_H 1#ifndef ASM_X86__IO_APIC_H
2#define __ASM_IO_APIC_H 2#define ASM_X86__IO_APIC_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/mpspec.h> 5#include <asm/mpspec.h>
@@ -189,4 +189,4 @@ static const int timer_through_8259 = 0;
189static inline void ioapic_init_mappings(void) { } 189static inline void ioapic_init_mappings(void) { }
190#endif 190#endif
191 191
192#endif 192#endif /* ASM_X86__IO_APIC_H */
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h
index c0c338bd406..33660351239 100644
--- a/include/asm-x86/ioctls.h
+++ b/include/asm-x86/ioctls.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_IOCTLS_H 1#ifndef ASM_X86__IOCTLS_H
2#define _ASM_X86_IOCTLS_H 2#define ASM_X86__IOCTLS_H
3 3
4#include <asm/ioctl.h> 4#include <asm/ioctl.h>
5 5
@@ -85,4 +85,4 @@
85 85
86#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ 86#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
87 87
88#endif 88#endif /* ASM_X86__IOCTLS_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 5f888cc5be4..e86f44148c6 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X8664_IOMMU_H 1#ifndef ASM_X86__IOMMU_H
2#define _ASM_X8664_IOMMU_H 1 2#define ASM_X86__IOMMU_H
3 3
4extern void pci_iommu_shutdown(void); 4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void); 5extern void no_iommu_init(void);
@@ -42,4 +42,4 @@ static inline void gart_iommu_hole_init(void)
42} 42}
43#endif 43#endif
44 44
45#endif 45#endif /* ASM_X86__IOMMU_H */
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h
index ee678fd5159..910304fbdc8 100644
--- a/include/asm-x86/ipcbuf.h
+++ b/include/asm-x86/ipcbuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_IPCBUF_H 1#ifndef ASM_X86__IPCBUF_H
2#define _ASM_X86_IPCBUF_H 2#define ASM_X86__IPCBUF_H
3 3
4/* 4/*
5 * The ipc64_perm structure for x86 architecture. 5 * The ipc64_perm structure for x86 architecture.
@@ -25,4 +25,4 @@ struct ipc64_perm {
25 unsigned long __unused2; 25 unsigned long __unused2;
26}; 26};
27 27
28#endif /* _ASM_X86_IPCBUF_H */ 28#endif /* ASM_X86__IPCBUF_H */
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index bb1c09f7a76..c1b22679751 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_IPI_H 1#ifndef ASM_X86__IPI_H
2#define __ASM_IPI_H 2#define ASM_X86__IPI_H
3 3
4/* 4/*
5 * Copyright 2004 James Cleverdon, IBM. 5 * Copyright 2004 James Cleverdon, IBM.
@@ -129,4 +129,4 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
129 local_irq_restore(flags); 129 local_irq_restore(flags);
130} 130}
131 131
132#endif /* __ASM_IPI_H */ 132#endif /* ASM_X86__IPI_H */
diff --git a/include/asm-x86/irq.h b/include/asm-x86/irq.h
index 1a292575731..1e5f2909c1d 100644
--- a/include/asm-x86/irq.h
+++ b/include/asm-x86/irq.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_H 1#ifndef ASM_X86__IRQ_H
2#define _ASM_IRQ_H 2#define ASM_X86__IRQ_H
3/* 3/*
4 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar 4 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
5 * 5 *
@@ -47,4 +47,4 @@ extern void native_init_IRQ(void);
47/* Interrupt vector management */ 47/* Interrupt vector management */
48extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 48extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
49 49
50#endif /* _ASM_IRQ_H */ 50#endif /* ASM_X86__IRQ_H */
diff --git a/include/asm-x86/irq_regs_32.h b/include/asm-x86/irq_regs_32.h
index 3368b20c0b4..316a3b25887 100644
--- a/include/asm-x86/irq_regs_32.h
+++ b/include/asm-x86/irq_regs_32.h
@@ -4,8 +4,8 @@
4 * 4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org> 5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */ 6 */
7#ifndef _ASM_I386_IRQ_REGS_H 7#ifndef ASM_X86__IRQ_REGS_32_H
8#define _ASM_I386_IRQ_REGS_H 8#define ASM_X86__IRQ_REGS_32_H
9 9
10#include <asm/percpu.h> 10#include <asm/percpu.h>
11 11
@@ -26,4 +26,4 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
26 return old_regs; 26 return old_regs;
27} 27}
28 28
29#endif /* _ASM_I386_IRQ_REGS_H */ 29#endif /* ASM_X86__IRQ_REGS_32_H */
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index a48c7f2dbdc..c5d2d767a1f 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_VECTORS_H 1#ifndef ASM_X86__IRQ_VECTORS_H
2#define _ASM_IRQ_VECTORS_H 2#define ASM_X86__IRQ_VECTORS_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5 5
@@ -179,4 +179,4 @@
179#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) 179#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
180 180
181 181
182#endif /* _ASM_IRQ_VECTORS_H */ 182#endif /* ASM_X86__IRQ_VECTORS_H */
diff --git a/include/asm-x86/ist.h b/include/asm-x86/ist.h
index 6ec6ceed95a..35a2fe9bc92 100644
--- a/include/asm-x86/ist.h
+++ b/include/asm-x86/ist.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IST_H 1#ifndef ASM_X86__IST_H
2#define _ASM_IST_H 2#define ASM_X86__IST_H
3 3
4/* 4/*
5 * Include file for the interface to IST BIOS 5 * Include file for the interface to IST BIOS
@@ -31,4 +31,4 @@ struct ist_info {
31extern struct ist_info ist_info; 31extern struct ist_info ist_info;
32 32
33#endif /* __KERNEL__ */ 33#endif /* __KERNEL__ */
34#endif /* _ASM_IST_H */ 34#endif /* ASM_X86__IST_H */
diff --git a/include/asm-x86/k8.h b/include/asm-x86/k8.h
index 452e2b696ff..2bbaf4370a5 100644
--- a/include/asm-x86/k8.h
+++ b/include/asm-x86/k8.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_K8_H 1#ifndef ASM_X86__K8_H
2#define _ASM_K8_H 1 2#define ASM_X86__K8_H
3 3
4#include <linux/pci.h> 4#include <linux/pci.h>
5 5
@@ -12,4 +12,4 @@ extern int cache_k8_northbridges(void);
12extern void k8_flush_garts(void); 12extern void k8_flush_garts(void);
13extern int k8_scan_nodes(unsigned long start, unsigned long end); 13extern int k8_scan_nodes(unsigned long start, unsigned long end);
14 14
15#endif 15#endif /* ASM_X86__K8_H */
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index 96651bb59ba..5ec3ad3e825 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_KDEBUG_H 1#ifndef ASM_X86__KDEBUG_H
2#define _ASM_X86_KDEBUG_H 2#define ASM_X86__KDEBUG_H
3 3
4#include <linux/notifier.h> 4#include <linux/notifier.h>
5 5
@@ -35,4 +35,4 @@ extern void show_regs(struct pt_regs *regs);
35extern unsigned long oops_begin(void); 35extern unsigned long oops_begin(void);
36extern void oops_end(unsigned long, struct pt_regs *, int signr); 36extern void oops_end(unsigned long, struct pt_regs *, int signr);
37 37
38#endif 38#endif /* ASM_X86__KDEBUG_H */
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index 4246ab7dc98..ea09600d612 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -1,5 +1,5 @@
1#ifndef _KEXEC_H 1#ifndef ASM_X86__KEXEC_H
2#define _KEXEC_H 2#define ASM_X86__KEXEC_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5# define PA_CONTROL_PAGE 0 5# define PA_CONTROL_PAGE 0
@@ -172,4 +172,4 @@ relocate_kernel(unsigned long indirection_page,
172 172
173#endif /* __ASSEMBLY__ */ 173#endif /* __ASSEMBLY__ */
174 174
175#endif /* _KEXEC_H */ 175#endif /* ASM_X86__KEXEC_H */
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
index 94d63db1036..d283863354d 100644
--- a/include/asm-x86/kgdb.h
+++ b/include/asm-x86/kgdb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_KGDB_H_ 1#ifndef ASM_X86__KGDB_H
2#define _ASM_KGDB_H_ 2#define ASM_X86__KGDB_H
3 3
4/* 4/*
5 * Copyright (C) 2001-2004 Amit S. Kale 5 * Copyright (C) 2001-2004 Amit S. Kale
@@ -76,4 +76,4 @@ static inline void arch_kgdb_breakpoint(void)
76#define BREAK_INSTR_SIZE 1 76#define BREAK_INSTR_SIZE 1
77#define CACHE_FLUSH_IS_SAFE 1 77#define CACHE_FLUSH_IS_SAFE 1
78 78
79#endif /* _ASM_KGDB_H_ */ 79#endif /* ASM_X86__KGDB_H */
diff --git a/include/asm-x86/kmap_types.h b/include/asm-x86/kmap_types.h
index 5f4174132a2..89f44493e64 100644
--- a/include/asm-x86/kmap_types.h
+++ b/include/asm-x86/kmap_types.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_KMAP_TYPES_H 1#ifndef ASM_X86__KMAP_TYPES_H
2#define _ASM_X86_KMAP_TYPES_H 2#define ASM_X86__KMAP_TYPES_H
3 3
4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM) 4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
5# define D(n) __KM_FENCE_##n , 5# define D(n) __KM_FENCE_##n ,
@@ -26,4 +26,4 @@ D(13) KM_TYPE_NR
26 26
27#undef D 27#undef D
28 28
29#endif 29#endif /* ASM_X86__KMAP_TYPES_H */
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h
index 54980b0b389..bd8407863c1 100644
--- a/include/asm-x86/kprobes.h
+++ b/include/asm-x86/kprobes.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_KPROBES_H 1#ifndef ASM_X86__KPROBES_H
2#define _ASM_KPROBES_H 2#define ASM_X86__KPROBES_H
3/* 3/*
4 * Kernel Probes (KProbes) 4 * Kernel Probes (KProbes)
5 * 5 *
@@ -94,4 +94,4 @@ static inline void restore_interrupts(struct pt_regs *regs)
94extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 94extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
95extern int kprobe_exceptions_notify(struct notifier_block *self, 95extern int kprobe_exceptions_notify(struct notifier_block *self,
96 unsigned long val, void *data); 96 unsigned long val, void *data);
97#endif /* _ASM_KPROBES_H */ 97#endif /* ASM_X86__KPROBES_H */
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 6f1840812e5..78e954db1e7 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -1,5 +1,5 @@
1#ifndef __LINUX_KVM_X86_H 1#ifndef ASM_X86__KVM_H
2#define __LINUX_KVM_X86_H 2#define ASM_X86__KVM_H
3 3
4/* 4/*
5 * KVM x86 specific structures and definitions 5 * KVM x86 specific structures and definitions
@@ -230,4 +230,4 @@ struct kvm_pit_state {
230#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) 230#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
231#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) 231#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
232 232
233#endif 233#endif /* ASM_X86__KVM_H */
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index c2e34c27590..69794547f51 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -1,4 +1,4 @@
1#/* 1/*
2 * Kernel-based Virtual Machine driver for Linux 2 * Kernel-based Virtual Machine driver for Linux
3 * 3 *
4 * This header defines architecture specific interfaces, x86 version 4 * This header defines architecture specific interfaces, x86 version
@@ -8,8 +8,8 @@
8 * 8 *
9 */ 9 */
10 10
11#ifndef ASM_KVM_HOST_H 11#ifndef ASM_X86__KVM_HOST_H
12#define ASM_KVM_HOST_H 12#define ASM_X86__KVM_HOST_H
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
@@ -735,4 +735,4 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
735int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 735int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
736int kvm_age_hva(struct kvm *kvm, unsigned long hva); 736int kvm_age_hva(struct kvm *kvm, unsigned long hva);
737 737
738#endif 738#endif /* ASM_X86__KVM_HOST_H */
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index 76f392146da..30054fded4f 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -1,5 +1,5 @@
1#ifndef __X86_KVM_PARA_H 1#ifndef ASM_X86__KVM_PARA_H
2#define __X86_KVM_PARA_H 2#define ASM_X86__KVM_PARA_H
3 3
4/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It 4/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
5 * should be used to determine that a VM is running under KVM. 5 * should be used to determine that a VM is running under KVM.
@@ -144,4 +144,4 @@ static inline unsigned int kvm_arch_para_features(void)
144 144
145#endif 145#endif
146 146
147#endif 147#endif /* ASM_X86__KVM_PARA_H */
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index 4e8c1e48d91..e2d9b030c1a 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -8,8 +8,8 @@
8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
9 */ 9 */
10 10
11#ifndef __X86_EMULATE_H__ 11#ifndef ASM_X86__KVM_X86_EMULATE_H
12#define __X86_EMULATE_H__ 12#define ASM_X86__KVM_X86_EMULATE_H
13 13
14struct x86_emulate_ctxt; 14struct x86_emulate_ctxt;
15 15
@@ -181,4 +181,4 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
181int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, 181int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
182 struct x86_emulate_ops *ops); 182 struct x86_emulate_ops *ops);
183 183
184#endif /* __X86_EMULATE_H__ */ 184#endif /* ASM_X86__KVM_X86_EMULATE_H */
diff --git a/include/asm-x86/ldt.h b/include/asm-x86/ldt.h
index 20c597242b5..a5228504d86 100644
--- a/include/asm-x86/ldt.h
+++ b/include/asm-x86/ldt.h
@@ -3,8 +3,8 @@
3 * 3 *
4 * Definitions of structures used with the modify_ldt system call. 4 * Definitions of structures used with the modify_ldt system call.
5 */ 5 */
6#ifndef _ASM_X86_LDT_H 6#ifndef ASM_X86__LDT_H
7#define _ASM_X86_LDT_H 7#define ASM_X86__LDT_H
8 8
9/* Maximum number of LDT entries supported. */ 9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192 10#define LDT_ENTRIES 8192
@@ -37,4 +37,4 @@ struct user_desc {
37#define MODIFY_LDT_CONTENTS_CODE 2 37#define MODIFY_LDT_CONTENTS_CODE 2
38 38
39#endif /* !__ASSEMBLY__ */ 39#endif /* !__ASSEMBLY__ */
40#endif 40#endif /* ASM_X86__LDT_H */
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index be4a7247fa2..7505e947ed2 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -1,5 +1,5 @@
1#ifndef _X86_LGUEST_H 1#ifndef ASM_X86__LGUEST_H
2#define _X86_LGUEST_H 2#define ASM_X86__LGUEST_H
3 3
4#define GDT_ENTRY_LGUEST_CS 10 4#define GDT_ENTRY_LGUEST_CS 10
5#define GDT_ENTRY_LGUEST_DS 11 5#define GDT_ENTRY_LGUEST_DS 11
@@ -91,4 +91,4 @@ static inline void lguest_set_ts(void)
91 91
92#endif /* __ASSEMBLY__ */ 92#endif /* __ASSEMBLY__ */
93 93
94#endif 94#endif /* ASM_X86__LGUEST_H */
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index a3241f28e34..8f034ba4b53 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -1,6 +1,6 @@
1/* Architecture specific portion of the lguest hypercalls */ 1/* Architecture specific portion of the lguest hypercalls */
2#ifndef _X86_LGUEST_HCALL_H 2#ifndef ASM_X86__LGUEST_HCALL_H
3#define _X86_LGUEST_HCALL_H 3#define ASM_X86__LGUEST_HCALL_H
4 4
5#define LHCALL_FLUSH_ASYNC 0 5#define LHCALL_FLUSH_ASYNC 0
6#define LHCALL_LGUEST_INIT 1 6#define LHCALL_LGUEST_INIT 1
@@ -68,4 +68,4 @@ struct hcall_args {
68}; 68};
69 69
70#endif /* !__ASSEMBLY__ */ 70#endif /* !__ASSEMBLY__ */
71#endif /* _I386_LGUEST_HCALL_H */ 71#endif /* ASM_X86__LGUEST_HCALL_H */
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index 64e444f8e85..42d8b62ee8a 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_LINKAGE_H 1#ifndef ASM_X86__LINKAGE_H
2#define __ASM_LINKAGE_H 2#define ASM_X86__LINKAGE_H
3 3
4#undef notrace 4#undef notrace
5#define notrace __attribute__((no_instrument_function)) 5#define notrace __attribute__((no_instrument_function))
@@ -57,5 +57,5 @@
57#define __ALIGN_STR ".align 16,0x90" 57#define __ALIGN_STR ".align 16,0x90"
58#endif 58#endif
59 59
60#endif 60#endif /* ASM_X86__LINKAGE_H */
61 61
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
index 330a72496ab..ae91994fd6c 100644
--- a/include/asm-x86/local.h
+++ b/include/asm-x86/local.h
@@ -1,5 +1,5 @@
1#ifndef _ARCH_LOCAL_H 1#ifndef ASM_X86__LOCAL_H
2#define _ARCH_LOCAL_H 2#define ASM_X86__LOCAL_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5 5
@@ -232,4 +232,4 @@ static inline long local_sub_return(long i, local_t *l)
232#define __cpu_local_add(i, l) cpu_local_add((i), (l)) 232#define __cpu_local_add(i, l) cpu_local_add((i), (l))
233#define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) 233#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
234 234
235#endif /* _ARCH_LOCAL_H */ 235#endif /* ASM_X86__LOCAL_H */
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h
index c3b9dc6970c..05362d44a3e 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/mach-bigsmp/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_BIGSMP__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_BIGSMP__MACH_APIC_H
3 3
4#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) 4#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
5#define esr_disable (1) 5#define esr_disable (1)
@@ -141,4 +141,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
141 return cpuid_apic >> index_msb; 141 return cpuid_apic >> index_msb;
142} 142}
143 143
144#endif /* __ASM_MACH_APIC_H */ 144#endif /* ASM_X86__MACH_BIGSMP__MACH_APIC_H */
diff --git a/include/asm-x86/mach-bigsmp/mach_apicdef.h b/include/asm-x86/mach-bigsmp/mach_apicdef.h
index a58ab5a75c8..811935d9d49 100644
--- a/include/asm-x86/mach-bigsmp/mach_apicdef.h
+++ b/include/asm-x86/mach-bigsmp/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_BIGSMP__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_BIGSMP__MACH_APICDEF_H
3 3
4#define APIC_ID_MASK (0xFF<<24) 4#define APIC_ID_MASK (0xFF<<24)
5 5
@@ -10,4 +10,4 @@ static inline unsigned get_apic_id(unsigned long x)
10 10
11#define GET_APIC_ID(x) get_apic_id(x) 11#define GET_APIC_ID(x) get_apic_id(x)
12 12
13#endif 13#endif /* ASM_X86__MACH_BIGSMP__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-bigsmp/mach_ipi.h b/include/asm-x86/mach-bigsmp/mach_ipi.h
index 9404c535b7e..b1b0f966a00 100644
--- a/include/asm-x86/mach-bigsmp/mach_ipi.h
+++ b/include/asm-x86/mach-bigsmp/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_BIGSMP__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_BIGSMP__MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5 5
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
22 send_IPI_mask(cpu_online_map, vector); 22 send_IPI_mask(cpu_online_map, vector);
23} 23}
24 24
25#endif /* __ASM_MACH_IPI_H */ 25#endif /* ASM_X86__MACH_BIGSMP__MACH_IPI_H */
diff --git a/include/asm-x86/mach-default/apm.h b/include/asm-x86/mach-default/apm.h
index 989f34c37d3..2aa61b54fbd 100644
--- a/include/asm-x86/mach-default/apm.h
+++ b/include/asm-x86/mach-default/apm.h
@@ -3,8 +3,8 @@
3 * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> 3 * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
4 */ 4 */
5 5
6#ifndef _ASM_APM_H 6#ifndef ASM_X86__MACH_DEFAULT__APM_H
7#define _ASM_APM_H 7#define ASM_X86__MACH_DEFAULT__APM_H
8 8
9#ifdef APM_ZERO_SEGS 9#ifdef APM_ZERO_SEGS
10# define APM_DO_ZERO_SEGS \ 10# define APM_DO_ZERO_SEGS \
@@ -70,4 +70,4 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
70 return error; 70 return error;
71} 71}
72 72
73#endif /* _ASM_APM_H */ 73#endif /* ASM_X86__MACH_DEFAULT__APM_H */
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index f3226b9a6b8..b615f40736b 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_DEFAULT__MACH_APIC_H
3 3
4#ifdef CONFIG_X86_LOCAL_APIC 4#ifdef CONFIG_X86_LOCAL_APIC
5 5
@@ -138,4 +138,4 @@ static inline void enable_apic_mode(void)
138} 138}
139 139
140#endif /* CONFIG_X86_LOCAL_APIC */ 140#endif /* CONFIG_X86_LOCAL_APIC */
141#endif /* __ASM_MACH_APIC_H */ 141#endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h
index e4b29ba37de..936704f816d 100644
--- a/include/asm-x86/mach-default/mach_apicdef.h
+++ b/include/asm-x86/mach-default/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_DEFAULT__MACH_APICDEF_H
3 3
4#include <asm/apic.h> 4#include <asm/apic.h>
5 5
@@ -21,4 +21,4 @@ static inline unsigned get_apic_id(unsigned long x)
21#define GET_APIC_ID(x) get_apic_id(x) 21#define GET_APIC_ID(x) get_apic_id(x)
22#endif 22#endif
23 23
24#endif 24#endif /* ASM_X86__MACH_DEFAULT__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h
index be323364e68..674bc7e50c3 100644
--- a/include/asm-x86/mach-default/mach_ipi.h
+++ b/include/asm-x86/mach-default/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_DEFAULT__MACH_IPI_H
3 3
4/* Avoid include hell */ 4/* Avoid include hell */
5#define NMI_VECTOR 0x02 5#define NMI_VECTOR 0x02
@@ -61,4 +61,4 @@ static inline void send_IPI_all(int vector)
61} 61}
62#endif 62#endif
63 63
64#endif /* __ASM_MACH_IPI_H */ 64#endif /* ASM_X86__MACH_DEFAULT__MACH_IPI_H */
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h
index d14108505bb..9c381f2815a 100644
--- a/include/asm-x86/mach-default/mach_mpparse.h
+++ b/include/asm-x86/mach-default/mach_mpparse.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H
3 3
4static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 4static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
5 char *productid) 5 char *productid)
@@ -14,4 +14,4 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
14} 14}
15 15
16 16
17#endif /* __ASM_MACH_MPPARSE_H */ 17#endif /* ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-default/mach_mpspec.h b/include/asm-x86/mach-default/mach_mpspec.h
index 51c9a977593..d77646f011f 100644
--- a/include/asm-x86/mach-default/mach_mpspec.h
+++ b/include/asm-x86/mach-default/mach_mpspec.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPSPEC_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H 2#define ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H
3 3
4#define MAX_IRQ_SOURCES 256 4#define MAX_IRQ_SOURCES 256
5 5
@@ -9,4 +9,4 @@
9#define MAX_MP_BUSSES 32 9#define MAX_MP_BUSSES 32
10#endif 10#endif
11 11
12#endif /* __ASM_MACH_MPSPEC_H */ 12#endif /* ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-default/mach_timer.h b/include/asm-x86/mach-default/mach_timer.h
index 4b76e536cd9..990b1583383 100644
--- a/include/asm-x86/mach-default/mach_timer.h
+++ b/include/asm-x86/mach-default/mach_timer.h
@@ -10,8 +10,8 @@
10 * directly because of the awkward 8-bit access mechanism of the 82C54 10 * directly because of the awkward 8-bit access mechanism of the 82C54
11 * device. 11 * device.
12 */ 12 */
13#ifndef _MACH_TIMER_H 13#ifndef ASM_X86__MACH_DEFAULT__MACH_TIMER_H
14#define _MACH_TIMER_H 14#define ASM_X86__MACH_DEFAULT__MACH_TIMER_H
15 15
16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ 16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
17#define CALIBRATE_LATCH \ 17#define CALIBRATE_LATCH \
@@ -45,4 +45,4 @@ static inline void mach_countup(unsigned long *count_p)
45 *count_p = count; 45 *count_p = count;
46} 46}
47 47
48#endif /* !_MACH_TIMER_H */ 48#endif /* ASM_X86__MACH_DEFAULT__MACH_TIMER_H */
diff --git a/include/asm-x86/mach-default/mach_traps.h b/include/asm-x86/mach-default/mach_traps.h
index 2fe7705c048..de9ac3f5c4c 100644
--- a/include/asm-x86/mach-default/mach_traps.h
+++ b/include/asm-x86/mach-default/mach_traps.h
@@ -2,8 +2,8 @@
2 * Machine specific NMI handling for generic. 2 * Machine specific NMI handling for generic.
3 * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> 3 * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
4 */ 4 */
5#ifndef _MACH_TRAPS_H 5#ifndef ASM_X86__MACH_DEFAULT__MACH_TRAPS_H
6#define _MACH_TRAPS_H 6#define ASM_X86__MACH_DEFAULT__MACH_TRAPS_H
7 7
8#include <asm/mc146818rtc.h> 8#include <asm/mc146818rtc.h>
9 9
@@ -36,4 +36,4 @@ static inline void reassert_nmi(void)
36 unlock_cmos(); 36 unlock_cmos();
37} 37}
38 38
39#endif /* !_MACH_TRAPS_H */ 39#endif /* ASM_X86__MACH_DEFAULT__MACH_TRAPS_H */
diff --git a/include/asm-x86/mach-default/mach_wakecpu.h b/include/asm-x86/mach-default/mach_wakecpu.h
index 3ebb17893aa..361b810f516 100644
--- a/include/asm-x86/mach-default/mach_wakecpu.h
+++ b/include/asm-x86/mach-default/mach_wakecpu.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H
3 3
4/* 4/*
5 * This file copes with machines that wakeup secondary CPUs by the 5 * This file copes with machines that wakeup secondary CPUs by the
@@ -39,4 +39,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
39 #define inquire_remote_apic(apicid) {} 39 #define inquire_remote_apic(apicid) {}
40#endif 40#endif
41 41
42#endif /* __ASM_MACH_WAKECPU_H */ 42#endif /* ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H */
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h
index 0a3fdf93067..c1f6f682d61 100644
--- a/include/asm-x86/mach-es7000/mach_apic.h
+++ b/include/asm-x86/mach-es7000/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_ES7000__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_ES7000__MACH_APIC_H
3 3
4#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) 4#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
5#define esr_disable (1) 5#define esr_disable (1)
@@ -191,4 +191,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
191 return cpuid_apic >> index_msb; 191 return cpuid_apic >> index_msb;
192} 192}
193 193
194#endif /* __ASM_MACH_APIC_H */ 194#endif /* ASM_X86__MACH_ES7000__MACH_APIC_H */
diff --git a/include/asm-x86/mach-es7000/mach_apicdef.h b/include/asm-x86/mach-es7000/mach_apicdef.h
index a58ab5a75c8..a07e5674402 100644
--- a/include/asm-x86/mach-es7000/mach_apicdef.h
+++ b/include/asm-x86/mach-es7000/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_ES7000__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_ES7000__MACH_APICDEF_H
3 3
4#define APIC_ID_MASK (0xFF<<24) 4#define APIC_ID_MASK (0xFF<<24)
5 5
@@ -10,4 +10,4 @@ static inline unsigned get_apic_id(unsigned long x)
10 10
11#define GET_APIC_ID(x) get_apic_id(x) 11#define GET_APIC_ID(x) get_apic_id(x)
12 12
13#endif 13#endif /* ASM_X86__MACH_ES7000__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-es7000/mach_ipi.h b/include/asm-x86/mach-es7000/mach_ipi.h
index 5e61bd220b0..3a21240e03d 100644
--- a/include/asm-x86/mach-es7000/mach_ipi.h
+++ b/include/asm-x86/mach-es7000/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_ES7000__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_ES7000__MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5 5
@@ -21,4 +21,4 @@ static inline void send_IPI_all(int vector)
21 send_IPI_mask(cpu_online_map, vector); 21 send_IPI_mask(cpu_online_map, vector);
22} 22}
23 23
24#endif /* __ASM_MACH_IPI_H */ 24#endif /* ASM_X86__MACH_ES7000__MACH_IPI_H */
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h
index ef26d352362..befde24705b 100644
--- a/include/asm-x86/mach-es7000/mach_mpparse.h
+++ b/include/asm-x86/mach-es7000/mach_mpparse.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_ES7000__MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define ASM_X86__MACH_ES7000__MACH_MPPARSE_H
3 3
4#include <linux/acpi.h> 4#include <linux/acpi.h>
5 5
@@ -26,4 +26,4 @@ static inline int es7000_check_dsdt(void)
26} 26}
27#endif 27#endif
28 28
29#endif /* __ASM_MACH_MPPARSE_H */ 29#endif /* ASM_X86__MACH_ES7000__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-es7000/mach_wakecpu.h b/include/asm-x86/mach-es7000/mach_wakecpu.h
index 84ff5831450..97c776ce13f 100644
--- a/include/asm-x86/mach-es7000/mach_wakecpu.h
+++ b/include/asm-x86/mach-es7000/mach_wakecpu.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef ASM_X86__MACH_ES7000__MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define ASM_X86__MACH_ES7000__MACH_WAKECPU_H
3 3
4/* 4/*
5 * This file copes with machines that wakeup secondary CPUs by the 5 * This file copes with machines that wakeup secondary CPUs by the
@@ -56,4 +56,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
56 #define inquire_remote_apic(apicid) {} 56 #define inquire_remote_apic(apicid) {}
57#endif 57#endif
58 58
59#endif /* __ASM_MACH_WAKECPU_H */ 59#endif /* ASM_X86__MACH_ES7000__MACH_WAKECPU_H */
diff --git a/include/asm-x86/mach-generic/gpio.h b/include/asm-x86/mach-generic/gpio.h
index 5305dcb96df..6ce0f7786ef 100644
--- a/include/asm-x86/mach-generic/gpio.h
+++ b/include/asm-x86/mach-generic/gpio.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_GENERIC_GPIO_H 1#ifndef ASM_X86__MACH_GENERIC__GPIO_H
2#define __ASM_MACH_GENERIC_GPIO_H 2#define ASM_X86__MACH_GENERIC__GPIO_H
3 3
4int gpio_request(unsigned gpio, const char *label); 4int gpio_request(unsigned gpio, const char *label);
5void gpio_free(unsigned gpio); 5void gpio_free(unsigned gpio);
@@ -12,4 +12,4 @@ int irq_to_gpio(unsigned irq);
12 12
13#include <asm-generic/gpio.h> /* cansleep wrappers */ 13#include <asm-generic/gpio.h> /* cansleep wrappers */
14 14
15#endif /* __ASM_MACH_GENERIC_GPIO_H */ 15#endif /* ASM_X86__MACH_GENERIC__GPIO_H */
diff --git a/include/asm-x86/mach-generic/irq_vectors_limits.h b/include/asm-x86/mach-generic/irq_vectors_limits.h
index 890ce3f5e09..f7870e1a220 100644
--- a/include/asm-x86/mach-generic/irq_vectors_limits.h
+++ b/include/asm-x86/mach-generic/irq_vectors_limits.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H 1#ifndef ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H 2#define ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
3 3
4/* 4/*
5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, 5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
@@ -11,4 +11,4 @@
11#define NR_IRQS 224 11#define NR_IRQS 224
12#define NR_IRQ_VECTORS 1024 12#define NR_IRQ_VECTORS 1024
13 13
14#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ 14#endif /* ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h
index 6eff343e123..5d010c6881d 100644
--- a/include/asm-x86/mach-generic/mach_apic.h
+++ b/include/asm-x86/mach-generic/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_GENERIC__MACH_APIC_H
3 3
4#include <asm/genapic.h> 4#include <asm/genapic.h>
5 5
@@ -29,4 +29,4 @@
29 29
30extern void generic_bigsmp_probe(void); 30extern void generic_bigsmp_probe(void);
31 31
32#endif /* __ASM_MACH_APIC_H */ 32#endif /* ASM_X86__MACH_GENERIC__MACH_APIC_H */
diff --git a/include/asm-x86/mach-generic/mach_apicdef.h b/include/asm-x86/mach-generic/mach_apicdef.h
index 28ed98972ca..1657f38b8f2 100644
--- a/include/asm-x86/mach-generic/mach_apicdef.h
+++ b/include/asm-x86/mach-generic/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef _GENAPIC_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_APICDEF_H
2#define _GENAPIC_MACH_APICDEF_H 1 2#define ASM_X86__MACH_GENERIC__MACH_APICDEF_H
3 3
4#ifndef APIC_DEFINITION 4#ifndef APIC_DEFINITION
5#include <asm/genapic.h> 5#include <asm/genapic.h>
@@ -8,4 +8,4 @@
8#define APIC_ID_MASK (genapic->apic_id_mask) 8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif 9#endif
10 10
11#endif 11#endif /* ASM_X86__MACH_GENERIC__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-generic/mach_ipi.h b/include/asm-x86/mach-generic/mach_ipi.h
index 441b0fe3ed1..f67433dbd65 100644
--- a/include/asm-x86/mach-generic/mach_ipi.h
+++ b/include/asm-x86/mach-generic/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef _MACH_IPI_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_IPI_H
2#define _MACH_IPI_H 1 2#define ASM_X86__MACH_GENERIC__MACH_IPI_H
3 3
4#include <asm/genapic.h> 4#include <asm/genapic.h>
5 5
@@ -7,4 +7,4 @@
7#define send_IPI_allbutself (genapic->send_IPI_allbutself) 7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all) 8#define send_IPI_all (genapic->send_IPI_all)
9 9
10#endif 10#endif /* ASM_X86__MACH_GENERIC__MACH_IPI_H */
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h
index 586cadbf378..3115564e557 100644
--- a/include/asm-x86/mach-generic/mach_mpparse.h
+++ b/include/asm-x86/mach-generic/mach_mpparse.h
@@ -1,5 +1,5 @@
1#ifndef _MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_MPPARSE_H
2#define _MACH_MPPARSE_H 1 2#define ASM_X86__MACH_GENERIC__MACH_MPPARSE_H
3 3
4 4
5extern int mps_oem_check(struct mp_config_table *mpc, char *oem, 5extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
@@ -7,4 +7,4 @@ extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
7 7
8extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id); 8extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
9 9
10#endif 10#endif /* ASM_X86__MACH_GENERIC__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-generic/mach_mpspec.h b/include/asm-x86/mach-generic/mach_mpspec.h
index c83c120be53..6061b153613 100644
--- a/include/asm-x86/mach-generic/mach_mpspec.h
+++ b/include/asm-x86/mach-generic/mach_mpspec.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPSPEC_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H 2#define ASM_X86__MACH_GENERIC__MACH_MPSPEC_H
3 3
4#define MAX_IRQ_SOURCES 256 4#define MAX_IRQ_SOURCES 256
5 5
@@ -9,4 +9,4 @@
9 9
10extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, 10extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
11 char *productid); 11 char *productid);
12#endif /* __ASM_MACH_MPSPEC_H */ 12#endif /* ASM_X86__MACH_GENERIC__MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h
index d802465e026..7a0d39edfcf 100644
--- a/include/asm-x86/mach-numaq/mach_apic.h
+++ b/include/asm-x86/mach-numaq/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_NUMAQ__MACH_APIC_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5#include <linux/mmzone.h> 5#include <linux/mmzone.h>
@@ -135,4 +135,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
135 return cpuid_apic >> index_msb; 135 return cpuid_apic >> index_msb;
136} 136}
137 137
138#endif /* __ASM_MACH_APIC_H */ 138#endif /* ASM_X86__MACH_NUMAQ__MACH_APIC_H */
diff --git a/include/asm-x86/mach-numaq/mach_apicdef.h b/include/asm-x86/mach-numaq/mach_apicdef.h
index bf439d0690f..f870ec5f778 100644
--- a/include/asm-x86/mach-numaq/mach_apicdef.h
+++ b/include/asm-x86/mach-numaq/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_NUMAQ__MACH_APICDEF_H
3 3
4 4
5#define APIC_ID_MASK (0xF<<24) 5#define APIC_ID_MASK (0xF<<24)
@@ -11,4 +11,4 @@ static inline unsigned get_apic_id(unsigned long x)
11 11
12#define GET_APIC_ID(x) get_apic_id(x) 12#define GET_APIC_ID(x) get_apic_id(x)
13 13
14#endif 14#endif /* ASM_X86__MACH_NUMAQ__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-numaq/mach_ipi.h b/include/asm-x86/mach-numaq/mach_ipi.h
index c6044488e9e..1e835823f4b 100644
--- a/include/asm-x86/mach-numaq/mach_ipi.h
+++ b/include/asm-x86/mach-numaq/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_NUMAQ__MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t, int vector); 4void send_IPI_mask_sequence(cpumask_t, int vector);
5 5
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
22 send_IPI_mask(cpu_online_map, vector); 22 send_IPI_mask(cpu_online_map, vector);
23} 23}
24 24
25#endif /* __ASM_MACH_IPI_H */ 25#endif /* ASM_X86__MACH_NUMAQ__MACH_IPI_H */
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h
index 626aef6b155..74ade184920 100644
--- a/include/asm-x86/mach-numaq/mach_mpparse.h
+++ b/include/asm-x86/mach-numaq/mach_mpparse.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H
3 3
4extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, 4extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
5 char *productid); 5 char *productid);
6 6
7#endif /* __ASM_MACH_MPPARSE_H */ 7#endif /* ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-numaq/mach_wakecpu.h b/include/asm-x86/mach-numaq/mach_wakecpu.h
index 00530041a99..0db8cea643c 100644
--- a/include/asm-x86/mach-numaq/mach_wakecpu.h
+++ b/include/asm-x86/mach-numaq/mach_wakecpu.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H
3 3
4/* This file copes with machines that wakeup secondary CPUs by NMIs */ 4/* This file copes with machines that wakeup secondary CPUs by NMIs */
5 5
@@ -40,4 +40,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
40 40
41#define inquire_remote_apic(apicid) {} 41#define inquire_remote_apic(apicid) {}
42 42
43#endif /* __ASM_MACH_WAKECPU_H */ 43#endif /* ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H */
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
index acce0b7d397..94b6cdf532e 100644
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ b/include/asm-x86/mach-rdc321x/gpio.h
@@ -1,5 +1,7 @@
1#ifndef _RDC321X_GPIO_H 1#ifndef ASM_X86__MACH_RDC321X__GPIO_H
2#define _RDC321X_GPIO_H 2#define ASM_X86__MACH_RDC321X__GPIO_H
3
4#include <linux/kernel.h>
3 5
4extern int rdc_gpio_get_value(unsigned gpio); 6extern int rdc_gpio_get_value(unsigned gpio);
5extern void rdc_gpio_set_value(unsigned gpio, int value); 7extern void rdc_gpio_set_value(unsigned gpio, int value);
@@ -18,6 +20,7 @@ static inline int gpio_request(unsigned gpio, const char *label)
18 20
19static inline void gpio_free(unsigned gpio) 21static inline void gpio_free(unsigned gpio)
20{ 22{
23 might_sleep();
21 rdc_gpio_free(gpio); 24 rdc_gpio_free(gpio);
22} 25}
23 26
@@ -54,4 +57,4 @@ static inline int irq_to_gpio(unsigned irq)
54/* For cansleep */ 57/* For cansleep */
55#include <asm-generic/gpio.h> 58#include <asm-generic/gpio.h>
56 59
57#endif /* _RDC321X_GPIO_H_ */ 60#endif /* ASM_X86__MACH_RDC321X__GPIO_H */
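
Beyond the guard rename, this hunk pulls in <linux/kernel.h> and adds a might_sleep() annotation to gpio_free(), documenting that freeing a GPIO on RDC321x may block and so must not be called from atomic context. A rough userspace analogue of that pattern, with illustrative names (in_atomic_context, backend_free) standing in for kernel state rather than real APIs:

    #include <assert.h>
    #include <stdbool.h>

    static bool in_atomic_context;            /* stand-in for preempt/irq state */

    static void backend_free(unsigned gpio)   /* may block in a real driver */
    {
            (void)gpio;
    }

    static inline void gpio_free_checked(unsigned gpio)
    {
            assert(!in_atomic_context);       /* plays the role of might_sleep() */
            backend_free(gpio);
    }

    int main(void)
    {
            gpio_free_checked(5);
            return 0;
    }
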
diff --git a/include/asm-x86/mach-summit/irq_vectors_limits.h b/include/asm-x86/mach-summit/irq_vectors_limits.h
index 890ce3f5e09..22f376ad68e 100644
--- a/include/asm-x86/mach-summit/irq_vectors_limits.h
+++ b/include/asm-x86/mach-summit/irq_vectors_limits.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H 1#ifndef ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H 2#define ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H
3 3
4/* 4/*
5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, 5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
@@ -11,4 +11,4 @@
11#define NR_IRQS 224 11#define NR_IRQS 224
12#define NR_IRQ_VECTORS 1024 12#define NR_IRQ_VECTORS 1024
13 13
14#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ 14#endif /* ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index c47e2ab5c5c..7a66758d701 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_SUMMIT__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_SUMMIT__MACH_APIC_H
3 3
4#include <asm/smp.h> 4#include <asm/smp.h>
5 5
@@ -182,4 +182,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
182 return hard_smp_processor_id() >> index_msb; 182 return hard_smp_processor_id() >> index_msb;
183} 183}
184 184
185#endif /* __ASM_MACH_APIC_H */ 185#endif /* ASM_X86__MACH_SUMMIT__MACH_APIC_H */
diff --git a/include/asm-x86/mach-summit/mach_apicdef.h b/include/asm-x86/mach-summit/mach_apicdef.h
index a58ab5a75c8..d4bc8590c4f 100644
--- a/include/asm-x86/mach-summit/mach_apicdef.h
+++ b/include/asm-x86/mach-summit/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_SUMMIT__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_SUMMIT__MACH_APICDEF_H
3 3
4#define APIC_ID_MASK (0xFF<<24) 4#define APIC_ID_MASK (0xFF<<24)
5 5
@@ -10,4 +10,4 @@ static inline unsigned get_apic_id(unsigned long x)
10 10
11#define GET_APIC_ID(x) get_apic_id(x) 11#define GET_APIC_ID(x) get_apic_id(x)
12 12
13#endif 13#endif /* ASM_X86__MACH_SUMMIT__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-summit/mach_ipi.h b/include/asm-x86/mach-summit/mach_ipi.h
index 9404c535b7e..a3b31c528d9 100644
--- a/include/asm-x86/mach-summit/mach_ipi.h
+++ b/include/asm-x86/mach-summit/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_SUMMIT__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_SUMMIT__MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5 5
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
22 send_IPI_mask(cpu_online_map, vector); 22 send_IPI_mask(cpu_online_map, vector);
23} 23}
24 24
25#endif /* __ASM_MACH_IPI_H */ 25#endif /* ASM_X86__MACH_SUMMIT__MACH_IPI_H */
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h
index fdf59170133..92396f28772 100644
--- a/include/asm-x86/mach-summit/mach_mpparse.h
+++ b/include/asm-x86/mach-summit/mach_mpparse.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H
3 3
4#include <mach_apic.h> 4#include <mach_apic.h>
5#include <asm/tsc.h> 5#include <asm/tsc.h>
@@ -107,4 +107,4 @@ static inline int is_WPEG(struct rio_detail *rio){
107 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); 107 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
108} 108}
109 109
110#endif /* __ASM_MACH_MPPARSE_H */ 110#endif /* ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H */
diff --git a/include/asm-x86/math_emu.h b/include/asm-x86/math_emu.h
index 9bf4ae93ab1..5768d8e95c8 100644
--- a/include/asm-x86/math_emu.h
+++ b/include/asm-x86/math_emu.h
@@ -1,5 +1,5 @@
1#ifndef _I386_MATH_EMU_H 1#ifndef ASM_X86__MATH_EMU_H
2#define _I386_MATH_EMU_H 2#define ASM_X86__MATH_EMU_H
3 3
4/* This structure matches the layout of the data saved to the stack 4/* This structure matches the layout of the data saved to the stack
5 following a device-not-present interrupt, part of it saved 5 following a device-not-present interrupt, part of it saved
@@ -28,4 +28,4 @@ struct info {
28 long ___vm86_fs; 28 long ___vm86_fs;
29 long ___vm86_gs; 29 long ___vm86_gs;
30}; 30};
31#endif 31#endif /* ASM_X86__MATH_EMU_H */
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h
index daf1ccde77a..a995f33176c 100644
--- a/include/asm-x86/mc146818rtc.h
+++ b/include/asm-x86/mc146818rtc.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Machine dependent access functions for RTC registers. 2 * Machine dependent access functions for RTC registers.
3 */ 3 */
4#ifndef _ASM_MC146818RTC_H 4#ifndef ASM_X86__MC146818RTC_H
5#define _ASM_MC146818RTC_H 5#define ASM_X86__MC146818RTC_H
6 6
7#include <asm/io.h> 7#include <asm/io.h>
8#include <asm/system.h> 8#include <asm/system.h>
@@ -101,4 +101,4 @@ extern unsigned long mach_get_cmos_time(void);
101 101
102#define RTC_IRQ 8 102#define RTC_IRQ 8
103 103
104#endif /* _ASM_MC146818RTC_H */ 104#endif /* ASM_X86__MC146818RTC_H */
diff --git a/include/asm-x86/mca.h b/include/asm-x86/mca.h
index 09adf2eac4d..60d1ed287b1 100644
--- a/include/asm-x86/mca.h
+++ b/include/asm-x86/mca.h
@@ -1,8 +1,8 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */ 1/* -*- mode: c; c-basic-offset: 8 -*- */
2 2
3/* Platform specific MCA defines */ 3/* Platform specific MCA defines */
4#ifndef _ASM_MCA_H 4#ifndef ASM_X86__MCA_H
5#define _ASM_MCA_H 5#define ASM_X86__MCA_H
6 6
7/* Maximal number of MCA slots - actually, some machines have less, but 7/* Maximal number of MCA slots - actually, some machines have less, but
8 * they all have sufficient number of POS registers to cover 8. 8 * they all have sufficient number of POS registers to cover 8.
@@ -40,4 +40,4 @@
40 */ 40 */
41#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3) 41#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3)
42 42
43#endif 43#endif /* ASM_X86__MCA_H */
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h
index c3dca6edc6b..49f22be237d 100644
--- a/include/asm-x86/mca_dma.h
+++ b/include/asm-x86/mca_dma.h
@@ -1,5 +1,5 @@
1#ifndef MCA_DMA_H 1#ifndef ASM_X86__MCA_DMA_H
2#define MCA_DMA_H 2#define ASM_X86__MCA_DMA_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5#include <linux/ioport.h> 5#include <linux/ioport.h>
@@ -198,4 +198,4 @@ static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
198 outb(mode, MCA_DMA_REG_EXE); 198 outb(mode, MCA_DMA_REG_EXE);
199} 199}
200 200
201#endif /* MCA_DMA_H */ 201#endif /* ASM_X86__MCA_DMA_H */
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h
index 531eaa58745..036133eaf74 100644
--- a/include/asm-x86/mce.h
+++ b/include/asm-x86/mce.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_MCE_H 1#ifndef ASM_X86__MCE_H
2#define _ASM_X86_MCE_H 2#define ASM_X86__MCE_H
3 3
4#ifdef __x86_64__ 4#ifdef __x86_64__
5 5
@@ -127,4 +127,4 @@ extern void restart_mce(void);
127 127
128#endif /* __KERNEL__ */ 128#endif /* __KERNEL__ */
129 129
130#endif 130#endif /* ASM_X86__MCE_H */
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h
index 90bc4108a4f..4ef28e6de38 100644
--- a/include/asm-x86/mman.h
+++ b/include/asm-x86/mman.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_MMAN_H 1#ifndef ASM_X86__MMAN_H
2#define _ASM_X86_MMAN_H 2#define ASM_X86__MMAN_H
3 3
4#include <asm-generic/mman.h> 4#include <asm-generic/mman.h>
5 5
@@ -17,4 +17,4 @@
17#define MCL_CURRENT 1 /* lock all current mappings */ 17#define MCL_CURRENT 1 /* lock all current mappings */
18#define MCL_FUTURE 2 /* lock all future mappings */ 18#define MCL_FUTURE 2 /* lock all future mappings */
19 19
20#endif /* _ASM_X86_MMAN_H */ 20#endif /* ASM_X86__MMAN_H */
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
index e293ab81e85..fb79b1cf5d0 100644
--- a/include/asm-x86/mmconfig.h
+++ b/include/asm-x86/mmconfig.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_MMCONFIG_H 1#ifndef ASM_X86__MMCONFIG_H
2#define _ASM_MMCONFIG_H 2#define ASM_X86__MMCONFIG_H
3 3
4#ifdef CONFIG_PCI_MMCONFIG 4#ifdef CONFIG_PCI_MMCONFIG
5extern void __cpuinit fam10h_check_enable_mmcfg(void); 5extern void __cpuinit fam10h_check_enable_mmcfg(void);
@@ -9,4 +9,4 @@ static inline void fam10h_check_enable_mmcfg(void) { }
9static inline void check_enable_amd_mmconf_dmi(void) { } 9static inline void check_enable_amd_mmconf_dmi(void) { }
10#endif 10#endif
11 11
12#endif 12#endif /* ASM_X86__MMCONFIG_H */
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index 00e88679e11..9d5aff14334 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_MMU_H 1#ifndef ASM_X86__MMU_H
2#define _ASM_X86_MMU_H 2#define ASM_X86__MMU_H
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <linux/mutex.h> 5#include <linux/mutex.h>
@@ -7,14 +7,9 @@
7/* 7/*
8 * The x86 doesn't have a mmu context, but 8 * The x86 doesn't have a mmu context, but
9 * we put the segment information here. 9 * we put the segment information here.
10 *
11 * cpu_vm_mask is used to optimize ldt flushing.
12 */ 10 */
13typedef struct { 11typedef struct {
14 void *ldt; 12 void *ldt;
15#ifdef CONFIG_X86_64
16 rwlock_t ldtlock;
17#endif
18 int size; 13 int size;
19 struct mutex lock; 14 struct mutex lock;
20 void *vdso; 15 void *vdso;
@@ -28,4 +23,4 @@ static inline void leave_mm(int cpu)
28} 23}
29#endif 24#endif
30 25
31#endif /* _ASM_X86_MMU_H */ 26#endif /* ASM_X86__MMU_H */
diff --git a/include/asm-x86/mmu_context.h b/include/asm-x86/mmu_context.h
index fac57014e7c..8ec940bfd07 100644
--- a/include/asm-x86/mmu_context.h
+++ b/include/asm-x86/mmu_context.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_X86_MMU_CONTEXT_H 1#ifndef ASM_X86__MMU_CONTEXT_H
2#define __ASM_X86_MMU_CONTEXT_H 2#define ASM_X86__MMU_CONTEXT_H
3 3
4#include <asm/desc.h> 4#include <asm/desc.h>
5#include <asm/atomic.h> 5#include <asm/atomic.h>
@@ -34,4 +34,4 @@ do { \
34} while (0); 34} while (0);
35 35
36 36
37#endif /* __ASM_X86_MMU_CONTEXT_H */ 37#endif /* ASM_X86__MMU_CONTEXT_H */
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 824fc575c6d..cce6f6e4afd 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -1,5 +1,5 @@
1#ifndef __I386_SCHED_H 1#ifndef ASM_X86__MMU_CONTEXT_32_H
2#define __I386_SCHED_H 2#define ASM_X86__MMU_CONTEXT_32_H
3 3
4static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 4static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
5{ 5{
@@ -53,4 +53,4 @@ static inline void switch_mm(struct mm_struct *prev,
53#define deactivate_mm(tsk, mm) \ 53#define deactivate_mm(tsk, mm) \
54 asm("movl %0,%%gs": :"r" (0)); 54 asm("movl %0,%%gs": :"r" (0));
55 55
56#endif 56#endif /* ASM_X86__MMU_CONTEXT_32_H */
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
index c7000634cca..26758673c82 100644
--- a/include/asm-x86/mmu_context_64.h
+++ b/include/asm-x86/mmu_context_64.h
@@ -1,5 +1,5 @@
1#ifndef __X86_64_MMU_CONTEXT_H 1#ifndef ASM_X86__MMU_CONTEXT_64_H
2#define __X86_64_MMU_CONTEXT_H 2#define ASM_X86__MMU_CONTEXT_64_H
3 3
4#include <asm/pda.h> 4#include <asm/pda.h>
5 5
@@ -51,4 +51,4 @@ do { \
51 asm volatile("movl %0,%%fs"::"r"(0)); \ 51 asm volatile("movl %0,%%fs"::"r"(0)); \
52} while (0) 52} while (0)
53 53
54#endif 54#endif /* ASM_X86__MMU_CONTEXT_64_H */
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h
index 940881218ff..2e7299bb365 100644
--- a/include/asm-x86/mmx.h
+++ b/include/asm-x86/mmx.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_MMX_H 1#ifndef ASM_X86__MMX_H
2#define _ASM_MMX_H 2#define ASM_X86__MMX_H
3 3
4/* 4/*
5 * MMX 3Dnow! helper operations 5 * MMX 3Dnow! helper operations
@@ -11,4 +11,4 @@ extern void *_mmx_memcpy(void *to, const void *from, size_t size);
11extern void mmx_clear_page(void *page); 11extern void mmx_clear_page(void *page);
12extern void mmx_copy_page(void *to, void *from); 12extern void mmx_copy_page(void *to, void *from);
13 13
14#endif 14#endif /* ASM_X86__MMX_H */
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index 5862e646065..121b65d61d8 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -3,8 +3,8 @@
3 * 3 *
4 */ 4 */
5 5
6#ifndef _ASM_MMZONE_H_ 6#ifndef ASM_X86__MMZONE_32_H
7#define _ASM_MMZONE_H_ 7#define ASM_X86__MMZONE_32_H
8 8
9#include <asm/smp.h> 9#include <asm/smp.h>
10 10
@@ -131,4 +131,4 @@ static inline int pfn_valid(int pfn)
131}) 131})
132#endif /* CONFIG_NEED_MULTIPLE_NODES */ 132#endif /* CONFIG_NEED_MULTIPLE_NODES */
133 133
134#endif /* _ASM_MMZONE_H_ */ 134#endif /* ASM_X86__MMZONE_32_H */
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h
index 594bd0dc1d0..626b03a1487 100644
--- a/include/asm-x86/mmzone_64.h
+++ b/include/asm-x86/mmzone_64.h
@@ -1,8 +1,8 @@
1/* K8 NUMA support */ 1/* K8 NUMA support */
2/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */ 2/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
3/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */ 3/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
4#ifndef _ASM_X86_64_MMZONE_H 4#ifndef ASM_X86__MMZONE_64_H
5#define _ASM_X86_64_MMZONE_H 1 5#define ASM_X86__MMZONE_64_H
6 6
7 7
8#ifdef CONFIG_NUMA 8#ifdef CONFIG_NUMA
@@ -49,4 +49,4 @@ extern int early_pfn_to_nid(unsigned long pfn);
49#endif 49#endif
50 50
51#endif 51#endif
52#endif 52#endif /* ASM_X86__MMZONE_64_H */
diff --git a/include/asm-x86/module.h b/include/asm-x86/module.h
index bfedb247871..48dc3e0c07d 100644
--- a/include/asm-x86/module.h
+++ b/include/asm-x86/module.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_MODULE_H 1#ifndef ASM_X86__MODULE_H
2#define _ASM_MODULE_H 2#define ASM_X86__MODULE_H
3 3
4/* x86_32/64 are simple */ 4/* x86_32/64 are simple */
5struct mod_arch_specific {}; 5struct mod_arch_specific {};
@@ -79,4 +79,4 @@ struct mod_arch_specific {};
79# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE 79# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
80#endif 80#endif
81 81
82#endif /* _ASM_MODULE_H */ 82#endif /* ASM_X86__MODULE_H */
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index b6995e567fc..118da365e37 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -1,5 +1,5 @@
1#ifndef _AM_X86_MPSPEC_H 1#ifndef ASM_X86__MPSPEC_H
2#define _AM_X86_MPSPEC_H 2#define ASM_X86__MPSPEC_H
3 3
4#include <linux/init.h> 4#include <linux/init.h>
5 5
@@ -141,4 +141,4 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
141 141
142extern physid_mask_t phys_cpu_present_map; 142extern physid_mask_t phys_cpu_present_map;
143 143
144#endif 144#endif /* ASM_X86__MPSPEC_H */
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h
index 38d1e73b49e..79166b04801 100644
--- a/include/asm-x86/mpspec_def.h
+++ b/include/asm-x86/mpspec_def.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MPSPEC_DEF_H 1#ifndef ASM_X86__MPSPEC_DEF_H
2#define __ASM_MPSPEC_DEF_H 2#define ASM_X86__MPSPEC_DEF_H
3 3
4/* 4/*
5 * Structure definitions for SMP machines following the 5 * Structure definitions for SMP machines following the
@@ -177,4 +177,4 @@ enum mp_bustype {
177 MP_BUS_PCI, 177 MP_BUS_PCI,
178 MP_BUS_MCA, 178 MP_BUS_MCA,
179}; 179};
180#endif 180#endif /* ASM_X86__MPSPEC_DEF_H */
diff --git a/include/asm-x86/msgbuf.h b/include/asm-x86/msgbuf.h
index 7e4e9481f51..1b538c907a3 100644
--- a/include/asm-x86/msgbuf.h
+++ b/include/asm-x86/msgbuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_MSGBUF_H 1#ifndef ASM_X86__MSGBUF_H
2#define _ASM_X86_MSGBUF_H 2#define ASM_X86__MSGBUF_H
3 3
4/* 4/*
5 * The msqid64_ds structure for i386 architecture. 5 * The msqid64_ds structure for i386 architecture.
@@ -36,4 +36,4 @@ struct msqid64_ds {
36 unsigned long __unused5; 36 unsigned long __unused5;
37}; 37};
38 38
39#endif /* _ASM_X86_MSGBUF_H */ 39#endif /* ASM_X86__MSGBUF_H */
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h
index 296f29ce426..3139666a94f 100644
--- a/include/asm-x86/msidef.h
+++ b/include/asm-x86/msidef.h
@@ -1,5 +1,5 @@
1#ifndef ASM_MSIDEF_H 1#ifndef ASM_X86__MSIDEF_H
2#define ASM_MSIDEF_H 2#define ASM_X86__MSIDEF_H
3 3
4/* 4/*
5 * Constants for Intel APIC based MSI messages. 5 * Constants for Intel APIC based MSI messages.
@@ -48,4 +48,4 @@
48#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ 48#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
49 MSI_ADDR_DEST_ID_MASK) 49 MSI_ADDR_DEST_ID_MASK)
50 50
51#endif /* ASM_MSIDEF_H */ 51#endif /* ASM_X86__MSIDEF_H */
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 44bce773012..3052f058ab0 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MSR_INDEX_H 1#ifndef ASM_X86__MSR_INDEX_H
2#define __ASM_MSR_INDEX_H 2#define ASM_X86__MSR_INDEX_H
3 3
4/* CPU model specific register (MSR) numbers */ 4/* CPU model specific register (MSR) numbers */
5 5
@@ -310,4 +310,4 @@
310/* Geode defined MSRs */ 310/* Geode defined MSRs */
311#define MSR_GEODE_BUSCONT_CONF0 0x00001900 311#define MSR_GEODE_BUSCONT_CONF0 0x00001900
312 312
313#endif /* __ASM_MSR_INDEX_H */ 313#endif /* ASM_X86__MSR_INDEX_H */
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 2362cfda1fb..530af1f6389 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_X86_MSR_H_ 1#ifndef ASM_X86__MSR_H
2#define __ASM_X86_MSR_H_ 2#define ASM_X86__MSR_H
3 3
4#include <asm/msr-index.h> 4#include <asm/msr-index.h>
5 5
@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
63 return EAX_EDX_VAL(val, low, high); 63 return EAX_EDX_VAL(val, low, high);
64} 64}
65 65
66static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
67 int *err)
68{
69 DECLARE_ARGS(val, low, high);
70
71 asm volatile("2: rdmsr ; xor %0,%0\n"
72 "1:\n\t"
73 ".section .fixup,\"ax\"\n\t"
74 "3: mov %3,%0 ; jmp 1b\n\t"
75 ".previous\n\t"
76 _ASM_EXTABLE(2b, 3b)
77 : "=r" (*err), EAX_EDX_RET(val, low, high)
78 : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
79 return EAX_EDX_VAL(val, low, high);
80}
81
66static inline void native_write_msr(unsigned int msr, 82static inline void native_write_msr(unsigned int msr,
67 unsigned low, unsigned high) 83 unsigned low, unsigned high)
68{ 84{
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
158 *p = native_read_msr_safe(msr, &err); 174 *p = native_read_msr_safe(msr, &err);
159 return err; 175 return err;
160} 176}
177static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
178{
179 int err;
180
181 *p = native_read_msr_amd_safe(msr, &err);
182 return err;
183}
161 184
162#define rdtscl(low) \ 185#define rdtscl(low) \
163 ((low) = (u32)native_read_tsc()) 186 ((low) = (u32)native_read_tsc())
@@ -221,4 +244,4 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
221#endif /* __KERNEL__ */ 244#endif /* __KERNEL__ */
222 245
223 246
224#endif 247#endif /* ASM_X86__MSR_H */
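
The new native_read_msr_amd_safe()/rdmsrl_amd_safe() pair mirrors rdmsrl_safe(), but loads what appears to be an AMD-specific magic value (0x9c5a203a) into EDI before the rdmsr and converts a faulting access into -EFAULT via the exception table instead of an oops. A hedged caller sketch (kernel context assumed; the MSR index and messages are placeholders, not taken from the patch):

    static int example_read_amd_msr(unsigned int msr)
    {
            unsigned long long val;
            int err = rdmsrl_amd_safe(msr, &val);  /* 0 on success, -EFAULT on fault */

            if (err)
                    printk(KERN_WARNING "rdmsr %#x faulted\n", msr);
            else
                    printk(KERN_INFO "msr %#x = %#llx\n", msr, val);
            return err;
    }
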
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index a69a01a5172..23a7f83da95 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -20,8 +20,8 @@
20 The postal address is: 20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. 21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/ 22*/
23#ifndef _ASM_X86_MTRR_H 23#ifndef ASM_X86__MTRR_H
24#define _ASM_X86_MTRR_H 24#define ASM_X86__MTRR_H
25 25
26#include <linux/ioctl.h> 26#include <linux/ioctl.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
@@ -170,4 +170,4 @@ struct mtrr_gentry32 {
170 170
171#endif /* __KERNEL__ */ 171#endif /* __KERNEL__ */
172 172
173#endif /* _ASM_X86_MTRR_H */ 173#endif /* ASM_X86__MTRR_H */
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h
index 73e928ef5f0..25c16d8ba3c 100644
--- a/include/asm-x86/mutex_32.h
+++ b/include/asm-x86/mutex_32.h
@@ -6,8 +6,8 @@
6 * 6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */ 8 */
9#ifndef _ASM_MUTEX_H 9#ifndef ASM_X86__MUTEX_32_H
10#define _ASM_MUTEX_H 10#define ASM_X86__MUTEX_32_H
11 11
12#include <asm/alternative.h> 12#include <asm/alternative.h>
13 13
@@ -122,4 +122,4 @@ static inline int __mutex_fastpath_trylock(atomic_t *count,
122#endif 122#endif
123} 123}
124 124
125#endif 125#endif /* ASM_X86__MUTEX_32_H */
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
index f3fae9becb3..918ba21ab9d 100644
--- a/include/asm-x86/mutex_64.h
+++ b/include/asm-x86/mutex_64.h
@@ -6,8 +6,8 @@
6 * 6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */ 8 */
9#ifndef _ASM_MUTEX_H 9#ifndef ASM_X86__MUTEX_64_H
10#define _ASM_MUTEX_H 10#define ASM_X86__MUTEX_64_H
11 11
12/** 12/**
13 * __mutex_fastpath_lock - decrement and call function if negative 13 * __mutex_fastpath_lock - decrement and call function if negative
@@ -97,4 +97,4 @@ static inline int __mutex_fastpath_trylock(atomic_t *count,
97 return 0; 97 return 0;
98} 98}
99 99
100#endif 100#endif /* ASM_X86__MUTEX_64_H */
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 21f8d0202a8..d5e715f024d 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_NMI_H_ 1#ifndef ASM_X86__NMI_H
2#define _ASM_X86_NMI_H_ 2#define ASM_X86__NMI_H
3 3
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <asm/irq.h> 5#include <asm/irq.h>
@@ -34,6 +34,7 @@ extern void stop_apic_nmi_watchdog(void *);
34extern void disable_timer_nmi_watchdog(void); 34extern void disable_timer_nmi_watchdog(void);
35extern void enable_timer_nmi_watchdog(void); 35extern void enable_timer_nmi_watchdog(void);
36extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason); 36extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
37extern void cpu_nmi_set_wd_enabled(void);
37 38
38extern atomic_t nmi_active; 39extern atomic_t nmi_active;
39extern unsigned int nmi_watchdog; 40extern unsigned int nmi_watchdog;
@@ -81,4 +82,4 @@ void enable_lapic_nmi_watchdog(void);
81void stop_nmi(void); 82void stop_nmi(void);
82void restart_nmi(void); 83void restart_nmi(void);
83 84
84#endif 85#endif /* ASM_X86__NMI_H */
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index ad0bedd10b8..ae742721ae7 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_NOPS_H 1#ifndef ASM_X86__NOPS_H
2#define _ASM_NOPS_H 1 2#define ASM_X86__NOPS_H
3 3
4/* Define nops for use with alternative() */ 4/* Define nops for use with alternative() */
5 5
@@ -115,4 +115,4 @@
115 115
116#define ASM_NOP_MAX 8 116#define ASM_NOP_MAX 8
117 117
118#endif 118#endif /* ASM_X86__NOPS_H */
diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h
index 220d7b7707a..44cb07855c5 100644
--- a/include/asm-x86/numa_32.h
+++ b/include/asm-x86/numa_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_32_NUMA_H 1#ifndef ASM_X86__NUMA_32_H
2#define _ASM_X86_32_NUMA_H 1 2#define ASM_X86__NUMA_32_H
3 3
4extern int pxm_to_nid(int pxm); 4extern int pxm_to_nid(int pxm);
5extern void numa_remove_cpu(int cpu); 5extern void numa_remove_cpu(int cpu);
@@ -8,4 +8,4 @@ extern void numa_remove_cpu(int cpu);
8extern void set_highmem_pages_init(void); 8extern void set_highmem_pages_init(void);
9#endif 9#endif
10 10
11#endif /* _ASM_X86_32_NUMA_H */ 11#endif /* ASM_X86__NUMA_32_H */
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 3830094434a..15c990395b0 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X8664_NUMA_H 1#ifndef ASM_X86__NUMA_64_H
2#define _ASM_X8664_NUMA_H 1 2#define ASM_X86__NUMA_64_H
3 3
4#include <linux/nodemask.h> 4#include <linux/nodemask.h>
5#include <asm/apicdef.h> 5#include <asm/apicdef.h>
@@ -40,4 +40,4 @@ static inline void numa_add_cpu(int cpu, int node) { }
40static inline void numa_remove_cpu(int cpu) { } 40static inline void numa_remove_cpu(int cpu) { }
41#endif 41#endif
42 42
43#endif 43#endif /* ASM_X86__NUMA_64_H */
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
index 34b92d581fa..124bf7d4b70 100644
--- a/include/asm-x86/numaq.h
+++ b/include/asm-x86/numaq.h
@@ -23,8 +23,8 @@
23 * Send feedback to <gone@us.ibm.com> 23 * Send feedback to <gone@us.ibm.com>
24 */ 24 */
25 25
26#ifndef NUMAQ_H 26#ifndef ASM_X86__NUMAQ_H
27#define NUMAQ_H 27#define ASM_X86__NUMAQ_H
28 28
29#ifdef CONFIG_X86_NUMAQ 29#ifdef CONFIG_X86_NUMAQ
30 30
@@ -165,5 +165,5 @@ static inline int get_memcfg_numaq(void)
165 return 0; 165 return 0;
166} 166}
167#endif /* CONFIG_X86_NUMAQ */ 167#endif /* CONFIG_X86_NUMAQ */
168#endif /* NUMAQ_H */ 168#endif /* ASM_X86__NUMAQ_H */
169 169
diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h
index 97d47133486..d7328b1a05c 100644
--- a/include/asm-x86/olpc.h
+++ b/include/asm-x86/olpc.h
@@ -1,7 +1,7 @@
1/* OLPC machine specific definitions */ 1/* OLPC machine specific definitions */
2 2
3#ifndef ASM_OLPC_H_ 3#ifndef ASM_X86__OLPC_H
4#define ASM_OLPC_H_ 4#define ASM_X86__OLPC_H
5 5
6#include <asm/geode.h> 6#include <asm/geode.h>
7 7
@@ -129,4 +129,4 @@ extern int olpc_ec_mask_unset(uint8_t bits);
129#define OLPC_GPIO_LID geode_gpio(26) 129#define OLPC_GPIO_LID geode_gpio(26)
130#define OLPC_GPIO_ECSCI geode_gpio(27) 130#define OLPC_GPIO_ECSCI geode_gpio(27)
131 131
132#endif 132#endif /* ASM_X86__OLPC_H */
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 49982110e4d..79544e6ffb8 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PAGE_H 1#ifndef ASM_X86__PAGE_H
2#define _ASM_X86_PAGE_H 2#define ASM_X86__PAGE_H
3 3
4#include <linux/const.h> 4#include <linux/const.h>
5 5
@@ -199,4 +199,4 @@ static inline pteval_t native_pte_flags(pte_t pte)
199#define __HAVE_ARCH_GATE_AREA 1 199#define __HAVE_ARCH_GATE_AREA 1
200 200
201#endif /* __KERNEL__ */ 201#endif /* __KERNEL__ */
202#endif /* _ASM_X86_PAGE_H */ 202#endif /* ASM_X86__PAGE_H */
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index ab8528793f0..72f7305682c 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PAGE_32_H 1#ifndef ASM_X86__PAGE_32_H
2#define _ASM_X86_PAGE_32_H 2#define ASM_X86__PAGE_32_H
3 3
4/* 4/*
5 * This handles the memory map. 5 * This handles the memory map.
@@ -89,13 +89,11 @@ extern int nx_enabled;
89extern unsigned int __VMALLOC_RESERVE; 89extern unsigned int __VMALLOC_RESERVE;
90extern int sysctl_legacy_va_layout; 90extern int sysctl_legacy_va_layout;
91 91
92#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
93#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)
94
95extern void find_low_pfn_range(void); 92extern void find_low_pfn_range(void);
96extern unsigned long init_memory_mapping(unsigned long start, 93extern unsigned long init_memory_mapping(unsigned long start,
97 unsigned long end); 94 unsigned long end);
98extern void initmem_init(unsigned long, unsigned long); 95extern void initmem_init(unsigned long, unsigned long);
96extern void free_initmem(void);
99extern void setup_bootmem_allocator(void); 97extern void setup_bootmem_allocator(void);
100 98
101 99
@@ -126,4 +124,4 @@ static inline void copy_page(void *to, void *from)
126#endif /* CONFIG_X86_3DNOW */ 124#endif /* CONFIG_X86_3DNOW */
127#endif /* !__ASSEMBLY__ */ 125#endif /* !__ASSEMBLY__ */
128 126
129#endif /* _ASM_X86_PAGE_32_H */ 127#endif /* ASM_X86__PAGE_32_H */
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index c6916c83e6b..5e64acfed0a 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_PAGE_H 1#ifndef ASM_X86__PAGE_64_H
2#define _X86_64_PAGE_H 2#define ASM_X86__PAGE_64_H
3 3
4#define PAGETABLE_LEVELS 4 4#define PAGETABLE_LEVELS 4
5 5
@@ -91,6 +91,7 @@ extern unsigned long init_memory_mapping(unsigned long start,
91 unsigned long end); 91 unsigned long end);
92 92
93extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); 93extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
94extern void free_initmem(void);
94 95
95extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); 96extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
96extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); 97extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
@@ -102,4 +103,4 @@ extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
102#endif 103#endif
103 104
104 105
105#endif /* _X86_64_PAGE_H */ 106#endif /* ASM_X86__PAGE_64_H */
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
index 6f0d0422f4c..0009cfb11a5 100644
--- a/include/asm-x86/param.h
+++ b/include/asm-x86/param.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PARAM_H 1#ifndef ASM_X86__PARAM_H
2#define _ASM_X86_PARAM_H 2#define ASM_X86__PARAM_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */ 5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
@@ -19,4 +19,4 @@
19 19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */ 20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21 21
22#endif /* _ASM_X86_PARAM_H */ 22#endif /* ASM_X86__PARAM_H */
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index fbbde93f12d..891971f57d3 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_PARAVIRT_H 1#ifndef ASM_X86__PARAVIRT_H
2#define __ASM_PARAVIRT_H 2#define ASM_X86__PARAVIRT_H
3/* Various instructions on x86 need to be replaced for 3/* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */ 4 * para-virtualization: those hooks are defined here. */
5 5
@@ -137,6 +137,7 @@ struct pv_cpu_ops {
137 137
138 /* MSR, PMC and TSR operations. 138 /* MSR, PMC and TSR operations.
139 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ 139 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
140 u64 (*read_msr_amd)(unsigned int msr, int *err);
140 u64 (*read_msr)(unsigned int msr, int *err); 141 u64 (*read_msr)(unsigned int msr, int *err);
141 int (*write_msr)(unsigned int msr, unsigned low, unsigned high); 142 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
142 143
@@ -257,13 +258,13 @@ struct pv_mmu_ops {
257 * Hooks for allocating/releasing pagetable pages when they're 258 * Hooks for allocating/releasing pagetable pages when they're
258 * attached to a pagetable 259 * attached to a pagetable
259 */ 260 */
260 void (*alloc_pte)(struct mm_struct *mm, u32 pfn); 261 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
261 void (*alloc_pmd)(struct mm_struct *mm, u32 pfn); 262 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
262 void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); 263 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
263 void (*alloc_pud)(struct mm_struct *mm, u32 pfn); 264 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
264 void (*release_pte)(u32 pfn); 265 void (*release_pte)(unsigned long pfn);
265 void (*release_pmd)(u32 pfn); 266 void (*release_pmd)(unsigned long pfn);
266 void (*release_pud)(u32 pfn); 267 void (*release_pud)(unsigned long pfn);
267 268
268 /* Pagetable manipulation functions */ 269 /* Pagetable manipulation functions */
269 void (*set_pte)(pte_t *ptep, pte_t pteval); 270 void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -726,6 +727,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
726{ 727{
727 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); 728 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
728} 729}
730static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
731{
732 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
733}
729static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) 734static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
730{ 735{
731 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); 736 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -771,6 +776,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
771 *p = paravirt_read_msr(msr, &err); 776 *p = paravirt_read_msr(msr, &err);
772 return err; 777 return err;
773} 778}
779static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
780{
781 int err;
782
783 *p = paravirt_read_msr_amd(msr, &err);
784 return err;
785}
774 786
775static inline u64 paravirt_read_tsc(void) 787static inline u64 paravirt_read_tsc(void)
776{ 788{
@@ -993,35 +1005,35 @@ static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
993 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd); 1005 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
994} 1006}
995 1007
996static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn) 1008static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
997{ 1009{
998 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn); 1010 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
999} 1011}
1000static inline void paravirt_release_pte(unsigned pfn) 1012static inline void paravirt_release_pte(unsigned long pfn)
1001{ 1013{
1002 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn); 1014 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
1003} 1015}
1004 1016
1005static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn) 1017static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1006{ 1018{
1007 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); 1019 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
1008} 1020}
1009 1021
1010static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn, 1022static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
1011 unsigned start, unsigned count) 1023 unsigned long start, unsigned long count)
1012{ 1024{
1013 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count); 1025 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
1014} 1026}
1015static inline void paravirt_release_pmd(unsigned pfn) 1027static inline void paravirt_release_pmd(unsigned long pfn)
1016{ 1028{
1017 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); 1029 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
1018} 1030}
1019 1031
1020static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn) 1032static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1021{ 1033{
1022 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn); 1034 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1023} 1035}
1024static inline void paravirt_release_pud(unsigned pfn) 1036static inline void paravirt_release_pud(unsigned long pfn)
1025{ 1037{
1026 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); 1038 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1027} 1039}
@@ -1634,4 +1646,4 @@ static inline unsigned long __raw_local_irq_save(void)
1634 1646
1635#endif /* __ASSEMBLY__ */ 1647#endif /* __ASSEMBLY__ */
1636#endif /* CONFIG_PARAVIRT */ 1648#endif /* CONFIG_PARAVIRT */
1637#endif /* __ASM_PARAVIRT_H */ 1649#endif /* ASM_X86__PARAVIRT_H */
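
Alongside the read_msr_amd hook, the pv_mmu_ops page-table callbacks and their PVOP wrappers switch their pfn arguments from u32/unsigned to unsigned long. With 4 KiB pages, a 32-bit frame number tops out at 2^32 * 4 KiB = 16 TiB of physical address space, and on x86_64 frame numbers can exceed that, so the narrower type would silently truncate. A small self-contained demonstration of the truncation (plain userspace C, illustrative values only):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Physical address above 16 TiB: the pfn no longer fits in 32 bits. */
            uint64_t phys  = 0x12345ULL << 32;
            uint64_t pfn   = phys >> 12;                 /* 4 KiB pages */
            uint32_t pfn32 = (uint32_t)pfn;              /* old u32 parameter */
            unsigned long pfn_ul = (unsigned long)pfn;   /* new type, 64-bit on x86_64 */

            printf("pfn=%#llx truncated=%#x widened=%#lx\n",
                   (unsigned long long)pfn, pfn32, pfn_ul);
            return 0;
    }
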
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
index 3c4ffeb467e..2e3dda4dc3d 100644
--- a/include/asm-x86/parport.h
+++ b/include/asm-x86/parport.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PARPORT_H 1#ifndef ASM_X86__PARPORT_H
2#define _ASM_X86_PARPORT_H 2#define ASM_X86__PARPORT_H
3 3
4static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); 4static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
5static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) 5static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
@@ -7,4 +7,4 @@ static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
7 return parport_pc_find_isa_ports(autoirq, autodma); 7 return parport_pc_find_isa_ports(autoirq, autodma);
8} 8}
9 9
10#endif /* _ASM_X86_PARPORT_H */ 10#endif /* ASM_X86__PARPORT_H */
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
index 7edc4730721..482c3e3f987 100644
--- a/include/asm-x86/pat.h
+++ b/include/asm-x86/pat.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_PAT_H 1#ifndef ASM_X86__PAT_H
2#define _ASM_PAT_H 2#define ASM_X86__PAT_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
@@ -19,4 +19,4 @@ extern int free_memtype(u64 start, u64 end);
19 19
20extern void pat_disable(char *reason); 20extern void pat_disable(char *reason);
21 21
22#endif 22#endif /* ASM_X86__PAT_H */
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
index 80c775d9fe2..da42be07b69 100644
--- a/include/asm-x86/pci-direct.h
+++ b/include/asm-x86/pci-direct.h
@@ -1,5 +1,5 @@
1#ifndef ASM_PCI_DIRECT_H 1#ifndef ASM_X86__PCI_DIRECT_H
2#define ASM_PCI_DIRECT_H 1 2#define ASM_X86__PCI_DIRECT_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
@@ -18,4 +18,4 @@ extern int early_pci_allowed(void);
18extern unsigned int pci_early_dump_regs; 18extern unsigned int pci_early_dump_regs;
19extern void early_dump_pci_device(u8 bus, u8 slot, u8 func); 19extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
20extern void early_dump_pci_devices(void); 20extern void early_dump_pci_devices(void);
21#endif 21#endif /* ASM_X86__PCI_DIRECT_H */
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index 2db14cf17db..60258319299 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -1,5 +1,5 @@
1#ifndef __x86_PCI_H 1#ifndef ASM_X86__PCI_H
2#define __x86_PCI_H 2#define ASM_X86__PCI_H
3 3
4#include <linux/mm.h> /* for struct page */ 4#include <linux/mm.h> /* for struct page */
5#include <linux/types.h> 5#include <linux/types.h>
@@ -111,4 +111,4 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
111} 111}
112#endif 112#endif
113 113
114#endif 114#endif /* ASM_X86__PCI_H */
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h
index a50d4685128..3f2288207c0 100644
--- a/include/asm-x86/pci_32.h
+++ b/include/asm-x86/pci_32.h
@@ -1,5 +1,5 @@
1#ifndef __i386_PCI_H 1#ifndef ASM_X86__PCI_32_H
2#define __i386_PCI_H 2#define ASM_X86__PCI_32_H
3 3
4 4
5#ifdef __KERNEL__ 5#ifdef __KERNEL__
@@ -31,4 +31,4 @@ struct pci_dev;
31#endif /* __KERNEL__ */ 31#endif /* __KERNEL__ */
32 32
33 33
34#endif /* __i386_PCI_H */ 34#endif /* ASM_X86__PCI_32_H */
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index f330234ffa5..f72e12d5770 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -1,5 +1,5 @@
1#ifndef __x8664_PCI_H 1#ifndef ASM_X86__PCI_64_H
2#define __x8664_PCI_H 2#define ASM_X86__PCI_64_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -63,4 +63,4 @@ extern void pci_iommu_alloc(void);
63 63
64#endif /* __KERNEL__ */ 64#endif /* __KERNEL__ */
65 65
66#endif /* __x8664_PCI_H */ 66#endif /* ASM_X86__PCI_64_H */
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index b34e9a7cc80..80860afffbd 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -1,5 +1,5 @@
1#ifndef X86_64_PDA_H 1#ifndef ASM_X86__PDA_H
2#define X86_64_PDA_H 2#define ASM_X86__PDA_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/stddef.h> 5#include <linux/stddef.h>
@@ -134,4 +134,4 @@ do { \
134 134
135#define PDA_STACKOFFSET (5*8) 135#define PDA_STACKOFFSET (5*8)
136 136
137#endif 137#endif /* ASM_X86__PDA_H */
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index f643a3a92da..e10a1d0678c 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PERCPU_H_ 1#ifndef ASM_X86__PERCPU_H
2#define _ASM_X86_PERCPU_H_ 2#define ASM_X86__PERCPU_H
3 3
4#ifdef CONFIG_X86_64 4#ifdef CONFIG_X86_64
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -215,4 +215,4 @@ do { \
215 215
216#endif /* !CONFIG_SMP */ 216#endif /* !CONFIG_SMP */
217 217
218#endif /* _ASM_X86_PERCPU_H_ */ 218#endif /* ASM_X86__PERCPU_H */
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index d63ea431cb3..3cd23adedae 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PGALLOC_H 1#ifndef ASM_X86__PGALLOC_H
2#define _ASM_X86_PGALLOC_H 2#define ASM_X86__PGALLOC_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */ 5#include <linux/mm.h> /* for struct page */
@@ -111,4 +111,4 @@ extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
111#endif /* PAGETABLE_LEVELS > 3 */ 111#endif /* PAGETABLE_LEVELS > 3 */
112#endif /* PAGETABLE_LEVELS > 2 */ 112#endif /* PAGETABLE_LEVELS > 2 */
113 113
114#endif /* _ASM_X86_PGALLOC_H */ 114#endif /* ASM_X86__PGALLOC_H */
diff --git a/include/asm-x86/pgtable-2level-defs.h b/include/asm-x86/pgtable-2level-defs.h
index 0f71c9f13da..7ec48f4e534 100644
--- a/include/asm-x86/pgtable-2level-defs.h
+++ b/include/asm-x86/pgtable-2level-defs.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_2LEVEL_DEFS_H 1#ifndef ASM_X86__PGTABLE_2LEVEL_DEFS_H
2#define _I386_PGTABLE_2LEVEL_DEFS_H 2#define ASM_X86__PGTABLE_2LEVEL_DEFS_H
3 3
4#define SHARED_KERNEL_PMD 0 4#define SHARED_KERNEL_PMD 0
5 5
@@ -17,4 +17,4 @@
17 17
18#define PTRS_PER_PTE 1024 18#define PTRS_PER_PTE 1024
19 19
20#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ 20#endif /* ASM_X86__PGTABLE_2LEVEL_DEFS_H */
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 46bc52c0eae..81762081dcd 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_2LEVEL_H 1#ifndef ASM_X86__PGTABLE_2LEVEL_H
2#define _I386_PGTABLE_2LEVEL_H 2#define ASM_X86__PGTABLE_2LEVEL_H
3 3
4#define pte_ERROR(e) \ 4#define pte_ERROR(e) \
5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) 5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) 53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
54#endif 54#endif
55 55
56#define pte_page(x) pfn_to_page(pte_pfn(x))
57#define pte_none(x) (!(x).pte_low) 56#define pte_none(x) (!(x).pte_low)
58#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
59 57
60/* 58/*
61 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset 59 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
@@ -78,4 +76,4 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
78#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) 76#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
79#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) 77#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
80 78
81#endif /* _I386_PGTABLE_2LEVEL_H */ 79#endif /* ASM_X86__PGTABLE_2LEVEL_H */
diff --git a/include/asm-x86/pgtable-3level-defs.h b/include/asm-x86/pgtable-3level-defs.h
index 448ac951631..c05fe6ff372 100644
--- a/include/asm-x86/pgtable-3level-defs.h
+++ b/include/asm-x86/pgtable-3level-defs.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_3LEVEL_DEFS_H 1#ifndef ASM_X86__PGTABLE_3LEVEL_DEFS_H
2#define _I386_PGTABLE_3LEVEL_DEFS_H 2#define ASM_X86__PGTABLE_3LEVEL_DEFS_H
3 3
4#ifdef CONFIG_PARAVIRT 4#ifdef CONFIG_PARAVIRT
5#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) 5#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
@@ -25,4 +25,4 @@
25 */ 25 */
26#define PTRS_PER_PTE 512 26#define PTRS_PER_PTE 512
27 27
28#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ 28#endif /* ASM_X86__PGTABLE_3LEVEL_DEFS_H */
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 105057f3403..75f4276b5dd 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_3LEVEL_H 1#ifndef ASM_X86__PGTABLE_3LEVEL_H
2#define _I386_PGTABLE_3LEVEL_H 2#define ASM_X86__PGTABLE_3LEVEL_H
3 3
4/* 4/*
5 * Intel Physical Address Extension (PAE) Mode - three-level page 5 * Intel Physical Address Extension (PAE) Mode - three-level page
@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
151 return a.pte_low == b.pte_low && a.pte_high == b.pte_high; 151 return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
152} 152}
153 153
154#define pte_page(x) pfn_to_page(pte_pfn(x))
155
156static inline int pte_none(pte_t pte) 154static inline int pte_none(pte_t pte)
157{ 155{
158 return !pte.pte_low && !pte.pte_high; 156 return !pte.pte_low && !pte.pte_high;
159} 157}
160 158
161static inline unsigned long pte_pfn(pte_t pte)
162{
163 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
164}
165
166/* 159/*
167 * Bits 0, 6 and 7 are taken in the low part of the pte, 160 * Bits 0, 6 and 7 are taken in the low part of the pte,
168 * put the 32 bits of offset into the high part. 161 * put the 32 bits of offset into the high part.
@@ -179,4 +172,4 @@ static inline unsigned long pte_pfn(pte_t pte)
179#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) 172#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
180#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) 173#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
181 174
182#endif /* _I386_PGTABLE_3LEVEL_H */ 175#endif /* ASM_X86__PGTABLE_3LEVEL_H */
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 04caa2f544d..888add7b088 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PGTABLE_H 1#ifndef ASM_X86__PGTABLE_H
2#define _ASM_X86_PGTABLE_H 2#define ASM_X86__PGTABLE_H
3 3
4#define FIRST_USER_ADDRESS 0 4#define FIRST_USER_ADDRESS 0
5 5
@@ -186,6 +186,13 @@ static inline int pte_special(pte_t pte)
186 return pte_val(pte) & _PAGE_SPECIAL; 186 return pte_val(pte) & _PAGE_SPECIAL;
187} 187}
188 188
189static inline unsigned long pte_pfn(pte_t pte)
190{
191 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
192}
193
194#define pte_page(pte) pfn_to_page(pte_pfn(pte))
195
189static inline int pmd_large(pmd_t pte) 196static inline int pmd_large(pmd_t pte)
190{ 197{
191 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == 198 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -313,6 +320,8 @@ static inline void native_pagetable_setup_start(pgd_t *base) {}
313static inline void native_pagetable_setup_done(pgd_t *base) {} 320static inline void native_pagetable_setup_done(pgd_t *base) {}
314#endif 321#endif
315 322
323extern int arch_report_meminfo(char *page);
324
316#ifdef CONFIG_PARAVIRT 325#ifdef CONFIG_PARAVIRT
317#include <asm/paravirt.h> 326#include <asm/paravirt.h>
318#else /* !CONFIG_PARAVIRT */ 327#else /* !CONFIG_PARAVIRT */
@@ -521,4 +530,4 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
521#include <asm-generic/pgtable.h> 530#include <asm-generic/pgtable.h>
522#endif /* __ASSEMBLY__ */ 531#endif /* __ASSEMBLY__ */
523 532
524#endif /* _ASM_X86_PGTABLE_H */ 533#endif /* ASM_X86__PGTABLE_H */
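
The per-mode copies of pte_pfn()/pte_page() (the 2-level and 3-level headers above, and the 64-bit header further down) are consolidated into this single definition: mask the frame bits out of the pte and shift right by PAGE_SHIFT. A simplified stand-alone model of the helper (the mask and flag layout here are illustrative, not the kernel's exact PTE_PFN_MASK):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT   12
    /* Keep bits 12..51 of the pte; everything else is treated as flags here. */
    #define PTE_PFN_MASK (((1ULL << 52) - 1) & ~((1ULL << PAGE_SHIFT) - 1))

    static uint64_t pte_pfn(uint64_t pte)
    {
            return (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
    }

    int main(void)
    {
            uint64_t pte = (0xabcdeULL << PAGE_SHIFT) | 0x67;  /* pfn 0xabcde + flag bits */
            printf("pfn = %#llx\n", (unsigned long long)pte_pfn(pte));
            return 0;
    }
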
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 5c3b26567a9..8de702dc7d6 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_PGTABLE_H 1#ifndef ASM_X86__PGTABLE_32_H
2#define _I386_PGTABLE_H 2#define ASM_X86__PGTABLE_32_H
3 3
4 4
5/* 5/*
@@ -31,6 +31,7 @@ static inline void pgtable_cache_init(void) { }
31static inline void check_pgt_cache(void) { } 31static inline void check_pgt_cache(void) { }
32void paging_init(void); 32void paging_init(void);
33 33
34extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
34 35
35/* 36/*
36 * The Linux x86 paging architecture is 'compile-time dual-mode', it 37 * The Linux x86 paging architecture is 'compile-time dual-mode', it
@@ -56,8 +57,7 @@ void paging_init(void);
56 * area for the same reason. ;) 57 * area for the same reason. ;)
57 */ 58 */
58#define VMALLOC_OFFSET (8 * 1024 * 1024) 59#define VMALLOC_OFFSET (8 * 1024 * 1024)
59#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \ 60#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
60 & ~(VMALLOC_OFFSET - 1))
61#ifdef CONFIG_X86_PAE 61#ifdef CONFIG_X86_PAE
62#define LAST_PKMAP 512 62#define LAST_PKMAP 512
63#else 63#else
@@ -73,6 +73,8 @@ void paging_init(void);
73# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) 73# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
74#endif 74#endif
75 75
76#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
77
76/* 78/*
77 * Define this if things work differently on an i386 and an i486: 79 * Define this if things work differently on an i386 and an i486:
78 * it will (on an i486) warn about kernel memory accesses that are 80 * it will (on an i486) warn about kernel memory accesses that are
@@ -186,4 +188,4 @@ do { \
186#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 188#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
187 remap_pfn_range(vma, vaddr, pfn, size, prot) 189 remap_pfn_range(vma, vaddr, pfn, size, prot)
188 190
189#endif /* _I386_PGTABLE_H */ 191#endif /* ASM_X86__PGTABLE_32_H */
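
Two related 32-bit layout changes land here: VMALLOC_START becomes simply high_memory + VMALLOC_OFFSET (the explicit rounding is gone), and MAXMEM moves out of page_32.h and is now expressed in terms of VMALLOC_END rather than PAGE_OFFSET alone. A back-of-the-envelope sketch with assumed values (PAGE_OFFSET, VMALLOC_END and the 128 MiB __VMALLOC_RESERVE are configuration-dependent and not taken from this patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_offset     = 0xc0000000UL;  /* assumed PAGE_OFFSET       */
            unsigned long vmalloc_end     = 0xff7fe000UL;  /* assumed VMALLOC_END       */
            unsigned long vmalloc_reserve = 128UL << 20;   /* assumed __VMALLOC_RESERVE */

            /* MAXMEM = VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE */
            unsigned long maxmem = vmalloc_end - page_offset - vmalloc_reserve;
            printf("directly mapped lowmem limit ~= %lu MiB\n", maxmem >> 20);
            return 0;
    }
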
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 549144d03d9..fde9770e53d 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_PGTABLE_H 1#ifndef ASM_X86__PGTABLE_64_H
2#define _X86_64_PGTABLE_H 2#define ASM_X86__PGTABLE_64_H
3 3
4#include <linux/const.h> 4#include <linux/const.h>
5#ifndef __ASSEMBLY__ 5#ifndef __ASSEMBLY__
@@ -175,8 +175,6 @@ static inline int pmd_bad(pmd_t pmd)
175#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE)) 175#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
176 176
177#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ 177#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
178#define pte_page(x) pfn_to_page(pte_pfn((x)))
179#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
180 178
181/* 179/*
182 * Macro to mark a page protection value as "uncacheable". 180 * Macro to mark a page protection value as "uncacheable".
@@ -284,4 +282,4 @@ extern void cleanup_highmap(void);
284#define __HAVE_ARCH_PTE_SAME 282#define __HAVE_ARCH_PTE_SAME
285#endif /* !__ASSEMBLY__ */ 283#endif /* !__ASSEMBLY__ */
286 284
287#endif /* _X86_64_PGTABLE_H */ 285#endif /* ASM_X86__PGTABLE_64_H */
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
index b031efda37e..70cf2bb0593 100644
--- a/include/asm-x86/posix_types_32.h
+++ b/include/asm-x86/posix_types_32.h
@@ -1,5 +1,5 @@
1#ifndef __ARCH_I386_POSIX_TYPES_H 1#ifndef ASM_X86__POSIX_TYPES_32_H
2#define __ARCH_I386_POSIX_TYPES_H 2#define ASM_X86__POSIX_TYPES_32_H
3 3
4/* 4/*
5 * This file is generally used by user-level software, so you need to 5 * This file is generally used by user-level software, so you need to
@@ -82,4 +82,4 @@ do { \
82 82
83#endif /* defined(__KERNEL__) */ 83#endif /* defined(__KERNEL__) */
84 84
85#endif 85#endif /* ASM_X86__POSIX_TYPES_32_H */
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h
index d6624c95854..388b4e7f4a4 100644
--- a/include/asm-x86/posix_types_64.h
+++ b/include/asm-x86/posix_types_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_POSIX_TYPES_H 1#ifndef ASM_X86__POSIX_TYPES_64_H
2#define _ASM_X86_64_POSIX_TYPES_H 2#define ASM_X86__POSIX_TYPES_64_H
3 3
4/* 4/*
5 * This file is generally used by user-level software, so you need to 5 * This file is generally used by user-level software, so you need to
@@ -116,4 +116,4 @@ static inline void __FD_ZERO(__kernel_fd_set *p)
116 116
117#endif /* defined(__KERNEL__) */ 117#endif /* defined(__KERNEL__) */
118 118
119#endif 119#endif /* ASM_X86__POSIX_TYPES_64_H */
diff --git a/include/asm-x86/prctl.h b/include/asm-x86/prctl.h
index 52952adef1c..e7ae34eb410 100644
--- a/include/asm-x86/prctl.h
+++ b/include/asm-x86/prctl.h
@@ -1,5 +1,5 @@
1#ifndef X86_64_PRCTL_H 1#ifndef ASM_X86__PRCTL_H
2#define X86_64_PRCTL_H 1 2#define ASM_X86__PRCTL_H
3 3
4#define ARCH_SET_GS 0x1001 4#define ARCH_SET_GS 0x1001
5#define ARCH_SET_FS 0x1002 5#define ARCH_SET_FS 0x1002
@@ -7,4 +7,4 @@
7#define ARCH_GET_GS 0x1004 7#define ARCH_GET_GS 0x1004
8 8
9 9
10#endif 10#endif /* ASM_X86__PRCTL_H */
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index eff2ecd7fff..5dd79774f69 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_I386_PROCESSOR_FLAGS_H 1#ifndef ASM_X86__PROCESSOR_FLAGS_H
2#define __ASM_I386_PROCESSOR_FLAGS_H 2#define ASM_X86__PROCESSOR_FLAGS_H
3/* Various flags defined: can be included from assembler. */ 3/* Various flags defined: can be included from assembler. */
4 4
5/* 5/*
@@ -96,4 +96,4 @@
96#endif 96#endif
97#endif 97#endif
98 98
99#endif /* __ASM_I386_PROCESSOR_FLAGS_H */ 99#endif /* ASM_X86__PROCESSOR_FLAGS_H */
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 4df3e2f6fb5..5eaf9bf0a62 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_X86_PROCESSOR_H 1#ifndef ASM_X86__PROCESSOR_H
2#define __ASM_X86_PROCESSOR_H 2#define ASM_X86__PROCESSOR_H
3 3
4#include <asm/processor-flags.h> 4#include <asm/processor-flags.h>
5 5
@@ -20,6 +20,7 @@ struct mm_struct;
20#include <asm/msr.h> 20#include <asm/msr.h>
21#include <asm/desc_defs.h> 21#include <asm/desc_defs.h>
22#include <asm/nops.h> 22#include <asm/nops.h>
23#include <asm/ds.h>
23 24
24#include <linux/personality.h> 25#include <linux/personality.h>
25#include <linux/cpumask.h> 26#include <linux/cpumask.h>
@@ -140,6 +141,8 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
140#define current_cpu_data boot_cpu_data 141#define current_cpu_data boot_cpu_data
141#endif 142#endif
142 143
144extern const struct seq_operations cpuinfo_op;
145
143static inline int hlt_works(int cpu) 146static inline int hlt_works(int cpu)
144{ 147{
145#ifdef CONFIG_X86_32 148#ifdef CONFIG_X86_32
@@ -153,6 +156,8 @@ static inline int hlt_works(int cpu)
153 156
154extern void cpu_detect(struct cpuinfo_x86 *c); 157extern void cpu_detect(struct cpuinfo_x86 *c);
155 158
159extern struct pt_regs *idle_regs(struct pt_regs *);
160
156extern void early_cpu_init(void); 161extern void early_cpu_init(void);
157extern void identify_boot_cpu(void); 162extern void identify_boot_cpu(void);
158extern void identify_secondary_cpu(struct cpuinfo_x86 *); 163extern void identify_secondary_cpu(struct cpuinfo_x86 *);
@@ -411,9 +416,14 @@ struct thread_struct {
411 unsigned io_bitmap_max; 416 unsigned io_bitmap_max;
412/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ 417/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
413 unsigned long debugctlmsr; 418 unsigned long debugctlmsr;
414/* Debug Store - if not 0 points to a DS Save Area configuration; 419#ifdef CONFIG_X86_DS
415 * goes into MSR_IA32_DS_AREA */ 420/* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
416 unsigned long ds_area_msr; 421 struct ds_context *ds_ctx;
422#endif /* CONFIG_X86_DS */
423#ifdef CONFIG_X86_PTRACE_BTS
424/* the signal to send on a bts buffer overflow */
425 unsigned int bts_ovfl_signal;
426#endif /* CONFIG_X86_PTRACE_BTS */
417}; 427};
418 428
419static inline unsigned long native_get_debugreg(int regno) 429static inline unsigned long native_get_debugreg(int regno)
@@ -943,4 +953,4 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
943extern int get_tsc_mode(unsigned long adr); 953extern int get_tsc_mode(unsigned long adr);
944extern int set_tsc_mode(unsigned int val); 954extern int set_tsc_mode(unsigned int val);
945 955
946#endif 956#endif /* ASM_X86__PROCESSOR_H */
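The thread_struct change above replaces the raw ds_area_msr word with a ds_ctx pointer that only exists under CONFIG_X86_DS, and adds a BTS overflow signal under CONFIG_X86_PTRACE_BTS. Any code touching these fields now has to respect the config guards; a hedged sketch of an accessor (the helper name is illustrative, not part of this patch; the field names are taken from the hunk):

    /* Illustrative only: read the per-thread DS context under the new guards. */
    static inline struct ds_context *thread_ds_context(struct thread_struct *t)
    {
    #ifdef CONFIG_X86_DS
            return t->ds_ctx;       /* see include/asm-x86/ds.h */
    #else
            return NULL;            /* DS support compiled out */
    #endif
    }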
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 3dd458c385c..6e89e8b4de0 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X8664_PROTO_H 1#ifndef ASM_X86__PROTO_H
2#define _ASM_X8664_PROTO_H 1 2#define ASM_X86__PROTO_H
3 3
4#include <asm/ldt.h> 4#include <asm/ldt.h>
5 5
@@ -29,4 +29,4 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
29#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) 29#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
30#define round_down(x, y) ((x) & ~((y) - 1)) 30#define round_down(x, y) ((x) & ~((y) - 1))
31 31
32#endif 32#endif /* ASM_X86__PROTO_H */
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
index 72e7b9db29b..4298b8882a7 100644
--- a/include/asm-x86/ptrace-abi.h
+++ b/include/asm-x86/ptrace-abi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PTRACE_ABI_H 1#ifndef ASM_X86__PTRACE_ABI_H
2#define _ASM_X86_PTRACE_ABI_H 2#define ASM_X86__PTRACE_ABI_H
3 3
4#ifdef __i386__ 4#ifdef __i386__
5 5
@@ -80,8 +80,9 @@
80 80
81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ 81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
82 82
83#ifndef __ASSEMBLY__ 83#ifdef CONFIG_X86_PTRACE_BTS
84 84
85#ifndef __ASSEMBLY__
85#include <asm/types.h> 86#include <asm/types.h>
86 87
87/* configuration/status structure used in PTRACE_BTS_CONFIG and 88/* configuration/status structure used in PTRACE_BTS_CONFIG and
@@ -97,20 +98,20 @@ struct ptrace_bts_config {
97 /* actual size of bts_struct in bytes */ 98 /* actual size of bts_struct in bytes */
98 __u32 bts_size; 99 __u32 bts_size;
99}; 100};
100#endif 101#endif /* __ASSEMBLY__ */
101 102
102#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */ 103#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
103#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */ 104#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
104#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow 105#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
105 instead of wrapping around */ 106 instead of wrapping around */
106#define PTRACE_BTS_O_CUT_SIZE 0x8 /* cut requested size to max available 107#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */
107 instead of failing */
108 108
109#define PTRACE_BTS_CONFIG 40 109#define PTRACE_BTS_CONFIG 40
110/* Configure branch trace recording. 110/* Configure branch trace recording.
111 ADDR points to a struct ptrace_bts_config. 111 ADDR points to a struct ptrace_bts_config.
112 DATA gives the size of that buffer. 112 DATA gives the size of that buffer.
113 A new buffer is allocated, iff the size changes. 113 A new buffer is allocated, if requested in the flags.
114 An overflow signal may only be requested for new buffers.
114 Returns the number of bytes read. 115 Returns the number of bytes read.
115*/ 116*/
116#define PTRACE_BTS_STATUS 41 117#define PTRACE_BTS_STATUS 41
@@ -119,7 +120,7 @@ struct ptrace_bts_config {
119 Returns the number of bytes written. 120 Returns the number of bytes written.
120*/ 121*/
121#define PTRACE_BTS_SIZE 42 122#define PTRACE_BTS_SIZE 42
122/* Return the number of available BTS records. 123/* Return the number of available BTS records for draining.
123 DATA and ADDR are ignored. 124 DATA and ADDR are ignored.
124*/ 125*/
125#define PTRACE_BTS_GET 43 126#define PTRACE_BTS_GET 43
@@ -139,5 +140,6 @@ struct ptrace_bts_config {
139 BTS records are read from oldest to newest. 140 BTS records are read from oldest to newest.
140 Returns number of BTS records drained. 141 Returns number of BTS records drained.
141*/ 142*/
143#endif /* CONFIG_X86_PTRACE_BTS */
142 144
143#endif 145#endif /* ASM_X86__PTRACE_ABI_H */
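With PTRACE_BTS_O_CUT_SIZE gone and PTRACE_BTS_O_ALLOC added, a tracer must now request (re)allocation of the BTS buffer explicitly. A user-space sketch under stated assumptions: the child pid is already being traced and stopped, the request value 40 and the bts_size field come from this header, and the remaining ptrace_bts_config fields (size, flags, signal) are quoted from the full header, which this hunk only shows in part:

    #include <sys/ptrace.h>
    #include <sys/types.h>

    static long enable_bts(pid_t pid)
    {
            struct ptrace_bts_config cfg = {
                    .size     = 4096,       /* requested BTS buffer size in bytes */
                    .flags    = PTRACE_BTS_O_TRACE | PTRACE_BTS_O_ALLOC,
                    .signal   = 0,          /* wrap around instead of signalling */
                    .bts_size = 0,          /* filled in by the kernel */
            };

            /* Per the comment above: ADDR points to the config, DATA gives its size. */
            return ptrace(PTRACE_BTS_CONFIG, pid, &cfg, (void *)sizeof(cfg));
    }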
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 8a71db803da..d64a6109716 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PTRACE_H 1#ifndef ASM_X86__PTRACE_H
2#define _ASM_X86_PTRACE_H 2#define ASM_X86__PTRACE_H
3 3
4#include <linux/compiler.h> /* For __user */ 4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h> 5#include <asm/ptrace-abi.h>
@@ -127,14 +127,48 @@ struct pt_regs {
127#endif /* __KERNEL__ */ 127#endif /* __KERNEL__ */
128#endif /* !__i386__ */ 128#endif /* !__i386__ */
129 129
130
131#ifdef CONFIG_X86_PTRACE_BTS
132/* a branch trace record entry
133 *
134 * In order to unify the interface between various processor versions,
135 * we use the below data structure for all processors.
136 */
137enum bts_qualifier {
138 BTS_INVALID = 0,
139 BTS_BRANCH,
140 BTS_TASK_ARRIVES,
141 BTS_TASK_DEPARTS
142};
143
144struct bts_struct {
145 __u64 qualifier;
146 union {
147 /* BTS_BRANCH */
148 struct {
149 __u64 from_ip;
150 __u64 to_ip;
151 } lbr;
152 /* BTS_TASK_ARRIVES or
153 BTS_TASK_DEPARTS */
154 __u64 jiffies;
155 } variant;
156};
157#endif /* CONFIG_X86_PTRACE_BTS */
158
130#ifdef __KERNEL__ 159#ifdef __KERNEL__
131 160
132/* the DS BTS struct is used for ptrace as well */ 161#include <linux/init.h>
133#include <asm/ds.h>
134 162
163struct cpuinfo_x86;
135struct task_struct; 164struct task_struct;
136 165
166#ifdef CONFIG_X86_PTRACE_BTS
167extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
137extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier); 168extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
169#else
170#define ptrace_bts_init_intel(config) do {} while (0)
171#endif /* CONFIG_X86_PTRACE_BTS */
138 172
139extern unsigned long profile_pc(struct pt_regs *regs); 173extern unsigned long profile_pc(struct pt_regs *regs);
140 174
@@ -148,6 +182,9 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
148void signal_fault(struct pt_regs *regs, void __user *frame, char *where); 182void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
149#endif 183#endif
150 184
185extern long syscall_trace_enter(struct pt_regs *);
186extern void syscall_trace_leave(struct pt_regs *);
187
151static inline unsigned long regs_return_value(struct pt_regs *regs) 188static inline unsigned long regs_return_value(struct pt_regs *regs)
152{ 189{
153 return regs->ax; 190 return regs->ax;
@@ -213,6 +250,11 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
213 return regs->bp; 250 return regs->bp;
214} 251}
215 252
253static inline unsigned long user_stack_pointer(struct pt_regs *regs)
254{
255 return regs->sp;
256}
257
216/* 258/*
217 * These are defined as per linux/ptrace.h, which see. 259 * These are defined as per linux/ptrace.h, which see.
218 */ 260 */
@@ -239,4 +281,4 @@ extern int do_set_thread_area(struct task_struct *p, int idx,
239 281
240#endif /* !__ASSEMBLY__ */ 282#endif /* !__ASSEMBLY__ */
241 283
242#endif 284#endif /* ASM_X86__PTRACE_H */
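The ptrace.h hunk moves enum bts_qualifier and struct bts_struct here and guards them with CONFIG_X86_PTRACE_BTS, so ptrace no longer pulls them in through asm/ds.h. A short sketch of decoding one record obtained via PTRACE_BTS_GET or the drain requests documented in ptrace-abi.h, using only the fields shown in this hunk (how the definition reaches user space is outside this diff):

    #include <stdio.h>

    static void print_bts_record(const struct bts_struct *bts)
    {
            switch (bts->qualifier) {
            case BTS_BRANCH:
                    printf("branch %llx -> %llx\n",
                           (unsigned long long)bts->variant.lbr.from_ip,
                           (unsigned long long)bts->variant.lbr.to_ip);
                    break;
            case BTS_TASK_ARRIVES:
            case BTS_TASK_DEPARTS:
                    printf("sched event, jiffies %llx\n",
                           (unsigned long long)bts->variant.jiffies);
                    break;
            default:
                    break;  /* BTS_INVALID or unknown qualifier */
            }
    }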
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h
index 6857f840b24..edb3b4ecfc8 100644
--- a/include/asm-x86/pvclock-abi.h
+++ b/include/asm-x86/pvclock-abi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PVCLOCK_ABI_H_ 1#ifndef ASM_X86__PVCLOCK_ABI_H
2#define _ASM_X86_PVCLOCK_ABI_H_ 2#define ASM_X86__PVCLOCK_ABI_H
3#ifndef __ASSEMBLY__ 3#ifndef __ASSEMBLY__
4 4
5/* 5/*
@@ -39,4 +39,4 @@ struct pvclock_wall_clock {
39} __attribute__((__packed__)); 39} __attribute__((__packed__));
40 40
41#endif /* __ASSEMBLY__ */ 41#endif /* __ASSEMBLY__ */
42#endif /* _ASM_X86_PVCLOCK_ABI_H_ */ 42#endif /* ASM_X86__PVCLOCK_ABI_H */
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
index 85b1bba8e0a..1a38f683480 100644
--- a/include/asm-x86/pvclock.h
+++ b/include/asm-x86/pvclock.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PVCLOCK_H_ 1#ifndef ASM_X86__PVCLOCK_H
2#define _ASM_X86_PVCLOCK_H_ 2#define ASM_X86__PVCLOCK_H
3 3
4#include <linux/clocksource.h> 4#include <linux/clocksource.h>
5#include <asm/pvclock-abi.h> 5#include <asm/pvclock-abi.h>
@@ -10,4 +10,4 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
10 struct pvclock_vcpu_time_info *vcpu, 10 struct pvclock_vcpu_time_info *vcpu,
11 struct timespec *ts); 11 struct timespec *ts);
12 12
13#endif /* _ASM_X86_PVCLOCK_H_ */ 13#endif /* ASM_X86__PVCLOCK_H */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index 206f355786d..1c2f0ce9e31 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_REBOOT_H 1#ifndef ASM_X86__REBOOT_H
2#define _ASM_REBOOT_H 2#define ASM_X86__REBOOT_H
3 3
4struct pt_regs; 4struct pt_regs;
5 5
@@ -18,4 +18,4 @@ void native_machine_crash_shutdown(struct pt_regs *regs);
18void native_machine_shutdown(void); 18void native_machine_shutdown(void);
19void machine_real_restart(const unsigned char *code, int length); 19void machine_real_restart(const unsigned char *code, int length);
20 20
21#endif /* _ASM_REBOOT_H */ 21#endif /* ASM_X86__REBOOT_H */
diff --git a/include/asm-x86/reboot_fixups.h b/include/asm-x86/reboot_fixups.h
index 0cb7d87c2b6..2c2987d9757 100644
--- a/include/asm-x86/reboot_fixups.h
+++ b/include/asm-x86/reboot_fixups.h
@@ -1,6 +1,6 @@
1#ifndef _LINUX_REBOOT_FIXUPS_H 1#ifndef ASM_X86__REBOOT_FIXUPS_H
2#define _LINUX_REBOOT_FIXUPS_H 2#define ASM_X86__REBOOT_FIXUPS_H
3 3
4extern void mach_reboot_fixups(void); 4extern void mach_reboot_fixups(void);
5 5
6#endif /* _LINUX_REBOOT_FIXUPS_H */ 6#endif /* ASM_X86__REBOOT_FIXUPS_H */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index 5c2ff4bc298..a01c4e37633 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_REQUIRED_FEATURES_H 1#ifndef ASM_X86__REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1 2#define ASM_X86__REQUIRED_FEATURES_H
3 3
4/* Define minimum CPUID feature set for kernel These bits are checked 4/* Define minimum CPUID feature set for kernel These bits are checked
5 really early to actually display a visible error message before the 5 really early to actually display a visible error message before the
@@ -79,4 +79,4 @@
79#define REQUIRED_MASK6 0 79#define REQUIRED_MASK6 0
80#define REQUIRED_MASK7 0 80#define REQUIRED_MASK7 0
81 81
82#endif 82#endif /* ASM_X86__REQUIRED_FEATURES_H */
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 8d9f0b41ee8..e39376d7de5 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_RESUME_TRACE_H 1#ifndef ASM_X86__RESUME_TRACE_H
2#define _ASM_X86_RESUME_TRACE_H 2#define ASM_X86__RESUME_TRACE_H
3 3
4#include <asm/asm.h> 4#include <asm/asm.h>
5 5
@@ -7,7 +7,7 @@
7do { \ 7do { \
8 if (pm_trace_enabled) { \ 8 if (pm_trace_enabled) { \
9 const void *tracedata; \ 9 const void *tracedata; \
10 asm volatile(_ASM_MOV_UL " $1f,%0\n" \ 10 asm volatile(_ASM_MOV " $1f,%0\n" \
11 ".section .tracedata,\"a\"\n" \ 11 ".section .tracedata,\"a\"\n" \
12 "1:\t.word %c1\n\t" \ 12 "1:\t.word %c1\n\t" \
13 _ASM_PTR " %c2\n" \ 13 _ASM_PTR " %c2\n" \
@@ -18,4 +18,4 @@ do { \
18 } \ 18 } \
19} while (0) 19} while (0)
20 20
21#endif 21#endif /* ASM_X86__RESUME_TRACE_H */
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index c9448bd8968..5e1256bdee8 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -5,8 +5,8 @@
5 * Author: Laurent Vivier <Laurent.Vivier@bull.net> 5 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
6 */ 6 */
7 7
8#ifndef __ASM_RIO_H 8#ifndef ASM_X86__RIO_H
9#define __ASM_RIO_H 9#define ASM_X86__RIO_H
10 10
11#define RIO_TABLE_VERSION 3 11#define RIO_TABLE_VERSION 3
12 12
@@ -60,4 +60,4 @@ enum {
60 ALT_CALGARY = 5, /* Second Planar Calgary */ 60 ALT_CALGARY = 5, /* Second Planar Calgary */
61}; 61};
62 62
63#endif /* __ASM_RIO_H */ 63#endif /* ASM_X86__RIO_H */
diff --git a/include/asm-x86/rwlock.h b/include/asm-x86/rwlock.h
index 6a8c0d64510..48a3109e1a7 100644
--- a/include/asm-x86/rwlock.h
+++ b/include/asm-x86/rwlock.h
@@ -1,8 +1,8 @@
1#ifndef _ASM_X86_RWLOCK_H 1#ifndef ASM_X86__RWLOCK_H
2#define _ASM_X86_RWLOCK_H 2#define ASM_X86__RWLOCK_H
3 3
4#define RW_LOCK_BIAS 0x01000000 4#define RW_LOCK_BIAS 0x01000000
5 5
6/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ 6/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
7 7
8#endif /* _ASM_X86_RWLOCK_H */ 8#endif /* ASM_X86__RWLOCK_H */
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
index 750f2a3542b..3ff3015b71a 100644
--- a/include/asm-x86/rwsem.h
+++ b/include/asm-x86/rwsem.h
@@ -29,8 +29,8 @@
29 * front, then they'll all be woken up, but no other readers will be. 29 * front, then they'll all be woken up, but no other readers will be.
30 */ 30 */
31 31
32#ifndef _I386_RWSEM_H 32#ifndef ASM_X86__RWSEM_H
33#define _I386_RWSEM_H 33#define ASM_X86__RWSEM_H
34 34
35#ifndef _LINUX_RWSEM_H 35#ifndef _LINUX_RWSEM_H
36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" 36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
@@ -262,4 +262,4 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
262} 262}
263 263
264#endif /* __KERNEL__ */ 264#endif /* __KERNEL__ */
265#endif /* _I386_RWSEM_H */ 265#endif /* ASM_X86__RWSEM_H */
diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h
index c0432061f81..ee48f880005 100644
--- a/include/asm-x86/scatterlist.h
+++ b/include/asm-x86/scatterlist.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SCATTERLIST_H 1#ifndef ASM_X86__SCATTERLIST_H
2#define _ASM_X86_SCATTERLIST_H 2#define ASM_X86__SCATTERLIST_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5 5
@@ -30,4 +30,4 @@ struct scatterlist {
30# define sg_dma_len(sg) ((sg)->dma_length) 30# define sg_dma_len(sg) ((sg)->dma_length)
31#endif 31#endif
32 32
33#endif 33#endif /* ASM_X86__SCATTERLIST_H */
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h
index 36e71c5f306..cf9ab2dbcef 100644
--- a/include/asm-x86/seccomp_32.h
+++ b/include/asm-x86/seccomp_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef ASM_X86__SECCOMP_32_H
2#define _ASM_SECCOMP_H 2#define ASM_X86__SECCOMP_32_H
3 3
4#include <linux/thread_info.h> 4#include <linux/thread_info.h>
5 5
@@ -14,4 +14,4 @@
14#define __NR_seccomp_exit __NR_exit 14#define __NR_seccomp_exit __NR_exit
15#define __NR_seccomp_sigreturn __NR_sigreturn 15#define __NR_seccomp_sigreturn __NR_sigreturn
16 16
17#endif /* _ASM_SECCOMP_H */ 17#endif /* ASM_X86__SECCOMP_32_H */
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 76cfe69aa63..03274cea751 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef ASM_X86__SECCOMP_64_H
2#define _ASM_SECCOMP_H 2#define ASM_X86__SECCOMP_64_H
3 3
4#include <linux/thread_info.h> 4#include <linux/thread_info.h>
5 5
@@ -22,4 +22,4 @@
22#define __NR_seccomp_exit_32 __NR_ia32_exit 22#define __NR_seccomp_exit_32 __NR_ia32_exit
23#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn 23#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
24 24
25#endif /* _ASM_SECCOMP_H */ 25#endif /* ASM_X86__SECCOMP_64_H */
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h
index 646452ea9ea..ea5f0a8686f 100644
--- a/include/asm-x86/segment.h
+++ b/include/asm-x86/segment.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SEGMENT_H_ 1#ifndef ASM_X86__SEGMENT_H
2#define _ASM_X86_SEGMENT_H_ 2#define ASM_X86__SEGMENT_H
3 3
4/* Constructor for a conventional segment GDT (or LDT) entry */ 4/* Constructor for a conventional segment GDT (or LDT) entry */
5/* This is a macro so it can be used in initializers */ 5/* This is a macro so it can be used in initializers */
@@ -212,4 +212,4 @@ extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
212#endif 212#endif
213#endif 213#endif
214 214
215#endif 215#endif /* ASM_X86__SEGMENT_H */
diff --git a/include/asm-x86/sembuf.h b/include/asm-x86/sembuf.h
index ee50c801f7b..81f06b7e5a3 100644
--- a/include/asm-x86/sembuf.h
+++ b/include/asm-x86/sembuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SEMBUF_H 1#ifndef ASM_X86__SEMBUF_H
2#define _ASM_X86_SEMBUF_H 2#define ASM_X86__SEMBUF_H
3 3
4/* 4/*
5 * The semid64_ds structure for x86 architecture. 5 * The semid64_ds structure for x86 architecture.
@@ -21,4 +21,4 @@ struct semid64_ds {
21 unsigned long __unused4; 21 unsigned long __unused4;
22}; 22};
23 23
24#endif /* _ASM_X86_SEMBUF_H */ 24#endif /* ASM_X86__SEMBUF_H */
diff --git a/include/asm-x86/serial.h b/include/asm-x86/serial.h
index 628c801535e..303660b671e 100644
--- a/include/asm-x86/serial.h
+++ b/include/asm-x86/serial.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SERIAL_H 1#ifndef ASM_X86__SERIAL_H
2#define _ASM_X86_SERIAL_H 2#define ASM_X86__SERIAL_H
3 3
4/* 4/*
5 * This assumes you have a 1.8432 MHz clock for your UART. 5 * This assumes you have a 1.8432 MHz clock for your UART.
@@ -26,4 +26,4 @@
26 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ 26 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
27 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ 27 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
28 28
29#endif /* _ASM_X86_SERIAL_H */ 29#endif /* ASM_X86__SERIAL_H */
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index a07c6f1c01e..9030cb73c4d 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SETUP_H 1#ifndef ASM_X86__SETUP_H
2#define _ASM_X86_SETUP_H 2#define ASM_X86__SETUP_H
3 3
4#define COMMAND_LINE_SIZE 2048 4#define COMMAND_LINE_SIZE 2048
5 5
@@ -41,6 +41,7 @@ struct x86_quirks {
41}; 41};
42 42
43extern struct x86_quirks *x86_quirks; 43extern struct x86_quirks *x86_quirks;
44extern unsigned long saved_video_mode;
44 45
45#ifndef CONFIG_PARAVIRT 46#ifndef CONFIG_PARAVIRT
46#define paravirt_post_allocator_init() do {} while (0) 47#define paravirt_post_allocator_init() do {} while (0)
@@ -100,4 +101,4 @@ void __init x86_64_start_reservations(char *real_mode_data);
100#endif /* __ASSEMBLY__ */ 101#endif /* __ASSEMBLY__ */
101#endif /* __KERNEL__ */ 102#endif /* __KERNEL__ */
102 103
103#endif /* _ASM_X86_SETUP_H */ 104#endif /* ASM_X86__SETUP_H */
diff --git a/include/asm-x86/shmbuf.h b/include/asm-x86/shmbuf.h
index b51413b7497..f51aec2298e 100644
--- a/include/asm-x86/shmbuf.h
+++ b/include/asm-x86/shmbuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SHMBUF_H 1#ifndef ASM_X86__SHMBUF_H
2#define _ASM_X86_SHMBUF_H 2#define ASM_X86__SHMBUF_H
3 3
4/* 4/*
5 * The shmid64_ds structure for x86 architecture. 5 * The shmid64_ds structure for x86 architecture.
@@ -48,4 +48,4 @@ struct shminfo64 {
48 unsigned long __unused4; 48 unsigned long __unused4;
49}; 49};
50 50
51#endif /* _ASM_X86_SHMBUF_H */ 51#endif /* ASM_X86__SHMBUF_H */
diff --git a/include/asm-x86/shmparam.h b/include/asm-x86/shmparam.h
index 0880cf0917b..a83a1fd96a0 100644
--- a/include/asm-x86/shmparam.h
+++ b/include/asm-x86/shmparam.h
@@ -1,6 +1,6 @@
1#ifndef _ASM_X86_SHMPARAM_H 1#ifndef ASM_X86__SHMPARAM_H
2#define _ASM_X86_SHMPARAM_H 2#define ASM_X86__SHMPARAM_H
3 3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ 4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5 5
6#endif /* _ASM_X86_SHMPARAM_H */ 6#endif /* ASM_X86__SHMPARAM_H */
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index 2f9c884d2c0..24879c85b29 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SIGCONTEXT_H 1#ifndef ASM_X86__SIGCONTEXT_H
2#define _ASM_X86_SIGCONTEXT_H 2#define ASM_X86__SIGCONTEXT_H
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/types.h> 5#include <asm/types.h>
@@ -202,4 +202,4 @@ struct sigcontext {
202 202
203#endif /* !__i386__ */ 203#endif /* !__i386__ */
204 204
205#endif 205#endif /* ASM_X86__SIGCONTEXT_H */
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
index 57a9686fb49..4e2ec732dd0 100644
--- a/include/asm-x86/sigcontext32.h
+++ b/include/asm-x86/sigcontext32.h
@@ -1,5 +1,5 @@
1#ifndef _SIGCONTEXT32_H 1#ifndef ASM_X86__SIGCONTEXT32_H
2#define _SIGCONTEXT32_H 1 2#define ASM_X86__SIGCONTEXT32_H
3 3
4/* signal context for 32bit programs. */ 4/* signal context for 32bit programs. */
5 5
@@ -68,4 +68,4 @@ struct sigcontext_ia32 {
68 unsigned int cr2; 68 unsigned int cr2;
69}; 69};
70 70
71#endif 71#endif /* ASM_X86__SIGCONTEXT32_H */
diff --git a/include/asm-x86/siginfo.h b/include/asm-x86/siginfo.h
index a477bea0c2a..808bdfb2958 100644
--- a/include/asm-x86/siginfo.h
+++ b/include/asm-x86/siginfo.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SIGINFO_H 1#ifndef ASM_X86__SIGINFO_H
2#define _ASM_X86_SIGINFO_H 2#define ASM_X86__SIGINFO_H
3 3
4#ifdef __x86_64__ 4#ifdef __x86_64__
5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) 5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
@@ -7,4 +7,4 @@
7 7
8#include <asm-generic/siginfo.h> 8#include <asm-generic/siginfo.h>
9 9
10#endif 10#endif /* ASM_X86__SIGINFO_H */
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
index 6dac49364e9..65acc82d267 100644
--- a/include/asm-x86/signal.h
+++ b/include/asm-x86/signal.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SIGNAL_H 1#ifndef ASM_X86__SIGNAL_H
2#define _ASM_X86_SIGNAL_H 2#define ASM_X86__SIGNAL_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/types.h> 5#include <linux/types.h>
@@ -140,6 +140,9 @@ struct sigaction {
140struct k_sigaction { 140struct k_sigaction {
141 struct sigaction sa; 141 struct sigaction sa;
142}; 142};
143
144extern void do_notify_resume(struct pt_regs *, void *, __u32);
145
143# else /* __KERNEL__ */ 146# else /* __KERNEL__ */
144/* Here we must cater to libcs that poke about in kernel headers. */ 147/* Here we must cater to libcs that poke about in kernel headers. */
145 148
@@ -256,4 +259,4 @@ struct pt_regs;
256#endif /* __KERNEL__ */ 259#endif /* __KERNEL__ */
257#endif /* __ASSEMBLY__ */ 260#endif /* __ASSEMBLY__ */
258 261
259#endif 262#endif /* ASM_X86__SIGNAL_H */
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 3c877f74f27..04f84f4e2c8 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SMP_H_ 1#ifndef ASM_X86__SMP_H
2#define _ASM_X86_SMP_H_ 2#define ASM_X86__SMP_H
3#ifndef __ASSEMBLY__ 3#ifndef __ASSEMBLY__
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <linux/init.h> 5#include <linux/init.h>
@@ -34,6 +34,9 @@ extern cpumask_t cpu_initialized;
34DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 34DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
35DECLARE_PER_CPU(cpumask_t, cpu_core_map); 35DECLARE_PER_CPU(cpumask_t, cpu_core_map);
36DECLARE_PER_CPU(u16, cpu_llc_id); 36DECLARE_PER_CPU(u16, cpu_llc_id);
37#ifdef CONFIG_X86_32
38DECLARE_PER_CPU(int, cpu_number);
39#endif
37 40
38DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); 41DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
39DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); 42DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
@@ -142,7 +145,6 @@ extern unsigned disabled_cpus __cpuinitdata;
142 * from the initial startup. We map APIC_BASE very early in page_setup(), 145 * from the initial startup. We map APIC_BASE very early in page_setup(),
143 * so this is correct in the x86 case. 146 * so this is correct in the x86 case.
144 */ 147 */
145DECLARE_PER_CPU(int, cpu_number);
146#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) 148#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
147extern int safe_smp_processor_id(void); 149extern int safe_smp_processor_id(void);
148 150
@@ -205,4 +207,4 @@ extern void cpu_uninit(void);
205#endif 207#endif
206 208
207#endif /* __ASSEMBLY__ */ 209#endif /* __ASSEMBLY__ */
208#endif 210#endif /* ASM_X86__SMP_H */
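The smp.h hunk only moves the cpu_number per-CPU declaration under CONFIG_X86_32, next to the other 32-bit declarations; the fast path it serves is unchanged, as this header still defines:

    /* 32-bit: a plain per-cpu load, no function call. */
    int cpu = raw_smp_processor_id();   /* expands to x86_read_percpu(cpu_number) */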
diff --git a/include/asm-x86/socket.h b/include/asm-x86/socket.h
index 80af9c4ccad..db73274c83c 100644
--- a/include/asm-x86/socket.h
+++ b/include/asm-x86/socket.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SOCKET_H 1#ifndef ASM_X86__SOCKET_H
2#define _ASM_SOCKET_H 2#define ASM_X86__SOCKET_H
3 3
4#include <asm/sockios.h> 4#include <asm/sockios.h>
5 5
@@ -54,4 +54,4 @@
54 54
55#define SO_MARK 36 55#define SO_MARK 36
56 56
57#endif /* _ASM_SOCKET_H */ 57#endif /* ASM_X86__SOCKET_H */
diff --git a/include/asm-x86/sockios.h b/include/asm-x86/sockios.h
index 49cc72b5d3c..a006704fdc8 100644
--- a/include/asm-x86/sockios.h
+++ b/include/asm-x86/sockios.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SOCKIOS_H 1#ifndef ASM_X86__SOCKIOS_H
2#define _ASM_X86_SOCKIOS_H 2#define ASM_X86__SOCKIOS_H
3 3
4/* Socket-level I/O control calls. */ 4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901 5#define FIOSETOWN 0x8901
@@ -10,4 +10,4 @@
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ 10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ 11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12 12
13#endif /* _ASM_X86_SOCKIOS_H */ 13#endif /* ASM_X86__SOCKIOS_H */
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h
index 9bd48b0a534..38f8e6bc318 100644
--- a/include/asm-x86/sparsemem.h
+++ b/include/asm-x86/sparsemem.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SPARSEMEM_H 1#ifndef ASM_X86__SPARSEMEM_H
2#define _ASM_X86_SPARSEMEM_H 2#define ASM_X86__SPARSEMEM_H
3 3
4#ifdef CONFIG_SPARSEMEM 4#ifdef CONFIG_SPARSEMEM
5/* 5/*
@@ -31,4 +31,4 @@
31#endif 31#endif
32 32
33#endif /* CONFIG_SPARSEMEM */ 33#endif /* CONFIG_SPARSEMEM */
34#endif 34#endif /* ASM_X86__SPARSEMEM_H */
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index e39c790dbfd..93adae338ac 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,5 +1,5 @@
1#ifndef _X86_SPINLOCK_H_ 1#ifndef ASM_X86__SPINLOCK_H
2#define _X86_SPINLOCK_H_ 2#define ASM_X86__SPINLOCK_H
3 3
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5#include <asm/rwlock.h> 5#include <asm/rwlock.h>
@@ -97,7 +97,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
97 "jne 1f\n\t" 97 "jne 1f\n\t"
98 "movw %w0,%w1\n\t" 98 "movw %w0,%w1\n\t"
99 "incb %h1\n\t" 99 "incb %h1\n\t"
100 "lock ; cmpxchgw %w1,%2\n\t" 100 LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
101 "1:" 101 "1:"
102 "sete %b1\n\t" 102 "sete %b1\n\t"
103 "movzbl %b1,%0\n\t" 103 "movzbl %b1,%0\n\t"
@@ -135,7 +135,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
135 int inc = 0x00010000; 135 int inc = 0x00010000;
136 int tmp; 136 int tmp;
137 137
138 asm volatile("lock ; xaddl %0, %1\n" 138 asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
139 "movzwl %w0, %2\n\t" 139 "movzwl %w0, %2\n\t"
140 "shrl $16, %0\n\t" 140 "shrl $16, %0\n\t"
141 "1:\t" 141 "1:\t"
@@ -162,7 +162,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
162 "cmpl %0,%1\n\t" 162 "cmpl %0,%1\n\t"
163 "jne 1f\n\t" 163 "jne 1f\n\t"
164 "addl $0x00010000, %1\n\t" 164 "addl $0x00010000, %1\n\t"
165 "lock ; cmpxchgl %1,%2\n\t" 165 LOCK_PREFIX "cmpxchgl %1,%2\n\t"
166 "1:" 166 "1:"
167 "sete %b1\n\t" 167 "sete %b1\n\t"
168 "movzbl %b1,%0\n\t" 168 "movzbl %b1,%0\n\t"
@@ -366,4 +366,4 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
366#define _raw_read_relax(lock) cpu_relax() 366#define _raw_read_relax(lock) cpu_relax()
367#define _raw_write_relax(lock) cpu_relax() 367#define _raw_write_relax(lock) cpu_relax()
368 368
369#endif 369#endif /* ASM_X86__SPINLOCK_H */
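The ticket-lock asm above switches from a literal "lock ; " string to LOCK_PREFIX. The point of LOCK_PREFIX (from asm/alternative.h) is that it records the location of the prefix in the .smp_locks section, so the alternatives code can patch it out when an SMP kernel boots on a single CPU; a hard-coded "lock ;" cannot be patched. A hedged sketch of the same pattern outside the spinlock code (the helper name is illustrative):

    #include <asm/alternative.h>    /* LOCK_PREFIX */

    /* Same idea as the hunks above: the LOCK prefix is emitted through
     * LOCK_PREFIX so it can be NOPed out on uniprocessor at boot. */
    static inline void atomic_inc_sketch(int *v)
    {
            asm volatile(LOCK_PREFIX "incl %0" : "+m" (*v));
    }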
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h
index 06c071c9eee..6aa9b562c50 100644
--- a/include/asm-x86/spinlock_types.h
+++ b/include/asm-x86/spinlock_types.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_SPINLOCK_TYPES_H 1#ifndef ASM_X86__SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H 2#define ASM_X86__SPINLOCK_TYPES_H
3 3
4#ifndef __LINUX_SPINLOCK_TYPES_H 4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly" 5# error "please don't include this file directly"
@@ -17,4 +17,4 @@ typedef struct {
17 17
18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19 19
20#endif 20#endif /* ASM_X86__SPINLOCK_TYPES_H */
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h
index 774c919dc23..5363e4f7e1c 100644
--- a/include/asm-x86/srat.h
+++ b/include/asm-x86/srat.h
@@ -24,8 +24,8 @@
24 * Send feedback to Pat Gaughen <gone@us.ibm.com> 24 * Send feedback to Pat Gaughen <gone@us.ibm.com>
25 */ 25 */
26 26
27#ifndef _ASM_SRAT_H_ 27#ifndef ASM_X86__SRAT_H
28#define _ASM_SRAT_H_ 28#define ASM_X86__SRAT_H
29 29
30#ifdef CONFIG_ACPI_NUMA 30#ifdef CONFIG_ACPI_NUMA
31extern int get_memcfg_from_srat(void); 31extern int get_memcfg_from_srat(void);
@@ -36,4 +36,4 @@ static inline int get_memcfg_from_srat(void)
36} 36}
37#endif 37#endif
38 38
39#endif /* _ASM_SRAT_H_ */ 39#endif /* ASM_X86__SRAT_H */
diff --git a/include/asm-x86/stacktrace.h b/include/asm-x86/stacktrace.h
index 30f82526a8e..f43517e2853 100644
--- a/include/asm-x86/stacktrace.h
+++ b/include/asm-x86/stacktrace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_STACKTRACE_H 1#ifndef ASM_X86__STACKTRACE_H
2#define _ASM_STACKTRACE_H 1 2#define ASM_X86__STACKTRACE_H
3 3
4extern int kstack_depth_to_print; 4extern int kstack_depth_to_print;
5 5
@@ -18,4 +18,4 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18 unsigned long *stack, unsigned long bp, 18 unsigned long *stack, unsigned long bp,
19 const struct stacktrace_ops *ops, void *data); 19 const struct stacktrace_ops *ops, void *data);
20 20
21#endif 21#endif /* ASM_X86__STACKTRACE_H */
diff --git a/include/asm-x86/stat.h b/include/asm-x86/stat.h
index 5c22dcb5d17..1e120f62890 100644
--- a/include/asm-x86/stat.h
+++ b/include/asm-x86/stat.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_STAT_H 1#ifndef ASM_X86__STAT_H
2#define _ASM_X86_STAT_H 2#define ASM_X86__STAT_H
3 3
4#define STAT_HAVE_NSEC 1 4#define STAT_HAVE_NSEC 1
5 5
@@ -111,4 +111,4 @@ struct __old_kernel_stat {
111#endif 111#endif
112}; 112};
113 113
114#endif 114#endif /* ASM_X86__STAT_H */
diff --git a/include/asm-x86/statfs.h b/include/asm-x86/statfs.h
index 7c651aa9725..3f005bc3aa5 100644
--- a/include/asm-x86/statfs.h
+++ b/include/asm-x86/statfs.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_STATFS_H 1#ifndef ASM_X86__STATFS_H
2#define _ASM_X86_STATFS_H 2#define ASM_X86__STATFS_H
3 3
4#ifdef __i386__ 4#ifdef __i386__
5#include <asm-generic/statfs.h> 5#include <asm-generic/statfs.h>
@@ -60,4 +60,4 @@ struct compat_statfs64 {
60} __attribute__((packed)); 60} __attribute__((packed));
61 61
62#endif /* !__i386__ */ 62#endif /* !__i386__ */
63#endif 63#endif /* ASM_X86__STATFS_H */
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index 193578cd1fd..487843ed245 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_STRING_H_ 1#ifndef ASM_X86__STRING_32_H
2#define _I386_STRING_H_ 2#define ASM_X86__STRING_32_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -323,4 +323,4 @@ extern void *memscan(void *addr, int c, size_t size);
323 323
324#endif /* __KERNEL__ */ 324#endif /* __KERNEL__ */
325 325
326#endif 326#endif /* ASM_X86__STRING_32_H */
diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h
index 52b5ab38339..a2add11d3b6 100644
--- a/include/asm-x86/string_64.h
+++ b/include/asm-x86/string_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_STRING_H_ 1#ifndef ASM_X86__STRING_64_H
2#define _X86_64_STRING_H_ 2#define ASM_X86__STRING_64_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -57,4 +57,4 @@ int strcmp(const char *cs, const char *ct);
57 57
58#endif /* __KERNEL__ */ 58#endif /* __KERNEL__ */
59 59
60#endif 60#endif /* ASM_X86__STRING_64_H */
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 8675c6782a7..acb6d4d491f 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,8 +3,8 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef __ASM_X86_32_SUSPEND_H 6#ifndef ASM_X86__SUSPEND_32_H
7#define __ASM_X86_32_SUSPEND_H 7#define ASM_X86__SUSPEND_32_H
8 8
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
@@ -48,4 +48,4 @@ static inline void acpi_save_register_state(unsigned long return_point)
48extern int acpi_save_state_mem(void); 48extern int acpi_save_state_mem(void);
49#endif 49#endif
50 50
51#endif /* __ASM_X86_32_SUSPEND_H */ 51#endif /* ASM_X86__SUSPEND_32_H */
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h
index dc3262b4307..cf821dd310e 100644
--- a/include/asm-x86/suspend_64.h
+++ b/include/asm-x86/suspend_64.h
@@ -3,8 +3,8 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef __ASM_X86_64_SUSPEND_H 6#ifndef ASM_X86__SUSPEND_64_H
7#define __ASM_X86_64_SUSPEND_H 7#define ASM_X86__SUSPEND_64_H
8 8
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
@@ -49,4 +49,4 @@ extern int acpi_save_state_mem(void);
49extern char core_restore_code; 49extern char core_restore_code;
50extern char restore_registers; 50extern char restore_registers;
51 51
52#endif /* __ASM_X86_64_SUSPEND_H */ 52#endif /* ASM_X86__SUSPEND_64_H */
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index 2730b351afc..1e20adbcad4 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SWIOTLB_H 1#ifndef ASM_X86__SWIOTLB_H
2#define _ASM_SWIOTLB_H 1 2#define ASM_X86__SWIOTLB_H
3 3
4#include <asm/dma-mapping.h> 4#include <asm/dma-mapping.h>
5 5
@@ -55,4 +55,4 @@ static inline void pci_swiotlb_init(void)
55 55
56static inline void dma_mark_clean(void *addr, size_t size) {} 56static inline void dma_mark_clean(void *addr, size_t size) {}
57 57
58#endif /* _ASM_SWIOTLB_H */ 58#endif /* ASM_X86__SWIOTLB_H */
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index b47a1d0b8a8..b689bee7110 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -1,5 +1,5 @@
1#ifndef _I386_SYNC_BITOPS_H 1#ifndef ASM_X86__SYNC_BITOPS_H
2#define _I386_SYNC_BITOPS_H 2#define ASM_X86__SYNC_BITOPS_H
3 3
4/* 4/*
5 * Copyright 1992, Linus Torvalds. 5 * Copyright 1992, Linus Torvalds.
@@ -127,4 +127,4 @@ static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
127 127
128#undef ADDR 128#undef ADDR
129 129
130#endif /* _I386_SYNC_BITOPS_H */ 130#endif /* ASM_X86__SYNC_BITOPS_H */
diff --git a/include/asm-x86/syscall.h b/include/asm-x86/syscall.h
new file mode 100644
index 00000000000..04c47dc5597
--- /dev/null
+++ b/include/asm-x86/syscall.h
@@ -0,0 +1,211 @@
1/*
2 * Access to user system call parameters and results
3 *
4 * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
9 *
10 * See asm-generic/syscall.h for descriptions of what we must do here.
11 */
12
13#ifndef _ASM_SYSCALL_H
14#define _ASM_SYSCALL_H 1
15
16#include <linux/sched.h>
17#include <linux/err.h>
18
19static inline long syscall_get_nr(struct task_struct *task,
20 struct pt_regs *regs)
21{
22 /*
23 * We always sign-extend a -1 value being set here,
24 * so this is always either -1L or a syscall number.
25 */
26 return regs->orig_ax;
27}
28
29static inline void syscall_rollback(struct task_struct *task,
30 struct pt_regs *regs)
31{
32 regs->ax = regs->orig_ax;
33}
34
35static inline long syscall_get_error(struct task_struct *task,
36 struct pt_regs *regs)
37{
38 unsigned long error = regs->ax;
39#ifdef CONFIG_IA32_EMULATION
40 /*
41 * TS_COMPAT is set for 32-bit syscall entries and then
42 * remains set until we return to user mode.
43 */
44 if (task_thread_info(task)->status & TS_COMPAT)
45 /*
46 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
47 * and will match correctly in comparisons.
48 */
49 error = (long) (int) error;
50#endif
51 return IS_ERR_VALUE(error) ? error : 0;
52}
53
54static inline long syscall_get_return_value(struct task_struct *task,
55 struct pt_regs *regs)
56{
57 return regs->ax;
58}
59
60static inline void syscall_set_return_value(struct task_struct *task,
61 struct pt_regs *regs,
62 int error, long val)
63{
64 regs->ax = (long) error ?: val;
65}
66
67#ifdef CONFIG_X86_32
68
69static inline void syscall_get_arguments(struct task_struct *task,
70 struct pt_regs *regs,
71 unsigned int i, unsigned int n,
72 unsigned long *args)
73{
74 BUG_ON(i + n > 6);
75 memcpy(args, &regs->bx + i, n * sizeof(args[0]));
76}
77
78static inline void syscall_set_arguments(struct task_struct *task,
79 struct pt_regs *regs,
80 unsigned int i, unsigned int n,
81 const unsigned long *args)
82{
83 BUG_ON(i + n > 6);
84 memcpy(&regs->bx + i, args, n * sizeof(args[0]));
85}
86
87#else /* CONFIG_X86_64 */
88
89static inline void syscall_get_arguments(struct task_struct *task,
90 struct pt_regs *regs,
91 unsigned int i, unsigned int n,
92 unsigned long *args)
93{
94# ifdef CONFIG_IA32_EMULATION
95 if (task_thread_info(task)->status & TS_COMPAT)
96 switch (i + n) {
97 case 6:
98 if (!n--) break;
99 *args++ = regs->bp;
100 case 5:
101 if (!n--) break;
102 *args++ = regs->di;
103 case 4:
104 if (!n--) break;
105 *args++ = regs->si;
106 case 3:
107 if (!n--) break;
108 *args++ = regs->dx;
109 case 2:
110 if (!n--) break;
111 *args++ = regs->cx;
112 case 1:
113 if (!n--) break;
114 *args++ = regs->bx;
115 case 0:
116 if (!n--) break;
117 default:
118 BUG();
119 break;
120 }
121 else
122# endif
123 switch (i + n) {
124 case 6:
125 if (!n--) break;
126 *args++ = regs->r9;
127 case 5:
128 if (!n--) break;
129 *args++ = regs->r8;
130 case 4:
131 if (!n--) break;
132 *args++ = regs->r10;
133 case 3:
134 if (!n--) break;
135 *args++ = regs->dx;
136 case 2:
137 if (!n--) break;
138 *args++ = regs->si;
139 case 1:
140 if (!n--) break;
141 *args++ = regs->di;
142 case 0:
143 if (!n--) break;
144 default:
145 BUG();
146 break;
147 }
148}
149
150static inline void syscall_set_arguments(struct task_struct *task,
151 struct pt_regs *regs,
152 unsigned int i, unsigned int n,
153 const unsigned long *args)
154{
155# ifdef CONFIG_IA32_EMULATION
156 if (task_thread_info(task)->status & TS_COMPAT)
157 switch (i + n) {
158 case 6:
159 if (!n--) break;
160 regs->bp = *args++;
161 case 5:
162 if (!n--) break;
163 regs->di = *args++;
164 case 4:
165 if (!n--) break;
166 regs->si = *args++;
167 case 3:
168 if (!n--) break;
169 regs->dx = *args++;
170 case 2:
171 if (!n--) break;
172 regs->cx = *args++;
173 case 1:
174 if (!n--) break;
175 regs->bx = *args++;
176 case 0:
177 if (!n--) break;
178 default:
179 BUG();
180 }
181 else
182# endif
183 switch (i + n) {
184 case 6:
185 if (!n--) break;
186 regs->r9 = *args++;
187 case 5:
188 if (!n--) break;
189 regs->r8 = *args++;
190 case 4:
191 if (!n--) break;
192 regs->r10 = *args++;
193 case 3:
194 if (!n--) break;
195 regs->dx = *args++;
196 case 2:
197 if (!n--) break;
198 regs->si = *args++;
199 case 1:
200 if (!n--) break;
201 regs->di = *args++;
202 case 0:
203 if (!n--) break;
204 default:
205 BUG();
206 }
207}
208
209#endif /* CONFIG_X86_32 */
210
211#endif /* _ASM_SYSCALL_H */
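The new asm-x86/syscall.h provides the asm-generic/syscall.h accessors for x86, including the compat path that sign-extends 32-bit error values. A minimal sketch of a caller, assuming it runs on the syscall entry path with the traced task's pt_regs in hand (the hook name is illustrative and not part of this patch):

    #include <linux/kernel.h>
    #include <asm/syscall.h>

    /* Illustrative hook: log the syscall number and first three arguments
     * of 'task', which must be stopped at syscall entry. */
    static void log_syscall_entry(struct task_struct *task, struct pt_regs *regs)
    {
            unsigned long args[3];
            long nr = syscall_get_nr(task, regs);

            if (nr == -1L)
                    return;         /* not inside a syscall */

            syscall_get_arguments(task, regs, 0, 3, args);
            printk(KERN_DEBUG "syscall %ld(%lx, %lx, %lx)\n",
                   nr, args[0], args[1], args[2]);
    }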
diff --git a/include/asm-x86/syscalls.h b/include/asm-x86/syscalls.h
new file mode 100644
index 00000000000..87803da4401
--- /dev/null
+++ b/include/asm-x86/syscalls.h
@@ -0,0 +1,93 @@
1/*
2 * syscalls.h - Linux syscall interfaces (arch-specific)
3 *
4 * Copyright (c) 2008 Jaswinder Singh
5 *
6 * This file is released under the GPLv2.
7 * See the file COPYING for more details.
8 */
9
10#ifndef _ASM_X86_SYSCALLS_H
11#define _ASM_X86_SYSCALLS_H
12
13#include <linux/compiler.h>
14#include <linux/linkage.h>
15#include <linux/types.h>
16#include <linux/signal.h>
17
18/* Common in X86_32 and X86_64 */
19/* kernel/ioport.c */
20asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
21
22/* X86_32 only */
23#ifdef CONFIG_X86_32
24/* kernel/process_32.c */
25asmlinkage int sys_fork(struct pt_regs);
26asmlinkage int sys_clone(struct pt_regs);
27asmlinkage int sys_vfork(struct pt_regs);
28asmlinkage int sys_execve(struct pt_regs);
29
30/* kernel/signal_32.c */
31asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
32asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
33 struct old_sigaction __user *);
34asmlinkage int sys_sigaltstack(unsigned long);
35asmlinkage unsigned long sys_sigreturn(unsigned long);
36asmlinkage int sys_rt_sigreturn(unsigned long);
37
38/* kernel/ioport.c */
39asmlinkage long sys_iopl(unsigned long);
40
41/* kernel/ldt.c */
42asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
43
44/* kernel/sys_i386_32.c */
45asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
46 unsigned long, unsigned long, unsigned long);
47struct mmap_arg_struct;
48asmlinkage int old_mmap(struct mmap_arg_struct __user *);
49struct sel_arg_struct;
50asmlinkage int old_select(struct sel_arg_struct __user *);
51asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
52struct old_utsname;
53asmlinkage int sys_uname(struct old_utsname __user *);
54struct oldold_utsname;
55asmlinkage int sys_olduname(struct oldold_utsname __user *);
56
57/* kernel/tls.c */
58asmlinkage int sys_set_thread_area(struct user_desc __user *);
59asmlinkage int sys_get_thread_area(struct user_desc __user *);
60
61/* kernel/vm86_32.c */
62asmlinkage int sys_vm86old(struct pt_regs);
63asmlinkage int sys_vm86(struct pt_regs);
64
65#else /* CONFIG_X86_32 */
66
67/* X86_64 only */
68/* kernel/process_64.c */
69asmlinkage long sys_fork(struct pt_regs *);
70asmlinkage long sys_clone(unsigned long, unsigned long,
71 void __user *, void __user *,
72 struct pt_regs *);
73asmlinkage long sys_vfork(struct pt_regs *);
74asmlinkage long sys_execve(char __user *, char __user * __user *,
75 char __user * __user *,
76 struct pt_regs *);
77
78/* kernel/ioport.c */
79asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
80
81/* kernel/signal_64.c */
82asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
83 struct pt_regs *);
84asmlinkage long sys_rt_sigreturn(struct pt_regs *);
85
86/* kernel/sys_x86_64.c */
87asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
88 unsigned long, unsigned long, unsigned long);
89struct new_utsname;
90asmlinkage long sys_uname(struct new_utsname __user *);
91
92#endif /* CONFIG_X86_32 */
93#endif /* _ASM_X86_SYSCALLS_H */
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 983ce37c491..34505dd7b24 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SYSTEM_H_ 1#ifndef ASM_X86__SYSTEM_H
2#define _ASM_X86_SYSTEM_H_ 2#define ASM_X86__SYSTEM_H
3 3
4#include <asm/asm.h> 4#include <asm/asm.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
@@ -419,4 +419,4 @@ static inline void rdtsc_barrier(void)
419 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); 419 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
420} 420}
421 421
422#endif 422#endif /* ASM_X86__SYSTEM_H */
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 97fa251ccb2..5aedb8bffc5 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_SYSTEM_H 1#ifndef ASM_X86__SYSTEM_64_H
2#define __ASM_SYSTEM_H 2#define ASM_X86__SYSTEM_64_H
3 3
4#include <asm/segment.h> 4#include <asm/segment.h>
5#include <asm/cmpxchg.h> 5#include <asm/cmpxchg.h>
@@ -19,4 +19,4 @@ static inline void write_cr8(unsigned long val)
19 19
20#include <linux/irqflags.h> 20#include <linux/irqflags.h>
21 21
22#endif 22#endif /* ASM_X86__SYSTEM_64_H */
diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h
index b1a4ea00df7..e7932d7fbba 100644
--- a/include/asm-x86/tce.h
+++ b/include/asm-x86/tce.h
@@ -21,8 +21,8 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 22 */
23 23
24#ifndef _ASM_X86_64_TCE_H 24#ifndef ASM_X86__TCE_H
25#define _ASM_X86_64_TCE_H 25#define ASM_X86__TCE_H
26 26
27extern unsigned int specified_table_size; 27extern unsigned int specified_table_size;
28struct iommu_table; 28struct iommu_table;
@@ -45,4 +45,4 @@ extern void * __init alloc_tce_table(void);
45extern void __init free_tce_table(void *tbl); 45extern void __init free_tce_table(void *tbl);
46extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar); 46extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar);
47 47
48#endif /* _ASM_X86_64_TCE_H */ 48#endif /* ASM_X86__TCE_H */
diff --git a/include/asm-x86/termbits.h b/include/asm-x86/termbits.h
index af1b70ea440..3d00dc5e0c7 100644
--- a/include/asm-x86/termbits.h
+++ b/include/asm-x86/termbits.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_TERMBITS_H 1#ifndef ASM_X86__TERMBITS_H
2#define _ASM_X86_TERMBITS_H 2#define ASM_X86__TERMBITS_H
3 3
4#include <linux/posix_types.h> 4#include <linux/posix_types.h>
5 5
@@ -195,4 +195,4 @@ struct ktermios {
195#define TCSADRAIN 1 195#define TCSADRAIN 1
196#define TCSAFLUSH 2 196#define TCSAFLUSH 2
197 197
198#endif /* _ASM_X86_TERMBITS_H */ 198#endif /* ASM_X86__TERMBITS_H */
diff --git a/include/asm-x86/termios.h b/include/asm-x86/termios.h
index f72956331c4..e235db24807 100644
--- a/include/asm-x86/termios.h
+++ b/include/asm-x86/termios.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_TERMIOS_H 1#ifndef ASM_X86__TERMIOS_H
2#define _ASM_X86_TERMIOS_H 2#define ASM_X86__TERMIOS_H
3 3
4#include <asm/termbits.h> 4#include <asm/termbits.h>
5#include <asm/ioctls.h> 5#include <asm/ioctls.h>
@@ -110,4 +110,4 @@ static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
110 110
111#endif /* __KERNEL__ */ 111#endif /* __KERNEL__ */
112 112
113#endif /* _ASM_X86_TERMIOS_H */ 113#endif /* ASM_X86__TERMIOS_H */
diff --git a/include/asm-x86/therm_throt.h b/include/asm-x86/therm_throt.h
index 399bf6026b1..1c7f57b6b66 100644
--- a/include/asm-x86/therm_throt.h
+++ b/include/asm-x86/therm_throt.h
@@ -1,9 +1,9 @@
1#ifndef __ASM_I386_THERM_THROT_H__ 1#ifndef ASM_X86__THERM_THROT_H
2#define __ASM_I386_THERM_THROT_H__ 1 2#define ASM_X86__THERM_THROT_H
3 3
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5 5
6extern atomic_t therm_throt_en; 6extern atomic_t therm_throt_en;
7int therm_throt_process(int curr); 7int therm_throt_process(int curr);
8 8
9#endif /* __ASM_I386_THERM_THROT_H__ */ 9#endif /* ASM_X86__THERM_THROT_H */
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index da0a675adf9..4db0066a3a3 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -4,8 +4,8 @@
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller 4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */ 5 */
6 6
7#ifndef _ASM_X86_THREAD_INFO_H 7#ifndef ASM_X86__THREAD_INFO_H
8#define _ASM_X86_THREAD_INFO_H 8#define ASM_X86__THREAD_INFO_H
9 9
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <asm/page.h> 11#include <asm/page.h>
@@ -71,6 +71,7 @@ struct thread_info {
71 * Warning: layout of LSW is hardcoded in entry.S 71 * Warning: layout of LSW is hardcoded in entry.S
72 */ 72 */
73#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 73#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
74#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
74#define TIF_SIGPENDING 2 /* signal pending */ 75#define TIF_SIGPENDING 2 /* signal pending */
75#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 76#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
76#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 77#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
@@ -93,6 +94,7 @@ struct thread_info {
93#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ 94#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
94 95
95#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 96#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
97#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
96#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 98#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
97#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 99#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
98#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 100#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -133,7 +135,7 @@ struct thread_info {
133 135
134/* Only used for 64 bit */ 136/* Only used for 64 bit */
135#define _TIF_DO_NOTIFY_MASK \ 137#define _TIF_DO_NOTIFY_MASK \
136 (_TIF_SIGPENDING|_TIF_MCE_NOTIFY) 138 (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)
137 139
138/* flags to check in __switch_to() */ 140/* flags to check in __switch_to() */
139#define _TIF_WORK_CTXSW \ 141#define _TIF_WORK_CTXSW \
@@ -258,4 +260,4 @@ extern void free_thread_info(struct thread_info *ti);
258extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 260extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
259#define arch_task_cache_init arch_task_cache_init 261#define arch_task_cache_init arch_task_cache_init
260#endif 262#endif
261#endif /* _ASM_X86_THREAD_INFO_H */ 263#endif /* ASM_X86__THREAD_INFO_H */
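TIF_NOTIFY_RESUME is added here and folded into _TIF_DO_NOTIFY_MASK, so the return-to-user path now routes it to do_notify_resume(), whose prototype this series adds to asm-x86/signal.h above. A hedged sketch of the shape of that handler, following the pattern used by the arch signal code (the real body lives in the signal_*.c files, not in this hunk, and also handles MCE notification):

    #include <linux/tracehook.h>
    #include <asm/thread_info.h>

    /* Sketch only: handle the work flags covered by _TIF_DO_NOTIFY_MASK. */
    void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
    {
            if (thread_info_flags & _TIF_SIGPENDING)
                    do_signal(regs);                /* deliver pending signals */

            if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                    clear_thread_flag(TIF_NOTIFY_RESUME);
                    tracehook_notify_resume(regs);  /* ptrace/tracehook callback */
            }
    }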
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h
index a17fa473e91..3e724eef7ac 100644
--- a/include/asm-x86/time.h
+++ b/include/asm-x86/time.h
@@ -1,5 +1,5 @@
1#ifndef _ASMX86_TIME_H 1#ifndef ASM_X86__TIME_H
2#define _ASMX86_TIME_H 2#define ASM_X86__TIME_H
3 3
4extern void hpet_time_init(void); 4extern void hpet_time_init(void);
5 5
@@ -46,6 +46,8 @@ static inline int native_set_wallclock(unsigned long nowtime)
46 46
47#endif 47#endif
48 48
49extern void time_init(void);
50
49#ifdef CONFIG_PARAVIRT 51#ifdef CONFIG_PARAVIRT
50#include <asm/paravirt.h> 52#include <asm/paravirt.h>
51#else /* !CONFIG_PARAVIRT */ 53#else /* !CONFIG_PARAVIRT */
@@ -58,4 +60,4 @@ static inline int native_set_wallclock(unsigned long nowtime)
58 60
59extern unsigned long __init calibrate_cpu(void); 61extern unsigned long __init calibrate_cpu(void);
60 62
61#endif 63#endif /* ASM_X86__TIME_H */
diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h
index fb2a4ddddf3..d0babce4b47 100644
--- a/include/asm-x86/timer.h
+++ b/include/asm-x86/timer.h
@@ -1,5 +1,5 @@
1#ifndef _ASMi386_TIMER_H 1#ifndef ASM_X86__TIMER_H
2#define _ASMi386_TIMER_H 2#define ASM_X86__TIMER_H
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/percpu.h> 5#include <linux/percpu.h>
@@ -9,9 +9,12 @@
9unsigned long long native_sched_clock(void); 9unsigned long long native_sched_clock(void);
10unsigned long native_calibrate_tsc(void); 10unsigned long native_calibrate_tsc(void);
11 11
12#ifdef CONFIG_X86_32
12extern int timer_ack; 13extern int timer_ack;
13extern int no_timer_check;
14extern int recalibrate_cpu_khz(void); 14extern int recalibrate_cpu_khz(void);
15#endif /* CONFIG_X86_32 */
16
17extern int no_timer_check;
15 18
16#ifndef CONFIG_PARAVIRT 19#ifndef CONFIG_PARAVIRT
17#define calibrate_tsc() native_calibrate_tsc() 20#define calibrate_tsc() native_calibrate_tsc()
@@ -60,4 +63,4 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
60 return ns; 63 return ns;
61} 64}
62 65
63#endif 66#endif /* ASM_X86__TIMER_H */
diff --git a/include/asm-x86/timex.h b/include/asm-x86/timex.h
index 43e5a78500c..d1ce2416a5d 100644
--- a/include/asm-x86/timex.h
+++ b/include/asm-x86/timex.h
@@ -1,6 +1,6 @@
 /* x86 architecture timex specifications */
-#ifndef _ASM_X86_TIMEX_H
-#define _ASM_X86_TIMEX_H
+#ifndef ASM_X86__TIMEX_H
+#define ASM_X86__TIMEX_H

 #include <asm/processor.h>
 #include <asm/tsc.h>
@@ -16,4 +16,4 @@

 #define ARCH_HAS_READ_CURRENT_TIMER

-#endif
+#endif /* ASM_X86__TIMEX_H */
diff --git a/include/asm-x86/tlb.h b/include/asm-x86/tlb.h
index e4e9e2d07a9..db36e9e89e8 100644
--- a/include/asm-x86/tlb.h
+++ b/include/asm-x86/tlb.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TLB_H
-#define _ASM_X86_TLB_H
+#ifndef ASM_X86__TLB_H
+#define ASM_X86__TLB_H

 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
@@ -8,4 +8,4 @@

 #include <asm-generic/tlb.h>

-#endif
+#endif /* ASM_X86__TLB_H */
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index 35c76ceb9f4..ef68b76dc3c 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TLBFLUSH_H
-#define _ASM_X86_TLBFLUSH_H
+#ifndef ASM_X86__TLBFLUSH_H
+#define ASM_X86__TLBFLUSH_H

 #include <linux/mm.h>
 #include <linux/sched.h>
@@ -165,4 +165,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }

-#endif /* _ASM_X86_TLBFLUSH_H */
+#endif /* ASM_X86__TLBFLUSH_H */
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 90ac7718469..7eca9bc022b 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -22,8 +22,8 @@
  *
  * Send feedback to <colpatch@us.ibm.com>
  */
-#ifndef _ASM_X86_TOPOLOGY_H
-#define _ASM_X86_TOPOLOGY_H
+#ifndef ASM_X86__TOPOLOGY_H
+#define ASM_X86__TOPOLOGY_H

 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
@@ -255,4 +255,4 @@ static inline void set_mp_bus_to_node(int busnum, int node)
 }
 #endif

-#endif /* _ASM_X86_TOPOLOGY_H */
+#endif /* ASM_X86__TOPOLOGY_H */
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h
index b156b08d013..0406bbd898a 100644
--- a/include/asm-x86/trampoline.h
+++ b/include/asm-x86/trampoline.h
@@ -1,5 +1,5 @@
-#ifndef __TRAMPOLINE_HEADER
-#define __TRAMPOLINE_HEADER
+#ifndef ASM_X86__TRAMPOLINE_H
+#define ASM_X86__TRAMPOLINE_H

 #ifndef __ASSEMBLY__

@@ -18,4 +18,4 @@ extern unsigned long setup_trampoline(void);

 #endif /* __ASSEMBLY__ */

-#endif /* __TRAMPOLINE_HEADER */
+#endif /* ASM_X86__TRAMPOLINE_H */
diff --git a/include/asm-x86/traps.h b/include/asm-x86/traps.h
index a4b65a71bd6..2ccebc6fb0b 100644
--- a/include/asm-x86/traps.h
+++ b/include/asm-x86/traps.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TRAPS_H
-#define _ASM_X86_TRAPS_H
+#ifndef ASM_X86__TRAPS_H
+#define ASM_X86__TRAPS_H

 /* Common in X86_32 and X86_64 */
 asmlinkage void divide_error(void);
@@ -51,6 +51,8 @@ void do_spurious_interrupt_bug(struct pt_regs *, long);
 unsigned long patch_espfix_desc(unsigned long, unsigned long);
 asmlinkage void math_emulate(long);

+void do_page_fault(struct pt_regs *regs, unsigned long error_code);
+
 #else /* CONFIG_X86_32 */

 asmlinkage void double_fault(void);
@@ -62,5 +64,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *);
 asmlinkage void do_simd_coprocessor_error(struct pt_regs *);
 asmlinkage void do_spurious_interrupt_bug(struct pt_regs *);

+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code);
+
 #endif /* CONFIG_X86_32 */
-#endif /* _ASM_X86_TRAPS_H */
+#endif /* ASM_X86__TRAPS_H */
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index cb6f6ee45b8..ad0f5c41e78 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -1,8 +1,8 @@
 /*
  * x86 TSC related functions
  */
-#ifndef _ASM_X86_TSC_H
-#define _ASM_X86_TSC_H
+#ifndef ASM_X86__TSC_H
+#define ASM_X86__TSC_H

 #include <asm/processor.h>

@@ -59,4 +59,4 @@ extern void check_tsc_sync_target(void);

 extern int notsc_setup(char *);

-#endif
+#endif /* ASM_X86__TSC_H */
diff --git a/include/asm-x86/types.h b/include/asm-x86/types.h
index 1ac80cd9acf..e78b52e1744 100644
--- a/include/asm-x86/types.h
+++ b/include/asm-x86/types.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_TYPES_H
-#define _ASM_X86_TYPES_H
+#ifndef ASM_X86__TYPES_H
+#define ASM_X86__TYPES_H

 #include <asm-generic/int-ll64.h>

@@ -33,4 +33,4 @@ typedef u32 dma_addr_t;
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */

-#endif
+#endif /* ASM_X86__TYPES_H */
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 5f702d1d521..48ebc0ad40e 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_UACCES_H_
-#define _ASM_UACCES_H_
+#ifndef ASM_X86__UACCESS_H
+#define ASM_X86__UACCESS_H
 /*
  * User space memory access functions
  */
@@ -450,5 +450,5 @@ extern struct movsl_mask {
 # include "uaccess_64.h"
 #endif

-#endif
+#endif /* ASM_X86__UACCESS_H */

diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 6fdef39a0bc..6b5b57d9c6d 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -1,5 +1,5 @@
-#ifndef __i386_UACCESS_H
-#define __i386_UACCESS_H
+#ifndef ASM_X86__UACCESS_32_H
+#define ASM_X86__UACCESS_32_H

 /*
  * User space memory access functions
@@ -215,4 +215,4 @@ long strnlen_user(const char __user *str, long n);
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

-#endif /* __i386_UACCESS_H */
+#endif /* ASM_X86__UACCESS_32_H */
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 45806d60bcb..c96c1f5d07a 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -1,5 +1,5 @@
-#ifndef __X86_64_UACCESS_H
-#define __X86_64_UACCESS_H
+#ifndef ASM_X86__UACCESS_64_H
+#define ASM_X86__UACCESS_64_H

 /*
  * User space memory access functions
@@ -199,4 +199,4 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
 unsigned long
 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

-#endif /* __X86_64_UACCESS_H */
+#endif /* ASM_X86__UACCESS_64_H */
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h
index 50a79f7fcde..9948dd32808 100644
--- a/include/asm-x86/ucontext.h
+++ b/include/asm-x86/ucontext.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_UCONTEXT_H
-#define _ASM_X86_UCONTEXT_H
+#ifndef ASM_X86__UCONTEXT_H
+#define ASM_X86__UCONTEXT_H

 struct ucontext {
 	unsigned long uc_flags;
@@ -9,4 +9,4 @@ struct ucontext {
 	sigset_t uc_sigmask;	/* mask last for extensibility */
 };

-#endif /* _ASM_X86_UCONTEXT_H */
+#endif /* ASM_X86__UCONTEXT_H */
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
index a7bd416b476..59dcdec3716 100644
--- a/include/asm-x86/unaligned.h
+++ b/include/asm-x86/unaligned.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_UNALIGNED_H
-#define _ASM_X86_UNALIGNED_H
+#ifndef ASM_X86__UNALIGNED_H
+#define ASM_X86__UNALIGNED_H

 /*
  * The x86 can do unaligned accesses itself.
@@ -11,4 +11,4 @@
 #define get_unaligned __get_unaligned_le
 #define put_unaligned __put_unaligned_le

-#endif /* _ASM_X86_UNALIGNED_H */
+#endif /* ASM_X86__UNALIGNED_H */
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index d7394673b77..017f4a87c91 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_I386_UNISTD_H_
-#define _ASM_I386_UNISTD_H_
+#ifndef ASM_X86__UNISTD_32_H
+#define ASM_X86__UNISTD_32_H

 /*
  * This file contains the system call numbers.
@@ -376,4 +376,4 @@
 #endif

 #endif /* __KERNEL__ */
-#endif /* _ASM_I386_UNISTD_H_ */
+#endif /* ASM_X86__UNISTD_32_H */
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index 3a341d79179..ace83f1f678 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_64_UNISTD_H_
-#define _ASM_X86_64_UNISTD_H_
+#ifndef ASM_X86__UNISTD_64_H
+#define ASM_X86__UNISTD_64_H

 #ifndef __SYSCALL
 #define __SYSCALL(a, b)
@@ -690,4 +690,4 @@ __SYSCALL(__NR_inotify_init1, sys_inotify_init1)
 #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
 #endif /* __KERNEL__ */

-#endif /* _ASM_X86_64_UNISTD_H_ */
+#endif /* ASM_X86__UNISTD_64_H */
diff --git a/include/asm-x86/unwind.h b/include/asm-x86/unwind.h
index 8b064bd9c55..a2151567db4 100644
--- a/include/asm-x86/unwind.h
+++ b/include/asm-x86/unwind.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_UNWIND_H
-#define _ASM_X86_UNWIND_H
+#ifndef ASM_X86__UNWIND_H
+#define ASM_X86__UNWIND_H

 #define UNW_PC(frame) ((void)(frame), 0UL)
 #define UNW_SP(frame) ((void)(frame), 0UL)
@@ -10,4 +10,4 @@ static inline int arch_unw_user_mode(const void *info)
 	return 0;
 }

-#endif /* _ASM_X86_UNWIND_H */
+#endif /* ASM_X86__UNWIND_H */
diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h
index a3d91004787..aa66c1857f0 100644
--- a/include/asm-x86/user32.h
+++ b/include/asm-x86/user32.h
@@ -1,5 +1,5 @@
-#ifndef USER32_H
-#define USER32_H 1
+#ifndef ASM_X86__USER32_H
+#define ASM_X86__USER32_H

 /* IA32 compatible user structures for ptrace.
  * These should be used for 32bit coredumps too. */
@@ -67,4 +67,4 @@ struct user32 {
 };


-#endif
+#endif /* ASM_X86__USER32_H */
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h
index d6e51edc259..e0fe2f55f1a 100644
--- a/include/asm-x86/user_32.h
+++ b/include/asm-x86/user_32.h
@@ -1,5 +1,5 @@
-#ifndef _I386_USER_H
-#define _I386_USER_H
+#ifndef ASM_X86__USER_32_H
+#define ASM_X86__USER_32_H

 #include <asm/page.h>
 /* Core file format: The core file is written in such a way that gdb
@@ -128,4 +128,4 @@ struct user{
 #define HOST_TEXT_START_ADDR (u.start_code)
 #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)

-#endif /* _I386_USER_H */
+#endif /* ASM_X86__USER_32_H */
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h
index 6037b634c77..38b5799863b 100644
--- a/include/asm-x86/user_64.h
+++ b/include/asm-x86/user_64.h
@@ -1,5 +1,5 @@
-#ifndef _X86_64_USER_H
-#define _X86_64_USER_H
+#ifndef ASM_X86__USER_64_H
+#define ASM_X86__USER_64_H

 #include <asm/types.h>
 #include <asm/page.h>
@@ -134,4 +134,4 @@ struct user {
 #define HOST_TEXT_START_ADDR (u.start_code)
 #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)

-#endif /* _X86_64_USER_H */
+#endif /* ASM_X86__USER_64_H */
diff --git a/include/asm-x86/uv/bios.h b/include/asm-x86/uv/bios.h
index aa73362ff5d..7cd6d7ec130 100644
--- a/include/asm-x86/uv/bios.h
+++ b/include/asm-x86/uv/bios.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_BIOS_H
-#define _ASM_X86_BIOS_H
+#ifndef ASM_X86__UV__BIOS_H
+#define ASM_X86__UV__BIOS_H

 /*
  * BIOS layer definitions.
@@ -65,4 +65,4 @@ x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
 		   unsigned long *drift_info);
 extern const char *x86_bios_strerror(long status);

-#endif /* _ASM_X86_BIOS_H */
+#endif /* ASM_X86__UV__BIOS_H */
diff --git a/include/asm-x86/uv/uv_bau.h b/include/asm-x86/uv/uv_bau.h
index 610b6b308e9..77153fb18f5 100644
--- a/include/asm-x86/uv/uv_bau.h
+++ b/include/asm-x86/uv/uv_bau.h
@@ -8,8 +8,8 @@
  * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
  */

-#ifndef __ASM_X86_UV_BAU__
-#define __ASM_X86_UV_BAU__
+#ifndef ASM_X86__UV__UV_BAU_H
+#define ASM_X86__UV__UV_BAU_H

 #include <linux/bitmap.h>
 #define BITSPERBYTE 8
@@ -329,4 +329,4 @@ extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
 extern void uv_bau_message_intr1(void);
 extern void uv_bau_timeout_intr1(void);

-#endif /* __ASM_X86_UV_BAU__ */
+#endif /* ASM_X86__UV__UV_BAU_H */
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h
index a4ef26e5850..bdb5b01afbf 100644
--- a/include/asm-x86/uv/uv_hub.h
+++ b/include/asm-x86/uv/uv_hub.h
@@ -8,8 +8,8 @@
  * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */

-#ifndef __ASM_X86_UV_HUB_H__
-#define __ASM_X86_UV_HUB_H__
+#ifndef ASM_X86__UV__UV_HUB_H
+#define ASM_X86__UV__UV_HUB_H

 #include <linux/numa.h>
 #include <linux/percpu.h>
@@ -350,5 +350,5 @@ static inline int uv_num_possible_blades(void)
 	return uv_possible_blades;
 }

-#endif /* __ASM_X86_UV_HUB__ */
+#endif /* ASM_X86__UV__UV_HUB_H */

diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h
index 151fd7fcb80..8b03d89d245 100644
--- a/include/asm-x86/uv/uv_mmrs.h
+++ b/include/asm-x86/uv/uv_mmrs.h
@@ -8,8 +8,8 @@
  * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */

-#ifndef __ASM_X86_UV_MMRS__
-#define __ASM_X86_UV_MMRS__
+#ifndef ASM_X86__UV__UV_MMRS_H
+#define ASM_X86__UV__UV_MMRS_H

 #define UV_MMR_ENABLE (1UL << 63)

@@ -1292,4 +1292,4 @@ union uvh_si_alias2_overlay_config_u {
 };


-#endif /* __ASM_X86_UV_MMRS__ */
+#endif /* ASM_X86__UV__UV_MMRS_H */
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h
index 8e18fb80f5e..4ab320913ea 100644
--- a/include/asm-x86/vdso.h
+++ b/include/asm-x86/vdso.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_VDSO_H
-#define _ASM_X86_VDSO_H 1
+#ifndef ASM_X86__VDSO_H
+#define ASM_X86__VDSO_H

 #ifdef CONFIG_X86_64
 extern const char VDSO64_PRELINK[];
@@ -44,4 +44,4 @@ extern const char vdso32_int80_start, vdso32_int80_end;
 extern const char vdso32_syscall_start, vdso32_syscall_end;
 extern const char vdso32_sysenter_start, vdso32_sysenter_end;

-#endif /* asm-x86/vdso.h */
+#endif /* ASM_X86__VDSO_H */
diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h
index 0ccf804377e..b9e493d07d0 100644
--- a/include/asm-x86/vga.h
+++ b/include/asm-x86/vga.h
@@ -4,8 +4,8 @@
  * (c) 1998 Martin Mares <mj@ucw.cz>
  */

-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
+#ifndef ASM_X86__VGA_H
+#define ASM_X86__VGA_H

 /*
  * On the PC, we can just recalculate addresses and then
@@ -17,4 +17,4 @@
 #define vga_readb(x) (*(x))
 #define vga_writeb(x, y) (*(y) = (x))

-#endif
+#endif /* ASM_X86__VGA_H */
diff --git a/include/asm-x86/vgtod.h b/include/asm-x86/vgtod.h
index 3301f092934..38fd1336402 100644
--- a/include/asm-x86/vgtod.h
+++ b/include/asm-x86/vgtod.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_VGTOD_H
-#define _ASM_VGTOD_H 1
+#ifndef ASM_X86__VGTOD_H
+#define ASM_X86__VGTOD_H

 #include <asm/vsyscall.h>
 #include <linux/clocksource.h>
@@ -26,4 +26,4 @@ extern struct vsyscall_gtod_data __vsyscall_gtod_data
 __section_vsyscall_gtod_data;
 extern struct vsyscall_gtod_data vsyscall_gtod_data;

-#endif
+#endif /* ASM_X86__VGTOD_H */
diff --git a/include/asm-x86/visws/cobalt.h b/include/asm-x86/visws/cobalt.h
index 995258831b7..9627a8fe84e 100644
--- a/include/asm-x86/visws/cobalt.h
+++ b/include/asm-x86/visws/cobalt.h
@@ -1,5 +1,5 @@
-#ifndef __I386_SGI_COBALT_H
-#define __I386_SGI_COBALT_H
+#ifndef ASM_X86__VISWS__COBALT_H
+#define ASM_X86__VISWS__COBALT_H

 #include <asm/fixmap.h>

@@ -122,4 +122,4 @@ extern char visws_board_type;

 extern char visws_board_rev;

-#endif /* __I386_SGI_COBALT_H */
+#endif /* ASM_X86__VISWS__COBALT_H */
diff --git a/include/asm-x86/visws/lithium.h b/include/asm-x86/visws/lithium.h
index dfcd4f07ab8..b36d3b378c6 100644
--- a/include/asm-x86/visws/lithium.h
+++ b/include/asm-x86/visws/lithium.h
@@ -1,5 +1,5 @@
-#ifndef __I386_SGI_LITHIUM_H
-#define __I386_SGI_LITHIUM_H
+#ifndef ASM_X86__VISWS__LITHIUM_H
+#define ASM_X86__VISWS__LITHIUM_H

 #include <asm/fixmap.h>

@@ -49,5 +49,5 @@ static inline unsigned short li_pcib_read16(unsigned long reg)
 	return *((volatile unsigned short *)(LI_PCIB_VADDR+reg));
 }

-#endif
+#endif /* ASM_X86__VISWS__LITHIUM_H */

diff --git a/include/asm-x86/visws/piix4.h b/include/asm-x86/visws/piix4.h
index 83ea4f46e41..61c938045ec 100644
--- a/include/asm-x86/visws/piix4.h
+++ b/include/asm-x86/visws/piix4.h
@@ -1,5 +1,5 @@
-#ifndef __I386_SGI_PIIX_H
-#define __I386_SGI_PIIX_H
+#ifndef ASM_X86__VISWS__PIIX4_H
+#define ASM_X86__VISWS__PIIX4_H

 /*
  * PIIX4 as used on SGI Visual Workstations
@@ -104,4 +104,4 @@
  */
 #define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in

-#endif
+#endif /* ASM_X86__VISWS__PIIX4_H */
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index 5ce351325e0..998bd18eb73 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_VM86_H
-#define _LINUX_VM86_H
+#ifndef ASM_X86__VM86_H
+#define ASM_X86__VM86_H

 /*
  * I'm guessing at the VIF/VIP flag usage, but hope that this is how
@@ -205,4 +205,4 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)

 #endif /* __KERNEL__ */

-#endif
+#endif /* ASM_X86__VM86_H */
diff --git a/include/asm-x86/vmi_time.h b/include/asm-x86/vmi_time.h
index c3118c38515..b2d39e6a08b 100644
--- a/include/asm-x86/vmi_time.h
+++ b/include/asm-x86/vmi_time.h
@@ -22,8 +22,8 @@
  *
  */

-#ifndef __VMI_TIME_H
-#define __VMI_TIME_H
+#ifndef ASM_X86__VMI_TIME_H
+#define ASM_X86__VMI_TIME_H

 /*
  * Raw VMI call indices for timer functions
@@ -95,4 +95,4 @@ extern void __devinit vmi_time_ap_init(void);

 #define CONFIG_VMI_ALARM_HZ 100

-#endif
+#endif /* ASM_X86__VMI_TIME_H */
diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h
index 6b66ff905af..dcd4682413d 100644
--- a/include/asm-x86/vsyscall.h
+++ b/include/asm-x86/vsyscall.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_64_VSYSCALL_H_
-#define _ASM_X86_64_VSYSCALL_H_
+#ifndef ASM_X86__VSYSCALL_H
+#define ASM_X86__VSYSCALL_H

 enum vsyscall_num {
 	__NR_vgettimeofday,
@@ -41,4 +41,4 @@ extern void map_vsyscall(void);

 #endif /* __KERNEL__ */

-#endif /* _ASM_X86_64_VSYSCALL_H_ */
+#endif /* ASM_X86__VSYSCALL_H */
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index 8ded7472002..8151f5b8b6c 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -1,5 +1,5 @@
-#ifndef __XEN_EVENTS_H
-#define __XEN_EVENTS_H
+#ifndef ASM_X86__XEN__EVENTS_H
+#define ASM_X86__XEN__EVENTS_H

 enum ipi_vector {
 	XEN_RESCHEDULE_VECTOR,
@@ -21,4 +21,4 @@ static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
 	do_IRQ(regs);
 }

-#endif /* __XEN_EVENTS_H */
+#endif /* ASM_X86__XEN__EVENTS_H */
diff --git a/include/asm-x86/xen/grant_table.h b/include/asm-x86/xen/grant_table.h
index 2444d4593a3..c4baab4d2b6 100644
--- a/include/asm-x86/xen/grant_table.h
+++ b/include/asm-x86/xen/grant_table.h
@@ -1,7 +1,7 @@
-#ifndef __XEN_GRANT_TABLE_H
-#define __XEN_GRANT_TABLE_H
+#ifndef ASM_X86__XEN__GRANT_TABLE_H
+#define ASM_X86__XEN__GRANT_TABLE_H

 #define xen_alloc_vm_area(size) alloc_vm_area(size)
 #define xen_free_vm_area(area) free_vm_area(area)

-#endif /* __XEN_GRANT_TABLE_H */
+#endif /* ASM_X86__XEN__GRANT_TABLE_H */
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index 91cb7fd5c12..44f4259bee3 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -30,8 +30,8 @@
  * IN THE SOFTWARE.
  */

-#ifndef __HYPERCALL_H__
-#define __HYPERCALL_H__
+#ifndef ASM_X86__XEN__HYPERCALL_H
+#define ASM_X86__XEN__HYPERCALL_H

 #include <linux/errno.h>
 #include <linux/string.h>
@@ -524,4 +524,4 @@ MULTI_stack_switch(struct multicall_entry *mcl,
 	mcl->args[1] = esp;
 }

-#endif /* __HYPERCALL_H__ */
+#endif /* ASM_X86__XEN__HYPERCALL_H */
diff --git a/include/asm-x86/xen/hypervisor.h b/include/asm-x86/xen/hypervisor.h
index 04ee0610014..0ef3a88b869 100644
--- a/include/asm-x86/xen/hypervisor.h
+++ b/include/asm-x86/xen/hypervisor.h
@@ -30,8 +30,8 @@
  * IN THE SOFTWARE.
  */

-#ifndef __HYPERVISOR_H__
-#define __HYPERVISOR_H__
+#ifndef ASM_X86__XEN__HYPERVISOR_H
+#define ASM_X86__XEN__HYPERVISOR_H

 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -69,4 +69,4 @@ u64 jiffies_to_st(unsigned long jiffies);

 #define is_running_on_xen() (xen_start_info ? 1 : 0)

-#endif /* __HYPERVISOR_H__ */
+#endif /* ASM_X86__XEN__HYPERVISOR_H */
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h
index 9d810f2538a..d077bba96da 100644
--- a/include/asm-x86/xen/interface.h
+++ b/include/asm-x86/xen/interface.h
@@ -6,8 +6,8 @@
  * Copyright (c) 2004, K A Fraser
  */

-#ifndef __ASM_X86_XEN_INTERFACE_H
-#define __ASM_X86_XEN_INTERFACE_H
+#ifndef ASM_X86__XEN__INTERFACE_H
+#define ASM_X86__XEN__INTERFACE_H

 #ifdef __XEN__
 #define __DEFINE_GUEST_HANDLE(name, type) \
@@ -172,4 +172,4 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
 #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
 #endif

-#endif /* __ASM_X86_XEN_INTERFACE_H */
+#endif /* ASM_X86__XEN__INTERFACE_H */
diff --git a/include/asm-x86/xen/interface_32.h b/include/asm-x86/xen/interface_32.h
index d8ac41d5db8..08167e19fc6 100644
--- a/include/asm-x86/xen/interface_32.h
+++ b/include/asm-x86/xen/interface_32.h
@@ -6,8 +6,8 @@
  * Copyright (c) 2004, K A Fraser
  */

-#ifndef __ASM_X86_XEN_INTERFACE_32_H
-#define __ASM_X86_XEN_INTERFACE_32_H
+#ifndef ASM_X86__XEN__INTERFACE_32_H
+#define ASM_X86__XEN__INTERFACE_32_H


 /*
@@ -94,4 +94,4 @@ typedef struct xen_callback xen_callback_t;
 #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
 #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))

-#endif /* __ASM_X86_XEN_INTERFACE_32_H */
+#endif /* ASM_X86__XEN__INTERFACE_32_H */
diff --git a/include/asm-x86/xen/interface_64.h b/include/asm-x86/xen/interface_64.h
index 842266ce96e..046c0f1e01d 100644
--- a/include/asm-x86/xen/interface_64.h
+++ b/include/asm-x86/xen/interface_64.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_X86_XEN_INTERFACE_64_H
-#define __ASM_X86_XEN_INTERFACE_64_H
+#ifndef ASM_X86__XEN__INTERFACE_64_H
+#define ASM_X86__XEN__INTERFACE_64_H

 /*
  * 64-bit segment selectors
@@ -156,4 +156,4 @@ typedef unsigned long xen_callback_t;
 #endif /* !__ASSEMBLY__ */


-#endif /* __ASM_X86_XEN_INTERFACE_64_H */
+#endif /* ASM_X86__XEN__INTERFACE_64_H */
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index 7b3835d3b77..c50185dccec 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -1,5 +1,5 @@
-#ifndef __XEN_PAGE_H
-#define __XEN_PAGE_H
+#ifndef ASM_X86__XEN__PAGE_H
+#define ASM_X86__XEN__PAGE_H

 #include <linux/pfn.h>

@@ -162,4 +162,4 @@ xmaddr_t arbitrary_virt_to_machine(void *address);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);

-#endif /* __XEN_PAGE_H */
+#endif /* ASM_X86__XEN__PAGE_H */
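The include-guard hunks above all apply one naming pattern: the guard is derived from the header's path under include/, uppercased, with each directory separator turned into a double underscore and a trailing _H, and the closing #endif repeats the guard in a comment (asm-x86/xen/page.h becomes ASM_X86__XEN__PAGE_H). A minimal sketch of the resulting shape, using a hypothetical include/asm-x86/uv/example.h that is not part of this patch:

/* include/asm-x86/uv/example.h -- hypothetical file, for illustration only */
#ifndef ASM_X86__UV__EXAMPLE_H          /* guard mirrors the path under include/ */
#define ASM_X86__UV__EXAMPLE_H

/* declarations would go here */

#endif /* ASM_X86__UV__EXAMPLE_H */     /* closing comment repeats the guard */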